Diffstat (limited to 'arch/mn10300')
-rw-r--r--  arch/mn10300/Kconfig                   |   1
-rw-r--r--  arch/mn10300/include/asm/cacheflush.h  |  16
-rw-r--r--  arch/mn10300/kernel/kprobes.c          |   4
-rw-r--r--  arch/mn10300/kernel/traps.c            |   2
-rw-r--r--  arch/mn10300/mm/Kconfig.cache          |  34
-rw-r--r--  arch/mn10300/mm/Makefile               |   2
-rw-r--r--  arch/mn10300/mm/cache-flush-icache.c   | 137
-rw-r--r--  arch/mn10300/mm/cache-inv-icache.c     | 119
-rw-r--r--  arch/mn10300/mm/cache.c                |  90
9 files changed, 309 insertions(+), 96 deletions(-)
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 069e34d4c4ac..21e2a534d98e 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -18,6 +18,7 @@ config AM33_3
 config AM34_2
 	def_bool n
 	select MN10300_HAS_ATOMIC_OPS_UNIT
+	select MN10300_HAS_CACHE_SNOOP
 
 config MMU
 	def_bool y
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 748143f65418..faed90240ded 100644
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
@@ -131,18 +131,22 @@ extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long s
 /*
  * Physically-indexed cache management
  */
-#ifdef CONFIG_MN10300_CACHE_ENABLED
-
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+	mn10300_icache_inv_page(page_to_phys(page));
+}
 extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
 #else
-
 #define flush_icache_range(start, end) do {} while (0)
 #define flush_icache_page(vma, pg) do {} while (0)
-
 #endif
 
+
 #define flush_icache_user_range(vma, pg, adr, len) \
 	flush_icache_range(adr, adr + len)
 
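
The hunk above is the interface-level change: flush_icache_page() and flush_icache_range() now come in a dcache-flushing flavour, an icache-invalidate-only flavour, or compile away entirely. As a rough illustration of what a generic caller sees (a sketch, not part of the patch; install_stub() is a hypothetical name):

/* Sketch only, not part of the patch: install_stub() is a hypothetical
 * caller showing what the configuration-dependent definitions above mean
 * for code that writes instructions and then runs them.
 */
#include <linux/string.h>
#include <asm/cacheflush.h>

static void install_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);	/* the instructions land in the dcache */

	/* With CONFIG_MN10300_CACHE_FLUSH_ICACHE this flushes the dcache and
	 * invalidates the icache; with CONFIG_MN10300_CACHE_INV_ICACHE it
	 * only invalidates the icache; with snooping or the caches disabled
	 * it compiles down to a no-op.
	 */
	flush_icache_range((unsigned long) dst, (unsigned long) dst + len);
}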
diff --git a/arch/mn10300/kernel/kprobes.c b/arch/mn10300/kernel/kprobes.c
index 67e6389d625a..0311a7fcea16 100644
--- a/arch/mn10300/kernel/kprobes.c
+++ b/arch/mn10300/kernel/kprobes.c
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush();
 	mn10300_icache_inv();
+#endif
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	*p->addr = p->opcode;
 	regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush();
 	mn10300_icache_inv();
+#endif
 }
 
 static inline
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index a64604b512d5..c7257a1304a9 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -533,8 +533,10 @@ void __init set_intr_stub(enum exception_code code, void *handler)
 	vector[6] = 0xcb;
 	vector[7] = 0xcb;
 
+#ifndef CONFIG_MN10300_CACHE_SNOOP
 	mn10300_dcache_flush_inv();
 	mn10300_icache_inv();
+#endif
 }
 
 /*
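
Both the kprobes and the exception-stub hunks apply the same rule: when the AM34 snoop unit keeps the icache coherent with dcache writes, the manual flush/invalidate pair is dead weight. A minimal sketch of that pattern, with a hypothetical helper name:

/* Sketch of the pattern used in both hunks above (sync_icache_after_text_poke()
 * is a hypothetical name): self-modifying kernel code only needs manual cache
 * maintenance when the hardware does not snoop.
 */
#include <asm/cacheflush.h>

static void sync_icache_after_text_poke(void)
{
#ifndef CONFIG_MN10300_CACHE_SNOOP
	mn10300_dcache_flush();		/* push the modified opcodes to RAM */
	mn10300_icache_inv();		/* drop any stale instruction lines */
#endif
}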
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
index 97adc06e7128..653254a34f88 100644
--- a/arch/mn10300/mm/Kconfig.cache
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -22,12 +22,26 @@ choice
 
 config MN10300_CACHE_WBACK
 	bool "Write-Back"
+	help
+	  The dcache operates in delayed write-back mode. It must be manually
+	  flushed if writes are made that subsequently need to be executed or
+	  to be DMA'd by a device.
 
 config MN10300_CACHE_WTHRU
 	bool "Write-Through"
+	help
+	  The dcache operates in immediate write-through mode. Writes are
+	  committed to RAM immediately in addition to being stored in the
+	  cache. This means that the written data is immediately available for
+	  execution or DMA.
+
+	  This is not available for use with an SMP kernel if cache flushing
+	  and invalidation by automatic purge register is not selected.
 
 config MN10300_CACHE_DISABLED
 	bool "Disabled"
+	help
+	  The icache and dcache are disabled.
 
 endchoice
 
@@ -64,3 +78,23 @@ config MN10300_CACHE_FLUSH_BY_TAG
 
 config MN10300_CACHE_FLUSH_BY_REG
 	def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+	def_bool n
+
+config MN10300_CACHE_SNOOP
+	bool "Use CPU Cache Snooping"
+	depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+	default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+	def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+	help
+	  Set if we need the dcache flushing before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+	def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+	help
+	  Set if we need the icache to be invalidated, even if the dcache is in
+	  write-through mode and doesn't need flushing.
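
The derived symbols encode a small decision table: FLUSH_ICACHE for write-back without snooping, INV_ICACHE for write-through without snooping, and neither when snooping is on or the caches are off. A hedged compile-time sketch of those constraints (not part of the patch; the CONFIG_* macros are supplied by the kernel configuration):

/* Sketch, not a real file in the tree: the constraints the new Kconfig
 * symbols encode, spelled out as compile-time checks.
 */
#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) && \
    defined(CONFIG_MN10300_CACHE_INV_ICACHE)
#error "write-back and write-through dcache modes are mutually exclusive"
#endif

#if defined(CONFIG_MN10300_CACHE_SNOOP) && \
    (defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE) || \
     defined(CONFIG_MN10300_CACHE_INV_ICACHE))
#error "manual icache maintenance should be compiled out when snooping"
#endif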
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 7b997236ed20..56c5af83151b 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -3,6 +3,8 @@
 #
 
 cacheflush-y := cache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 000000000000..0e471e1cb2da
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,137 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	unsigned long start = page_to_phys(page);
+
+	mn10300_dcache_flush_page(start);
+	mn10300_icache_inv_page(start);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ *	single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given. The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr, size, off;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ppte, pte;
+
+	/* work out how much of the page to flush */
+	off = start & ~PAGE_MASK;
+	size = end - start;
+
+	/* get the physical address the page is mapped to from the page
+	 * tables */
+	pgd = pgd_offset(current->mm, start);
+	if (!pgd || !pgd_val(*pgd))
+		return;
+
+	pud = pud_offset(pgd, start);
+	if (!pud || !pud_val(*pud))
+		return;
+
+	pmd = pmd_offset(pud, start);
+	if (!pmd || !pmd_val(*pmd))
+		return;
+
+	ppte = pte_offset_map(pmd, start);
+	if (!ppte)
+		return;
+	pte = *ppte;
+	pte_unmap(ppte);
+
+	if (pte_none(pte))
+		return;
+
+	page = pte_page(pte);
+	if (!page)
+		return;
+
+	addr = page_to_phys(page);
+
+	/* flush the dcache and invalidate the icache coverage on that
+	 * region */
+	mn10300_dcache_flush_range2(addr + off, size);
+	mn10300_icache_inv_range2(addr + off, size);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long start_page, end_page;
+
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				return;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses
+		 * directly */
+		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_dcache_flush_range(start_page, end);
+		mn10300_icache_inv_range(start_page, end);
+		if (start_page == start)
+			return;
+		end = start_page;
+	}
+
+	start_page = start & PAGE_MASK;
+	end_page = end & PAGE_MASK;
+
+	if (start_page == end_page) {
+		/* the first and last bytes are on the same page */
+		flush_icache_page_range(start, end);
+	} else if (start_page + 1 == end_page) {
+		/* split over two virtually contiguous pages */
+		flush_icache_page_range(start, end_page);
+		flush_icache_page_range(end_page, end);
+	} else {
+		/* more than 2 pages; just flush the entire cache */
+		mn10300_dcache_flush();
+		mn10300_icache_inv();
+	}
+}
+EXPORT_SYMBOL(flush_icache_range);
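
flush_icache_page_range() above resolves a user virtual address to a physical one by walking pgd -> pud -> pmd -> pte and then using page_to_phys(). The same walk, distilled into a stand-alone sketch (virt_to_phys_of() is a hypothetical name, not from the patch):

/* Sketch, not from the patch: the page-table walk flush_icache_page_range()
 * performs, extracted into a helper with a hypothetical name.  Returns 0 if
 * the address is not currently mapped.
 */
#include <linux/mm.h>

static unsigned long virt_to_phys_of(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	pgd = pgd_offset(mm, addr);
	if (!pgd || !pgd_val(*pgd))
		return 0;
	pud = pud_offset(pgd, addr);
	if (!pud || !pud_val(*pud))
		return 0;
	pmd = pmd_offset(pud, addr);
	if (!pmd || !pmd_val(*pmd))
		return 0;
	ppte = pte_offset_map(pmd, addr);
	if (!ppte)
		return 0;
	pte = *ppte;
	pte_unmap(ppte);
	if (pte_none(pte))
		return 0;

	/* physical base of the backing page plus the offset within the page */
	return page_to_phys(pte_page(pte)) + (addr & ~PAGE_MASK);
}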
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 000000000000..4a3f7afcfe53
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,119 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ *	single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given. The page must be in the paged area. The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+	unsigned long addr, size, off;
+	struct page *page;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ppte, pte;
+
+	/* work out how much of the page to flush */
+	off = start & ~PAGE_MASK;
+	size = end - start;
+
+	/* get the physical address the page is mapped to from the page
+	 * tables */
+	pgd = pgd_offset(current->mm, start);
+	if (!pgd || !pgd_val(*pgd))
+		return;
+
+	pud = pud_offset(pgd, start);
+	if (!pud || !pud_val(*pud))
+		return;
+
+	pmd = pmd_offset(pud, start);
+	if (!pmd || !pmd_val(*pmd))
+		return;
+
+	ppte = pte_offset_map(pmd, start);
+	if (!ppte)
+		return;
+	pte = *ppte;
+	pte_unmap(ppte);
+
+	if (pte_none(pte))
+		return;
+
+	page = pte_page(pte);
+	if (!page)
+		return;
+
+	addr = page_to_phys(page);
+
+	/* invalidate the icache coverage on that region */
+	mn10300_icache_inv_range2(addr + off, size);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long start_page, end_page;
+
+	if (end > 0x80000000UL) {
+		/* addresses above 0xa0000000 do not go through the cache */
+		if (end > 0xa0000000UL) {
+			end = 0xa0000000UL;
+			if (start >= end)
+				return;
+		}
+
+		/* kernel addresses between 0x80000000 and 0x9fffffff do not
+		 * require page tables, so we just map such addresses
+		 * directly */
+		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+		mn10300_dcache_flush_range(start_page, end);
+		mn10300_icache_inv_range(start_page, end);
+		if (start_page == start)
+			return;
+		end = start_page;
+	}
+
+	start_page = start & PAGE_MASK;
+	end_page = end & PAGE_MASK;
+
+	if (start_page == end_page) {
+		/* the first and last bytes are on the same page */
+		flush_icache_page_range(start, end);
+	} else if (start_page + 1 == end_page) {
+		/* split over two virtually contiguous pages */
+		flush_icache_page_range(start, end_page);
+		flush_icache_page_range(end_page, end);
+	} else {
+		/* more than 2 pages; just flush the entire cache */
+		mn10300_icache_inv();
+	}
+}
+EXPORT_SYMBOL(flush_icache_range);
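
The two flush_icache_range() implementations differ only in whether the dcache needs flushing; the address-range handling is shared. A stand-alone user-space sketch (not kernel code) of how that handling carves up a range on the MN10300 memory map, with constants mirroring the checks above:

/* Sketch only: [0xa0000000, ...) is uncached, [0x80000000, 0xa0000000) is
 * direct-mapped kernel space needing no page tables, and anything below
 * 0x80000000 must be resolved through the page tables.
 */
#include <stdio.h>

static void classify(unsigned long start, unsigned long end)
{
	printf("range [%#lx, %#lx):\n", start, end);
	if (end > 0xa0000000UL)
		printf("  above 0xa0000000: uncached, nothing to do\n");
	if (end > 0x80000000UL && start < 0xa0000000UL)
		printf("  [%#lx, %#lx): direct-mapped, flushed without a "
		       "page-table walk\n",
		       start >= 0x80000000UL ? start : 0x80000000UL,
		       end <= 0xa0000000UL ? end : 0xa0000000UL);
	if (start < 0x80000000UL)
		printf("  [%#lx, %#lx): paged area, handled per page via "
		       "flush_icache_page_range() or by a full cache flush\n",
		       start, end <= 0x80000000UL ? end : 0x80000000UL);
}

int main(void)
{
	classify(0x00400000UL, 0x00400080UL);	/* user text, single page */
	classify(0x7ffffff0UL, 0x80000010UL);	/* straddles the boundary */
	classify(0x9ff00000UL, 0xa0100000UL);	/* runs into uncached space */
	return 0;
}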
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2c..bc35826f1357 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -37,96 +37,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
 #endif
 
 /*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	mn10300_dcache_flush_page(page_to_phys(page));
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
-	unsigned long addr, size, base, off;
-	struct page *page;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *ppte, pte;
-
-	if (end > 0x80000000UL) {
-		/* addresses above 0xa0000000 do not go through the cache */
-		if (end > 0xa0000000UL) {
-			end = 0xa0000000UL;
-			if (start >= end)
-				return;
-		}
-
-		/* kernel addresses between 0x80000000 and 0x9fffffff do not
-		 * require page tables, so we just map such addresses directly */
-		base = (start >= 0x80000000UL) ? start : 0x80000000UL;
-		mn10300_dcache_flush_range(base, end);
-		if (base == start)
-			goto invalidate;
-		end = base;
-	}
-
-	for (; start < end; start += size) {
-		/* work out how much of the page to flush */
-		off = start & (PAGE_SIZE - 1);
-
-		size = end - start;
-		if (size > PAGE_SIZE - off)
-			size = PAGE_SIZE - off;
-
-		/* get the physical address the page is mapped to from the page
-		 * tables */
-		pgd = pgd_offset(current->mm, start);
-		if (!pgd || !pgd_val(*pgd))
-			continue;
-
-		pud = pud_offset(pgd, start);
-		if (!pud || !pud_val(*pud))
-			continue;
-
-		pmd = pmd_offset(pud, start);
-		if (!pmd || !pmd_val(*pmd))
-			continue;
-
-		ppte = pte_offset_map(pmd, start);
-		if (!ppte)
-			continue;
-		pte = *ppte;
-		pte_unmap(ppte);
-
-		if (pte_none(pte))
-			continue;
-
-		page = pte_page(pte);
-		if (!page)
-			continue;
-
-		addr = page_to_phys(page);
-
-		/* flush the dcache and invalidate the icache coverage on that
-		 * region */
-		mn10300_dcache_flush_range2(addr + off, size);
-	}
-#endif
-
-invalidate:
-	mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-/*
  * allow userspace to flush the instruction cache
  */
 asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)