author    Akira Takeuchi <takeuchi.akr@jp.panasonic.com>  2010-10-27 12:28:47 -0400
committer David Howells <dhowells@redhat.com>             2010-10-27 12:28:47 -0400
commit    8be062892365b09f41d64cda7fa63d306e95e0c9 (patch)
tree      89db630e8d0e8090d09a9dfc6535427456cda631 /arch
parent    b478491f2628114b2eae76587f22ce3789b66012 (diff)
MN10300: Cache: Implement SMP global cache flushing
Implement SMP global cache flushing for MN10300. This will be used by the
AM34 which is SMP capable.

Signed-off-by: Akira Takeuchi <takeuchi.akr@jp.panasonic.com>
Signed-off-by: Kiyoshi Owada <owada.kiyoshi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--  arch/mn10300/mm/Kconfig.cache           1
-rw-r--r--  arch/mn10300/mm/Makefile                3
-rw-r--r--  arch/mn10300/mm/cache-flush-icache.c   36
-rw-r--r--  arch/mn10300/mm/cache-inv-icache.c     22
-rw-r--r--  arch/mn10300/mm/cache-smp-flush.c     156
-rw-r--r--  arch/mn10300/mm/cache-smp-inv.c       153
-rw-r--r--  arch/mn10300/mm/cache-smp.c           105
-rw-r--r--  arch/mn10300/mm/cache-smp.h            69
-rw-r--r--  arch/mn10300/mm/cache.c                 5
9 files changed, 535 insertions, 15 deletions
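
For orientation: every global cache operation added by this patch follows the same shape - take the cache-IPI lock, perform the operation on the local CPU, then ask the remaining online CPUs to do the same and wait for them. A condensed sketch of that pattern, taken from one of the functions introduced below (the comments are annotations, not part of the patch):

	void mn10300_dcache_flush(void)
	{
		unsigned long flags;

		flags = smp_lock_cache();                /* irqsave spinlock serialising cache IPIs */
		mn10300_local_dcache_flush();            /* do the operation on this CPU */
		smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);  /* IPI the other CPUs and wait for them */
		smp_unlock_cache(flags);
	}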
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
index 653254a34f88..c4fd923a55a0 100644
--- a/arch/mn10300/mm/Kconfig.cache
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -60,6 +60,7 @@ choice
 
 config MN10300_CACHE_MANAGE_BY_TAG
 	bool "Use the cache tag registers directly"
+	depends on !(SMP && MN10300_CACHE_WTHRU)
 
 config MN10300_CACHE_MANAGE_BY_REG
 	bool "Flush areas by way of automatic purge registers (AM34 only)"
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 56c5af83151b..184745f94c32 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,7 +2,10 @@
 # Makefile for the MN10300-specific memory management code
 #
 
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
 cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
 cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
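
With both CONFIG_SMP and CONFIG_MN10300_CACHE_WBACK enabled, cacheflush-y therefore picks up cache-smp.o, cache-smp-inv.o and cache-smp-flush.o alongside cache.o; on a write-through SMP configuration cache-smp-wback-y stays empty, so only the invalidation helpers are built.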
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
index 0e471e1cb2da..fdb1a9db20f0 100644
--- a/arch/mn10300/mm/cache-flush-icache.c
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -11,6 +11,9 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
 /**
  * flush_icache_page - Flush a page from the dcache and invalidate the icache
  * @vma: The VMA the page is part of.
@@ -22,9 +25,15 @@
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
 	unsigned long start = page_to_phys(page);
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+
+	mn10300_local_dcache_flush_page(start);
+	mn10300_local_icache_inv_page(start);
 
-	mn10300_dcache_flush_page(start);
-	mn10300_icache_inv_page(start);
+	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
 }
 EXPORT_SYMBOL(flush_icache_page);
 
@@ -82,8 +91,9 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 
 	/* flush the dcache and invalidate the icache coverage on that
 	 * region */
-	mn10300_dcache_flush_range2(addr + off, size);
-	mn10300_icache_inv_range2(addr + off, size);
+	mn10300_local_dcache_flush_range2(addr + off, size);
+	mn10300_local_icache_inv_range2(addr + off, size);
+	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
 }
 
 /**
@@ -98,28 +108,32 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 	unsigned long start_page, end_page;
+	unsigned long flags;
+
+	flags = smp_lock_cache();
 
 	if (end > 0x80000000UL) {
 		/* addresses above 0xa0000000 do not go through the cache */
 		if (end > 0xa0000000UL) {
 			end = 0xa0000000UL;
 			if (start >= end)
-				return;
+				goto done;
 		}
 
 		/* kernel addresses between 0x80000000 and 0x9fffffff do not
 		 * require page tables, so we just map such addresses
 		 * directly */
 		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
-		mn10300_dcache_flush_range(start_page, end);
-		mn10300_icache_inv_range(start_page, end);
+		mn10300_local_dcache_flush_range(start_page, end);
+		mn10300_local_icache_inv_range(start_page, end);
+		smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
 		if (start_page == start)
-			return;
+			goto done;
 		end = start_page;
 	}
 
 	start_page = start & PAGE_MASK;
-	end_page = end & PAGE_MASK;
+	end_page = (end - 1) & PAGE_MASK;
 
 	if (start_page == end_page) {
 		/* the first and last bytes are on the same page */
@@ -132,6 +146,10 @@ void flush_icache_range(unsigned long start, unsigned long end)
 		/* more than 2 pages; just flush the entire cache */
 		mn10300_dcache_flush();
 		mn10300_icache_inv();
+		smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
 	}
+
+done:
+	smp_unlock_cache(flags);
 }
 EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
index 4a3f7afcfe53..a8933a60b2d4 100644
--- a/arch/mn10300/mm/cache-inv-icache.c
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -12,6 +12,8 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
 
 /**
  * flush_icache_page_range - Flush dcache and invalidate icache for part of a
@@ -66,7 +68,8 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 	addr = page_to_phys(page);
 
 	/* invalidate the icache coverage on that region */
-	mn10300_icache_inv_range2(addr + off, size);
+	mn10300_local_icache_inv_range2(addr + off, size);
+	smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
 }
 
 /**
@@ -81,28 +84,31 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
 void flush_icache_range(unsigned long start, unsigned long end)
 {
 	unsigned long start_page, end_page;
+	unsigned long flags;
+
+	flags = smp_lock_cache();
 
 	if (end > 0x80000000UL) {
 		/* addresses above 0xa0000000 do not go through the cache */
 		if (end > 0xa0000000UL) {
 			end = 0xa0000000UL;
 			if (start >= end)
-				return;
+				goto done;
 		}
 
 		/* kernel addresses between 0x80000000 and 0x9fffffff do not
 		 * require page tables, so we just map such addresses
 		 * directly */
 		start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
-		mn10300_dcache_flush_range(start_page, end);
 		mn10300_icache_inv_range(start_page, end);
+		smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
 		if (start_page == start)
-			return;
+			goto done;
 		end = start_page;
 	}
 
 	start_page = start & PAGE_MASK;
-	end_page = end & PAGE_MASK;
+	end_page = (end - 1) & PAGE_MASK;
 
 	if (start_page == end_page) {
 		/* the first and last bytes are on the same page */
@@ -113,7 +119,11 @@ void flush_icache_range(unsigned long start, unsigned long end)
 		flush_icache_page_range(end_page, end);
 	} else {
 		/* more than 2 pages; just flush the entire cache */
-		mn10300_icache_inv();
+		mn10300_local_icache_inv();
+		smp_cache_call(SMP_ICACHE_INV, 0, 0);
 	}
+
+done:
+	smp_unlock_cache(flags);
 }
 EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 000000000000..fd51af5eaf70
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush();
+	smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_page(start);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_range(start, end);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_range2(start, size);
+	smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv();
+	smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ * cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_page(start);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_range(start, end);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ * cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_flush_inv_range2(start, size);
+	smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 000000000000..ff1787358c8e
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv();
+	smp_cache_call(SMP_ICACHE_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_page(start);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_range(start, end);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_icache_inv_range2(start, size);
+	smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv();
+	smp_cache_call(SMP_DCACHE_INV, 0, 0);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+	unsigned long flags;
+
+	start &= ~(PAGE_SIZE-1);
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_page(start);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_range(start, end);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+	smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+	unsigned long flags;
+
+	flags = smp_lock_cache();
+	mn10300_local_dcache_inv_range2(start, size);
+	smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+	smp_unlock_cache(flags);
+}
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 000000000000..4a6e9a4b5b27
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map;	/* Bitmask of cache IPI done CPUs */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches. The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+	unsigned long opr_mask = smp_cache_mask;
+
+	switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+	case SMP_DCACHE_NOP:
+		break;
+	case SMP_DCACHE_INV:
+		mn10300_local_dcache_inv();
+		break;
+	case SMP_DCACHE_INV_RANGE:
+		mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+		break;
+	case SMP_DCACHE_FLUSH:
+		mn10300_local_dcache_flush();
+		break;
+	case SMP_DCACHE_FLUSH_RANGE:
+		mn10300_local_dcache_flush_range(smp_cache_start,
+						 smp_cache_end);
+		break;
+	case SMP_DCACHE_FLUSH_INV:
+		mn10300_local_dcache_flush_inv();
+		break;
+	case SMP_DCACHE_FLUSH_INV_RANGE:
+		mn10300_local_dcache_flush_inv_range(smp_cache_start,
+						     smp_cache_end);
+		break;
+	}
+
+	switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+	case SMP_ICACHE_NOP:
+		break;
+	case SMP_ICACHE_INV:
+		mn10300_local_icache_inv();
+		break;
+	case SMP_ICACHE_INV_RANGE:
+		mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+		break;
+	}
+
+	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+		    unsigned long start, unsigned long end)
+{
+	smp_cache_mask = opr_mask;
+	smp_cache_start = start;
+	smp_cache_end = end;
+	smp_cache_ipi_map = cpu_online_map;
+	cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+	send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+	while (!cpus_empty(smp_cache_ipi_map))
+		/* nothing. lockup detection does not belong here */
+		mb();
+}
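
Note the design choice here: the request parameters travel through the static variables smp_cache_mask, smp_cache_start and smp_cache_end rather than in the IPI itself. That is safe because callers must hold smp_cache_lock across smp_cache_call(), so only one request can be outstanding at a time; the sender then busy-waits with mb() until every other online CPU has run smp_cache_interrupt() and cleared its bit in smp_cache_ipi_map.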
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 000000000000..cb52892aa66a
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+	SMP_ICACHE_NOP			= 0x0000,
+	SMP_ICACHE_INV			= 0x0001,
+	SMP_ICACHE_INV_RANGE		= 0x0002,
+};
+#define SMP_ICACHE_OP_MASK		0x0003
+
+enum smp_dcache_ops {
+	SMP_DCACHE_NOP			= 0x0000,
+	SMP_DCACHE_INV			= 0x0004,
+	SMP_DCACHE_INV_RANGE		= 0x0008,
+	SMP_DCACHE_FLUSH		= 0x000c,
+	SMP_DCACHE_FLUSH_RANGE		= 0x0010,
+	SMP_DCACHE_FLUSH_INV		= 0x0014,
+	SMP_DCACHE_FLUSH_INV_RANGE	= 0x0018,
+};
+#define SMP_DCACHE_OP_MASK		0x001c
+
+#define SMP_IDCACHE_INV_FLUSH		(SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE	(SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+			   unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+	__acquires(&smp_cache_lock)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&smp_cache_lock, flags);
+	return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+	__releases(&smp_cache_lock)
+{
+	spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+				  unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
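
As an illustration (not part of the patch) of how these flags compose: a combined request such as SMP_IDCACHE_INV_FLUSH_RANGE is split back into its icache and dcache halves by the two masks when smp_cache_interrupt() runs on the receiving CPU:

	/* Illustration only, using the definitions from cache-smp.h above. */
	unsigned long opr_mask = SMP_IDCACHE_INV_FLUSH_RANGE;	/* 0x0002 | 0x0010 == 0x0012 */

	/* (opr_mask & SMP_DCACHE_OP_MASK) == SMP_DCACHE_FLUSH_RANGE -> local dcache flush of the range */
	/* (opr_mask & SMP_ICACHE_OP_MASK) == SMP_ICACHE_INV_RANGE   -> local icache invalidate of the range */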
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index bc35826f1357..0a1f0aa92ebc 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
 
 EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
 EXPORT_SYMBOL(mn10300_dcache_inv);
 EXPORT_SYMBOL(mn10300_dcache_inv_range);
 EXPORT_SYMBOL(mn10300_dcache_inv_range2);