author    Chris Metcalf <cmetcalf@tilera.com>  2011-02-28 15:48:39 -0500
committer Chris Metcalf <cmetcalf@tilera.com>  2011-03-01 16:21:06 -0500
commit    63b7ca6b04427aea9075d6f5f5f15b82e115bce4 (patch)
tree      97a72ec3d243a46475e880b2c5703a167165f961 /arch/tile/mm/homecache.c
parent    3cebbafd28e6f91677f3becffcdf9150b74a4e0c (diff)
arch/tile: enhance existing finv_buffer_remote() routine
It now takes an additional argument so it can be used to flush-and-invalidate
pages that are cached using hash-for-home, as well as those that are cached
with their coherence point on a single cpu. This allows it to be used more
widely for changing the coherence point of arbitrary pages when necessary.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
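The extra argument is a hash-for-home flag. As a minimal sketch of the
extended interface, condensed from the flush_remote_page() hunk below (the
helper name finv_page_sketch is hypothetical, not part of this patch):

	/*
	 * Hedged sketch: flush-and-invalidate one page, whichever way it
	 * is homed.  page_home(), PAGE_HOME_HASH, kmap_atomic(), and
	 * finv_buffer_remote() are the interfaces this patch uses.
	 */
	static void finv_page_sketch(struct page *page)
	{
		void *p = kmap_atomic(page);
		/* hfh = 1: hash-for-home; hfh = 0: homed on a single cpu */
		int hfh = (page_home(page) == PAGE_HOME_HASH);

		finv_buffer_remote(p, PAGE_SIZE, hfh);
		kunmap_atomic(p);
	}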
Diffstat (limited to 'arch/tile/mm/homecache.c')
-rw-r--r--  arch/tile/mm/homecache.c  36
1 file changed, 31 insertions(+), 5 deletions(-)
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index d78df3a6ee15..f344f4fc7342 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -179,23 +179,46 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 	panic("Unsafe to continue.");
 }
 
+void flush_remote_page(struct page *page, int order)
+{
+	int i, pages = (1 << order);
+	for (i = 0; i < pages; ++i, ++page) {
+		void *p = kmap_atomic(page);
+		int hfh = 0;
+		int home = page_home(page);
+#if CHIP_HAS_CBOX_HOME_MAP()
+		if (home == PAGE_HOME_HASH)
+			hfh = 1;
+		else
+#endif
+			BUG_ON(home < 0 || home >= NR_CPUS);
+		finv_buffer_remote(p, PAGE_SIZE, hfh);
+		kunmap_atomic(p);
+	}
+}
+
 void homecache_evict(const struct cpumask *mask)
 {
 	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
 }
 
-/* Return a mask of the cpus whose caches currently own these pages. */
-static void homecache_mask(struct page *page, int pages,
-			   struct cpumask *home_mask)
+/*
+ * Return a mask of the cpus whose caches currently own these pages.
+ * The return value is whether the pages are all coherently cached
+ * (i.e. none are immutable, incoherent, or uncached).
+ */
+static int homecache_mask(struct page *page, int pages,
+			  struct cpumask *home_mask)
 {
 	int i;
+	int cached_coherently = 1;
 	cpumask_clear(home_mask);
 	for (i = 0; i < pages; ++i) {
 		int home = page_home(&page[i]);
 		if (home == PAGE_HOME_IMMUTABLE ||
 		    home == PAGE_HOME_INCOHERENT) {
 			cpumask_copy(home_mask, cpu_possible_mask);
-			return;
+			return 0;
 		}
 #if CHIP_HAS_CBOX_HOME_MAP()
 		if (home == PAGE_HOME_HASH) {
@@ -203,11 +226,14 @@ static void homecache_mask(struct page *page, int pages,
 			continue;
 		}
 #endif
-		if (home == PAGE_HOME_UNCACHED)
+		if (home == PAGE_HOME_UNCACHED) {
+			cached_coherently = 0;
 			continue;
+		}
 		BUG_ON(home < 0 || home >= NR_CPUS);
 		cpumask_set_cpu(home, home_mask);
 	}
+	return cached_coherently;
 }
 
 /*