Diffstat (limited to 'arch/tile/mm/homecache.c')
-rw-r--r--	arch/tile/mm/homecache.c	49
1 file changed, 35 insertions, 14 deletions
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index fb3b4a55cec4..cbe6f4f9eca3 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -37,6 +37,8 @@
 #include <asm/pgalloc.h>
 #include <asm/homecache.h>
 
+#include <arch/sim.h>
+
 #include "migrate.h"
 
 
@@ -177,23 +179,46 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 	panic("Unsafe to continue.");
 }
 
+void flush_remote_page(struct page *page, int order)
+{
+	int i, pages = (1 << order);
+	for (i = 0; i < pages; ++i, ++page) {
+		void *p = kmap_atomic(page);
+		int hfh = 0;
+		int home = page_home(page);
+#if CHIP_HAS_CBOX_HOME_MAP()
+		if (home == PAGE_HOME_HASH)
+			hfh = 1;
+		else
+#endif
+			BUG_ON(home < 0 || home >= NR_CPUS);
+		finv_buffer_remote(p, PAGE_SIZE, hfh);
+		kunmap_atomic(p);
+	}
+}
+
 void homecache_evict(const struct cpumask *mask)
 {
 	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
 }
 
-/* Return a mask of the cpus whose caches currently own these pages. */
-static void homecache_mask(struct page *page, int pages,
-			   struct cpumask *home_mask)
+/*
+ * Return a mask of the cpus whose caches currently own these pages.
+ * The return value is whether the pages are all coherently cached
+ * (i.e. none are immutable, incoherent, or uncached).
+ */
+static int homecache_mask(struct page *page, int pages,
+			  struct cpumask *home_mask)
 {
 	int i;
+	int cached_coherently = 1;
 	cpumask_clear(home_mask);
 	for (i = 0; i < pages; ++i) {
 		int home = page_home(&page[i]);
 		if (home == PAGE_HOME_IMMUTABLE ||
 		    home == PAGE_HOME_INCOHERENT) {
 			cpumask_copy(home_mask, cpu_possible_mask);
-			return;
+			return 0;
 		}
 #if CHIP_HAS_CBOX_HOME_MAP()
 		if (home == PAGE_HOME_HASH) {
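The hunk above adds flush_remote_page(), which flushes and invalidates an order-N group of pages from the local CPU via finv_buffer_remote(), passing the hash-for-home flag instead of a per-CPU home when the page is hash-homed. A minimal sketch of how a caller might use it; the helper name and the DMA framing are illustrative assumptions, not part of this patch:

static void dma_prep_pages(struct page *page, int order)
{
	/*
	 * Hypothetical caller: make sure no remote home cache still owns
	 * dirty lines for these pages before a device reads them.
	 */
	flush_remote_page(page, order);
}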
@@ -201,11 +226,14 @@ static void homecache_mask(struct page *page, int pages,
 			continue;
 		}
 #endif
-		if (home == PAGE_HOME_UNCACHED)
+		if (home == PAGE_HOME_UNCACHED) {
+			cached_coherently = 0;
 			continue;
+		}
 		BUG_ON(home < 0 || home >= NR_CPUS);
 		cpumask_set_cpu(home, home_mask);
 	}
+	return cached_coherently;
 }
 
 /*
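With the two hunks above, homecache_mask() still fills in home_mask but now also returns 1 only when every page in the range is coherently cached (none immutable, incoherent, or uncached). A sketch of the calling pattern that return value enables; flush_pages_sketch() and its fallback policy are hypothetical, not code from this patch:

static void flush_pages_sketch(struct page *page, int pages)
{
	struct cpumask home_mask;

	if (homecache_mask(page, pages, &home_mask)) {
		/* Every page is coherently cached: flush only the owning cpus. */
		flush_remote(page_to_pfn(page),
			     cache_flush_length(pages * PAGE_SIZE),
			     &home_mask, 0, 0, 0, NULL, NULL, 0);
	} else {
		/* Some pages are uncached or incoherent: finv each one locally. */
		int i;
		for (i = 0; i < pages; ++i)
			flush_remote_page(&page[i], 0);
	}
}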
@@ -217,13 +245,6 @@ static unsigned long cache_flush_length(unsigned long length)
 	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
 }
 
-/* On the simulator, confirm lines have been evicted everywhere. */
-static void validate_lines_evicted(unsigned long pfn, size_t length)
-{
-	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
-		    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
-}
-
 /* Flush a page out of whatever cache(s) it is in. */
 void homecache_flush_cache(struct page *page, int order)
 {
@@ -234,7 +255,7 @@ void homecache_flush_cache(struct page *page, int order)
 
 	homecache_mask(page, pages, &home_mask);
 	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
-	validate_lines_evicted(pfn, pages * PAGE_SIZE);
+	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
 }
 
 
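The two hunks above drop the file-local validate_lines_evicted() wrapper and call sim_validate_lines_evicted() from the newly included <arch/sim.h> instead; note the argument change from a raw pfn to a physical address via PFN_PHYS(pfn). Judging only from the deleted code, the new helper presumably wraps the same simulator syscall; an illustrative reconstruction, not the actual <arch/sim.h> definition:

static inline void sim_validate_lines_evicted(HV_PhysAddr paddr, size_t length)
{
	/*
	 * Ask the simulator to confirm that no cache in the system still
	 * holds lines from [paddr, paddr + length).
	 */
	sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED, paddr, length);
}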
@@ -391,7 +412,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
 		pte_t *ptep = virt_to_pte(NULL, kva);
 		pte_t pteval = *ptep;
 		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
-		*ptep = pte_set_home(pteval, home);
+		__set_pte(ptep, pte_set_home(pteval, home));
 	}
 }
 
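The last hunk replaces the direct PTE store with __set_pte(). A plausible motivation (an assumption; the diff itself does not say): when the PTE is wider than the native word, a plain *ptep = pteval can be split into two stores in an arbitrary order, briefly exposing a present-but-inconsistent entry to a concurrent walker, so routing the write through a helper lets the stores be ordered safely. A generic illustration of that ordering idea, not the actual tile __set_pte():

static inline void set_pte_ordered(u32 *lo, u32 *hi, u64 pteval)
{
	/* Assumed layout for illustration: the low word carries the present bit. */
	*lo = 0;		/* make the entry invalid first */
	barrier();		/* keep the compiler from reordering the stores */
	*hi = pteval >> 32;	/* update the other half while the entry is invalid */
	barrier();
	*lo = (u32)pteval;	/* publish the complete, valid entry last */
}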