Diffstat (limited to 'arch/tile/mm/homecache.c')
-rw-r--r--	arch/tile/mm/homecache.c	445
1 file changed, 445 insertions, 0 deletions
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
new file mode 100644
index 000000000000..52feb77133ce
--- /dev/null
+++ b/arch/tile/mm/homecache.c
@@ -0,0 +1,445 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include "migrate.h"


#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home. There's no point in using it if we
 * don't have coherent local caching, though.
 */
int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
        noallocl2 = 1;
        return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif



/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)



/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes. Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
                            struct cpumask *tlb_cpumask,
                            unsigned long tlb_va, unsigned long tlb_length,
                            HV_Remote_ASID *asids, int asidcount)
{
        struct cpumask mask;
        int i, cpu;

        cpumask_clear(&mask);
        if (cache_cpumask)
                cpumask_or(&mask, &mask, cache_cpumask);
        if (tlb_cpumask && tlb_length) {
                cpumask_or(&mask, &mask, tlb_cpumask);
        }

        for (i = 0; i < asidcount; ++i)
                cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

        /*
         * Don't bother to update atomically; losing a count
         * here is not that critical.
         */
        for_each_cpu(cpu, &mask)
                ++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
                  const struct cpumask *cache_cpumask_orig,
                  HV_VirtAddr tlb_va, unsigned long tlb_length,
                  unsigned long tlb_pgsize,
                  const struct cpumask *tlb_cpumask_orig,
                  HV_Remote_ASID *asids, int asidcount)
{
        int rc;
        int timestamp = 0;  /* happy compiler */
        struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
        struct cpumask *cache_cpumask, *tlb_cpumask;
        HV_PhysAddr cache_pa;
        char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

        mb();   /* provided just to simplify "magic hypervisor" mode */

        /*
         * Canonicalize and copy the cpumasks.
         */
        if (cache_cpumask_orig && cache_control) {
                cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
                cache_cpumask = &cache_cpumask_copy;
        } else {
                cpumask_clear(&cache_cpumask_copy);
                cache_cpumask = NULL;
        }
        if (cache_cpumask == NULL)
                cache_control = 0;
        if (tlb_cpumask_orig && tlb_length) {
                cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
                tlb_cpumask = &tlb_cpumask_copy;
        } else {
                cpumask_clear(&tlb_cpumask_copy);
                tlb_cpumask = NULL;
        }

        hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
                        asids, asidcount);
        cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
        if (cache_control & HV_FLUSH_EVICT_L2)
                timestamp = mark_caches_evicted_start();
        rc = hv_flush_remote(cache_pa, cache_control,
                             cpumask_bits(cache_cpumask),
                             tlb_va, tlb_length, tlb_pgsize,
                             cpumask_bits(tlb_cpumask),
                             asids, asidcount);
        if (cache_control & HV_FLUSH_EVICT_L2)
                mark_caches_evicted_finish(cache_cpumask, timestamp);
        if (rc == 0)
                return;
        cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
        cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

        printk("hv_flush_remote(%#llx, %#lx, %p [%s],"
               " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
               cache_pa, cache_control, cache_cpumask, cache_buf,
               (unsigned long)tlb_va, tlb_length, tlb_pgsize,
               tlb_cpumask, tlb_buf,
               asids, asidcount, rc);
        if (asidcount > 0) {
                int i;
                printk(" asids:");
                for (i = 0; i < asidcount; ++i)
                        printk(" %d,%d,%d",
                               asids[i].x, asids[i].y, asids[i].asid);
                printk("\n");
        }
        panic("Unsafe to continue.");
}

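/* Evict the entire L2 cache from each of the cpus in "mask". */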
void homecache_evict(const struct cpumask *mask)
{
        flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/* Return a mask of the cpus whose caches currently own these pages. */
static void homecache_mask(struct page *page, int pages,
                           struct cpumask *home_mask)
{
        int i;
        cpumask_clear(home_mask);
        for (i = 0; i < pages; ++i) {
                int home = page_home(&page[i]);
                if (home == PAGE_HOME_IMMUTABLE ||
                    home == PAGE_HOME_INCOHERENT) {
                        cpumask_copy(home_mask, cpu_possible_mask);
                        return;
                }
#if CHIP_HAS_CBOX_HOME_MAP()
                if (home == PAGE_HOME_HASH) {
                        cpumask_or(home_mask, home_mask, &hash_for_home_map);
                        continue;
                }
#endif
                if (home == PAGE_HOME_UNCACHED)
                        continue;
                BUG_ON(home < 0 || home >= NR_CPUS);
                cpumask_set_cpu(home, home_mask);
        }
}

/*
 * Return the passed length, or zero if it's long enough that we
 * believe we should evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
        return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}

/* On the simulator, confirm lines have been evicted everywhere. */
static void validate_lines_evicted(unsigned long pfn, size_t length)
{
        sim_syscall(SIM_SYSCALL_VALIDATE_LINES_EVICTED,
                    (HV_PhysAddr)pfn << PAGE_SHIFT, length);
}

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
        int pages = 1 << order;
        int length = cache_flush_length(pages * PAGE_SIZE);
        unsigned long pfn = page_to_pfn(page);
        struct cpumask home_mask;

        homecache_mask(page, pages, &home_mask);
        flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
        validate_lines_evicted(pfn, pages * PAGE_SIZE);
}


/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
        if (hv_pte_get_nc(pte))
                return PAGE_HOME_IMMUTABLE;
        switch (hv_pte_get_mode(pte)) {
        case HV_PTE_MODE_CACHE_TILE_L3:
                return get_remote_cache_cpu(pte);
        case HV_PTE_MODE_CACHE_NO_L3:
                return PAGE_HOME_INCOHERENT;
        case HV_PTE_MODE_UNCACHED:
                return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
        case HV_PTE_MODE_CACHE_HASH_L3:
                return PAGE_HOME_HASH;
#endif
        }
        panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
        /* Check for non-linear file mapping "PTEs" and pass them through. */
        if (pte_file(pte))
                return pte;

#if CHIP_HAS_MMIO()
        /* Check for MMIO mappings and pass them through. */
        if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
                return pte;
#endif


        /*
         * Only immutable pages get NC mappings. If we have a
         * non-coherent PTE, but the underlying page is not
         * immutable, it's likely the result of a forced
         * caching setting running up against ptrace setting
         * the page to be writable underneath. In this case,
         * just keep the PTE coherent.
         */
        if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
                pte = hv_pte_clear_nc(pte);
                printk("non-immutable page incoherently referenced: %#llx\n",
                       pte.val);
        }

        switch (home) {

        case PAGE_HOME_UNCACHED:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
                break;

        case PAGE_HOME_INCOHERENT:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
                break;

        case PAGE_HOME_IMMUTABLE:
                /*
                 * We could home this page anywhere, since it's immutable,
                 * but by default just home it to follow "hash_default".
                 */
                BUG_ON(hv_pte_get_writable(pte));
                if (pte_get_forcecache(pte)) {
                        /* Upgrade "force any cpu" to "No L3" for immutable. */
                        if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
                            && pte_get_anyhome(pte)) {
                                pte = hv_pte_set_mode(pte,
                                                      HV_PTE_MODE_CACHE_NO_L3);
                        }
                } else
#if CHIP_HAS_CBOX_HOME_MAP()
                if (hash_default)
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                else
#endif
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
                pte = hv_pte_set_nc(pte);
                break;

#if CHIP_HAS_CBOX_HOME_MAP()
        case PAGE_HOME_HASH:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                break;
#endif

        default:
                BUG_ON(home < 0 || home >= NR_CPUS ||
                       !cpu_is_valid_lotar(home));
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
                pte = set_remote_cache_cpu(pte, home);
                break;
        }

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
        if (noallocl2)
                pte = hv_pte_set_no_alloc_l2(pte);

        /* Simplify "no local and no l3" to "uncached" */
        if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
            hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
        }
#endif

        /* Checking this case here gives a better panic than from the hv. */
        BUG_ON(hv_pte_get_mode(pte) == 0);

        return pte;
}

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

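/*
 * Report the home of a page: the initial home for highmem pages,
 * otherwise read it from the kernel PTE.
 */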
int page_home(struct page *page)
{
        if (PageHighMem(page)) {
                return initial_page_home();
        } else {
                unsigned long kva = (unsigned long)page_address(page);
                return pte_to_home(*virt_to_pte(NULL, kva));
        }
}

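/*
 * Re-home a range of kernel lowmem pages: flush the old caches and
 * TLB entries, then rewrite the kernel PTEs with the new home.
 */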
void homecache_change_page_home(struct page *page, int order, int home)
{
        int i, pages = (1 << order);
        unsigned long kva;

        BUG_ON(PageHighMem(page));
        BUG_ON(page_count(page) > 1);
        BUG_ON(page_mapcount(page) != 0);
        kva = (unsigned long) page_address(page);
        flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
                     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
                     NULL, 0);

        for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
                pte_t *ptep = virt_to_pte(NULL, kva);
                pte_t pteval = *ptep;
                BUG_ON(!pte_present(pteval) || pte_huge(pteval));
                *ptep = pte_set_home(pteval, home);
        }
}

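/* Allocate lowmem pages with the specified home cache already set. */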
struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
{
        struct page *page;
        BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
        page = alloc_pages(gfp_mask, order);
        if (page)
                homecache_change_page_home(page, order, home);
        return page;
}

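/* As homecache_alloc_pages(), but allocate the pages on the given NUMA node. */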
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
                                        unsigned int order, int home)
{
        struct page *page;
        BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
        page = alloc_pages_node(nid, gfp_mask, order);
        if (page)
                homecache_change_page_home(page, order, home);
        return page;
}

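/* Restore the default home before freeing pages back to the allocator. */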
void homecache_free_pages(unsigned long addr, unsigned int order)
{
        struct page *page;

        if (addr == 0)
                return;

        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
                int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
                while (pages--)
                        __free_page(page++);
        }
}