Diffstat (limited to 'drivers/misc/sgi-gru/grumain.c')
-rw-r--r-- | drivers/misc/sgi-gru/grumain.c | 798 |
1 file changed, 798 insertions, 0 deletions
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
new file mode 100644
index 000000000000..aef6822cb80e
--- /dev/null
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -0,0 +1,798 @@
1 | /* | ||
2 | * SN Platform GRU Driver | ||
3 | * | ||
4 | * DRIVER TABLE MANAGER + GRU CONTEXT LOAD/UNLOAD | ||
5 | * | ||
6 | * This file is subject to the terms and conditions of the GNU General Public | ||
7 | * License. See the file "COPYING" in the main directory of this archive | ||
8 | * for more details. | ||
9 | * | ||
10 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <asm/uv/uv_hub.h> | ||
21 | #include "gru.h" | ||
22 | #include "grutables.h" | ||
23 | #include "gruhandles.h" | ||
24 | |||
25 | unsigned long options __read_mostly; | ||
26 | |||
27 | static struct device_driver gru_driver = { | ||
28 | .name = "gru" | ||
29 | }; | ||
30 | |||
31 | static struct device gru_device = { | ||
32 | .bus_id = {0}, | ||
33 | .driver = &gru_driver, | ||
34 | }; | ||
35 | |||
36 | struct device *grudev = &gru_device; | ||
37 | |||
38 | /* | ||
39 | * Select a gru fault map to be used by the current cpu. Note that | ||
40 | * multiple cpus may be using the same map. | ||
41 | * ZZZ should "shift" be used?? Depends on HT cpu numbering | ||
42 | * ZZZ should be inline but did not work on emulator | ||
43 | */ | ||
44 | int gru_cpu_fault_map_id(void) | ||
45 | { | ||
46 | return uv_blade_processor_id() % GRU_NUM_TFM; | ||
47 | } | ||
48 | |||
49 | /*--------- ASID Management ------------------------------------------- | ||
50 | * | ||
51 | * Initially, assign asids sequentially from MIN_ASID .. MAX_ASID. | ||
52 | * Once MAX is reached, flush the TLB & start over. However, | ||
53 | * some asids may still be in use. There won't be many (percentage wise) still | ||
54 | * in use. Search active contexts & determine the value of the first | ||
55 | * asid in use ("x"s below). Set "limit" to this value. | ||
56 | * This defines a block of assignable asids. | ||
57 | * | ||
58 | * When "limit" is reached, search forward from limit+1 and determine the | ||
59 | * next block of assignable asids. | ||
60 | * | ||
61 | * Repeat until MAX_ASID is reached, then start over again. | ||
62 | * | ||
63 | * Each time MAX_ASID is reached, increment the asid generation. Since | ||
64 | * the search for in-use asids only checks contexts with GRUs currently | ||
65 | * assigned, asids in some contexts will be missed. Prior to loading | ||
66 | * a context, the asid generation of the GTS asid is rechecked. If it | ||
67 | * doesn't match the current generation, a new asid will be assigned. | ||
68 | * | ||
69 | * 0---------------x------------x---------------------x----| | ||
70 | * ^-next ^-limit ^-MAX_ASID | ||
71 | * | ||
72 | * All asid manipulation & context loading/unloading is protected by the | ||
73 | * gs_lock. | ||
74 | */ | ||
75 | |||
76 | /* Hit the asid limit. Start over */ | ||
77 | static int gru_wrap_asid(struct gru_state *gru) | ||
78 | { | ||
79 | gru_dbg(grudev, "gru %p\n", gru); | ||
80 | STAT(asid_wrap); | ||
81 | gru->gs_asid_gen++; | ||
82 | gru_flush_all_tlb(gru); | ||
83 | return MIN_ASID; | ||
84 | } | ||
85 | |||
86 | /* Find the next chunk of unused asids */ | ||
87 | static int gru_reset_asid_limit(struct gru_state *gru, int asid) | ||
88 | { | ||
89 | int i, gid, inuse_asid, limit; | ||
90 | |||
91 | gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); | ||
92 | STAT(asid_next); | ||
93 | limit = MAX_ASID; | ||
94 | if (asid >= limit) | ||
95 | asid = gru_wrap_asid(gru); | ||
96 | gid = gru->gs_gid; | ||
97 | again: | ||
98 | for (i = 0; i < GRU_NUM_CCH; i++) { | ||
99 | if (!gru->gs_gts[i]) | ||
100 | continue; | ||
101 | inuse_asid = gru->gs_gts[i]->ts_gms->ms_asids[gid].mt_asid; | ||
102 | gru_dbg(grudev, "gru %p, inuse_asid 0x%x, cxtnum %d, gts %p\n", | ||
103 | gru, inuse_asid, i, gru->gs_gts[i]); | ||
104 | if (inuse_asid == asid) { | ||
105 | asid += ASID_INC; | ||
106 | if (asid >= limit) { | ||
107 | /* | ||
108 | * empty range: reset the range limit and | ||
109 | * start over | ||
110 | */ | ||
111 | limit = MAX_ASID; | ||
112 | if (asid >= MAX_ASID) | ||
113 | asid = gru_wrap_asid(gru); | ||
114 | goto again; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | if ((inuse_asid > asid) && (inuse_asid < limit)) | ||
119 | limit = inuse_asid; | ||
120 | } | ||
121 | gru->gs_asid_limit = limit; | ||
122 | gru->gs_asid = asid; | ||
123 | gru_dbg(grudev, "gru %p, new asid 0x%x, new_limit 0x%x\n", gru, asid, | ||
124 | limit); | ||
125 | return asid; | ||
126 | } | ||
127 | |||
128 | /* Assign a new ASID to a thread context. */ | ||
129 | static int gru_assign_asid(struct gru_state *gru) | ||
130 | { | ||
131 | int asid; | ||
132 | |||
133 | spin_lock(&gru->gs_asid_lock); | ||
134 | gru->gs_asid += ASID_INC; | ||
135 | asid = gru->gs_asid; | ||
136 | if (asid >= gru->gs_asid_limit) | ||
137 | asid = gru_reset_asid_limit(gru, asid); | ||
138 | spin_unlock(&gru->gs_asid_lock); | ||
139 | |||
140 | gru_dbg(grudev, "gru %p, asid 0x%x\n", gru, asid); | ||
141 | return asid; | ||
142 | } | ||
143 | |||
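The block-scan above is easier to see with concrete numbers. The following stand-alone sketch is hypothetical user-space C, not part of the driver: the constants and the inuse[] array merely stand in for the per-context scan of gs_gts[]. It models how gru_reset_asid_limit() skips asids that are still held and sets the limit to the smallest in-use asid above the new starting point.

/* Hypothetical stand-alone model of the asid block scan -- not driver code. */
#include <stdio.h>

#define MIN_ASID	0x8		/* illustrative values only */
#define MAX_ASID	0xfffff0
#define ASID_INC	0x8

static int inuse[] = { 0x10, 0x40, 0, 0x28 };	/* asids still held by loaded contexts */
static int ninuse = 4;

/* Return the next assignable asid; *limit bounds the free block found. */
static int reset_asid_limit(int asid, int *limit)
{
	int i;

	*limit = MAX_ASID;
again:
	for (i = 0; i < ninuse; i++) {
		if (inuse[i] == asid) {
			asid += ASID_INC;
			if (asid >= *limit) {
				/* block exhausted: start a fresh scan */
				*limit = MAX_ASID;
				if (asid >= MAX_ASID)
					asid = MIN_ASID;	/* wrap */
				goto again;
			}
		}
		if (inuse[i] > asid && inuse[i] < *limit)
			*limit = inuse[i];
	}
	return asid;
}

int main(void)
{
	int limit, asid = reset_asid_limit(0x10, &limit);

	printf("asid 0x%x, limit 0x%x\n", asid, limit);	/* asid 0x18, limit 0x28 */
	return 0;
}

With 0x10 and 0x28 in use, the sketch reports the assignable block [0x18, 0x28): asids are handed out from 0x18 until the limit is hit, at which point the next block is located.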
144 | /* | ||
145 | * Clear n bits in a word. Return a word indicating the bits that were cleared. | ||
146 | * Optionally, build an array of chars that contain the bit numbers allocated. | ||
147 | */ | ||
148 | static unsigned long reserve_resources(unsigned long *p, int n, int mmax, | ||
149 | char *idx) | ||
150 | { | ||
151 | unsigned long bits = 0; | ||
152 | int i; | ||
153 | |||
154 | do { | ||
155 | i = find_first_bit(p, mmax); | ||
156 | if (i == mmax) | ||
157 | BUG(); | ||
158 | __clear_bit(i, p); | ||
159 | __set_bit(i, &bits); | ||
160 | if (idx) | ||
161 | *idx++ = i; | ||
162 | } while (--n); | ||
163 | return bits; | ||
164 | } | ||
165 | |||
166 | unsigned long reserve_gru_cb_resources(struct gru_state *gru, int cbr_au_count, | ||
167 | char *cbmap) | ||
168 | { | ||
169 | return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, | ||
170 | cbmap); | ||
171 | } | ||
172 | |||
173 | unsigned long reserve_gru_ds_resources(struct gru_state *gru, int dsr_au_count, | ||
174 | char *dsmap) | ||
175 | { | ||
176 | return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, | ||
177 | dsmap); | ||
178 | } | ||
179 | |||
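For reference, the bit-reservation pattern used by reserve_resources() above can be exercised outside the kernel. This is a minimal, hypothetical user-space model (not driver code); __builtin_ctzl stands in for the kernel's find_first_bit and an assert() replaces the BUG() on exhaustion.

/* Hypothetical user-space model of reserve_resources() -- not driver code. */
#include <assert.h>
#include <stdio.h>

/* Move n set bits from *freemap into the returned mask; record indices in idx. */
static unsigned long take_bits(unsigned long *freemap, int n, char *idx)
{
	unsigned long bits = 0;
	int i;

	while (n--) {
		assert(*freemap != 0);		/* caller verified availability */
		i = __builtin_ctzl(*freemap);	/* lowest set bit, like find_first_bit */
		*freemap &= ~(1UL << i);
		bits |= 1UL << i;
		if (idx)
			*idx++ = i;
	}
	return bits;
}

int main(void)
{
	unsigned long freemap = 0xf5;		/* allocation units 0,2,4,5,6,7 free */
	char idx[3];
	unsigned long got = take_bits(&freemap, 3, idx);

	printf("got 0x%lx, freemap 0x%lx, idx %d %d %d\n",
	       got, freemap, idx[0], idx[1], idx[2]);	/* got 0x15, freemap 0xe0 */
	return 0;
}

Running it reports got 0x15 with freemap left at 0xe0: allocation units 0, 2 and 4 were peeled off the free map and recorded in idx[], which is exactly how the driver records the CBR allocation units in ts_cbr_idx.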
180 | static void reserve_gru_resources(struct gru_state *gru, | ||
181 | struct gru_thread_state *gts) | ||
182 | { | ||
183 | gru->gs_active_contexts++; | ||
184 | gts->ts_cbr_map = | ||
185 | reserve_gru_cb_resources(gru, gts->ts_cbr_au_count, | ||
186 | gts->ts_cbr_idx); | ||
187 | gts->ts_dsr_map = | ||
188 | reserve_gru_ds_resources(gru, gts->ts_dsr_au_count, NULL); | ||
189 | } | ||
190 | |||
191 | static void free_gru_resources(struct gru_state *gru, | ||
192 | struct gru_thread_state *gts) | ||
193 | { | ||
194 | gru->gs_active_contexts--; | ||
195 | gru->gs_cbr_map |= gts->ts_cbr_map; | ||
196 | gru->gs_dsr_map |= gts->ts_dsr_map; | ||
197 | } | ||
198 | |||
199 | /* | ||
200 | * Check if a GRU has sufficient free resources to satisfy an allocation | ||
201 | * request. Note: GRU locks may or may not be held when this is called. If | ||
202 | * not held, recheck after acquiring the appropriate locks. | ||
203 | * | ||
204 | * Returns 1 if sufficient resources, 0 if not | ||
205 | */ | ||
206 | static int check_gru_resources(struct gru_state *gru, int cbr_au_count, | ||
207 | int dsr_au_count, int max_active_contexts) | ||
208 | { | ||
209 | return hweight64(gru->gs_cbr_map) >= cbr_au_count | ||
210 | && hweight64(gru->gs_dsr_map) >= dsr_au_count | ||
211 | && gru->gs_active_contexts < max_active_contexts; | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * TLB management requires tracking all GRU chiplets that have loaded a GSEG | ||
216 | * context. | ||
217 | */ | ||
218 | static int gru_load_mm_tracker(struct gru_state *gru, struct gru_mm_struct *gms, | ||
219 | int ctxnum) | ||
220 | { | ||
221 | struct gru_mm_tracker *asids = &gms->ms_asids[gru->gs_gid]; | ||
222 | unsigned short ctxbitmap = (1 << ctxnum); | ||
223 | int asid; | ||
224 | |||
225 | spin_lock(&gms->ms_asid_lock); | ||
226 | asid = asids->mt_asid; | ||
227 | |||
228 | if (asid == 0 || asids->mt_asid_gen != gru->gs_asid_gen) { | ||
229 | asid = gru_assign_asid(gru); | ||
230 | asids->mt_asid = asid; | ||
231 | asids->mt_asid_gen = gru->gs_asid_gen; | ||
232 | STAT(asid_new); | ||
233 | } else { | ||
234 | STAT(asid_reuse); | ||
235 | } | ||
236 | |||
237 | BUG_ON(asids->mt_ctxbitmap & ctxbitmap); | ||
238 | asids->mt_ctxbitmap |= ctxbitmap; | ||
239 | if (!test_bit(gru->gs_gid, gms->ms_asidmap)) | ||
240 | __set_bit(gru->gs_gid, gms->ms_asidmap); | ||
241 | spin_unlock(&gms->ms_asid_lock); | ||
242 | |||
243 | gru_dbg(grudev, | ||
244 | "gru %x, gms %p, ctxnum 0x%d, asid 0x%x, asidmap 0x%lx\n", | ||
245 | gru->gs_gid, gms, ctxnum, asid, gms->ms_asidmap[0]); | ||
246 | return asid; | ||
247 | } | ||
248 | |||
249 | static void gru_unload_mm_tracker(struct gru_state *gru, | ||
250 | struct gru_mm_struct *gms, int ctxnum) | ||
251 | { | ||
252 | struct gru_mm_tracker *asids; | ||
253 | unsigned short ctxbitmap; | ||
254 | |||
255 | asids = &gms->ms_asids[gru->gs_gid]; | ||
256 | ctxbitmap = (1 << ctxnum); | ||
257 | spin_lock(&gms->ms_asid_lock); | ||
258 | BUG_ON((asids->mt_ctxbitmap & ctxbitmap) != ctxbitmap); | ||
259 | asids->mt_ctxbitmap ^= ctxbitmap; | ||
260 | gru_dbg(grudev, "gru %x, gms %p, ctxnum %d, asidmap 0x%lx\n", | ||
261 | gru->gs_gid, gms, ctxnum, gms->ms_asidmap[0]); | ||
262 | spin_unlock(&gms->ms_asid_lock); | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * Decrement the reference count on a GTS structure. Free the structure | ||
267 | * if the reference count goes to zero. | ||
268 | */ | ||
269 | void gts_drop(struct gru_thread_state *gts) | ||
270 | { | ||
271 | if (gts && atomic_dec_return(&gts->ts_refcnt) == 0) { | ||
272 | gru_drop_mmu_notifier(gts->ts_gms); | ||
273 | kfree(gts); | ||
274 | STAT(gts_free); | ||
275 | } | ||
276 | } | ||
277 | |||
278 | /* | ||
279 | * Locate the GTS structure for the current thread. | ||
280 | */ | ||
281 | static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data | ||
282 | *vdata, int tsid) | ||
283 | { | ||
284 | struct gru_thread_state *gts; | ||
285 | |||
286 | list_for_each_entry(gts, &vdata->vd_head, ts_next) | ||
287 | if (gts->ts_tsid == tsid) | ||
288 | return gts; | ||
289 | return NULL; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * Allocate a thread state structure. | ||
294 | */ | ||
295 | static struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, | ||
296 | struct gru_vma_data *vdata, | ||
297 | int tsid) | ||
298 | { | ||
299 | struct gru_thread_state *gts; | ||
300 | int bytes; | ||
301 | |||
302 | bytes = DSR_BYTES(vdata->vd_dsr_au_count) + | ||
303 | CBR_BYTES(vdata->vd_cbr_au_count); | ||
304 | bytes += sizeof(struct gru_thread_state); | ||
305 | gts = kzalloc(bytes, GFP_KERNEL); | ||
306 | if (!gts) | ||
307 | return NULL; | ||
308 | |||
309 | STAT(gts_alloc); | ||
310 | atomic_set(&gts->ts_refcnt, 1); | ||
311 | mutex_init(&gts->ts_ctxlock); | ||
312 | gts->ts_cbr_au_count = vdata->vd_cbr_au_count; | ||
313 | gts->ts_dsr_au_count = vdata->vd_dsr_au_count; | ||
314 | gts->ts_user_options = vdata->vd_user_options; | ||
315 | gts->ts_tsid = tsid; | ||
316 | gts->ts_user_options = vdata->vd_user_options; | ||
317 | gts->ts_ctxnum = NULLCTX; | ||
318 | gts->ts_mm = current->mm; | ||
319 | gts->ts_vma = vma; | ||
320 | gts->ts_tlb_int_select = -1; | ||
321 | gts->ts_gms = gru_register_mmu_notifier(); | ||
322 | if (!gts->ts_gms) | ||
323 | goto err; | ||
324 | |||
325 | gru_dbg(grudev, "alloc vdata %p, new gts %p\n", vdata, gts); | ||
326 | return gts; | ||
327 | |||
328 | err: | ||
329 | gts_drop(gts); | ||
330 | return NULL; | ||
331 | } | ||
332 | |||
333 | /* | ||
334 | * Allocate a vma private data structure. | ||
335 | */ | ||
336 | struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, int tsid) | ||
337 | { | ||
338 | struct gru_vma_data *vdata = NULL; | ||
339 | |||
340 | vdata = kmalloc(sizeof(*vdata), GFP_KERNEL); | ||
341 | if (!vdata) | ||
342 | return NULL; | ||
343 | |||
344 | INIT_LIST_HEAD(&vdata->vd_head); | ||
345 | spin_lock_init(&vdata->vd_lock); | ||
346 | gru_dbg(grudev, "alloc vdata %p\n", vdata); | ||
347 | return vdata; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * Find the thread state structure for the current thread. | ||
352 | */ | ||
353 | struct gru_thread_state *gru_find_thread_state(struct vm_area_struct *vma, | ||
354 | int tsid) | ||
355 | { | ||
356 | struct gru_vma_data *vdata = vma->vm_private_data; | ||
357 | struct gru_thread_state *gts; | ||
358 | |||
359 | spin_lock(&vdata->vd_lock); | ||
360 | gts = gru_find_current_gts_nolock(vdata, tsid); | ||
361 | spin_unlock(&vdata->vd_lock); | ||
362 | gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); | ||
363 | return gts; | ||
364 | } | ||
365 | |||
366 | /* | ||
367 | * Allocate a new thread state for a GSEG. Note that a race may allow | ||
368 | * another thread to create a gts for the same tsid first. | ||
369 | */ | ||
370 | struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma, | ||
371 | int tsid) | ||
372 | { | ||
373 | struct gru_vma_data *vdata = vma->vm_private_data; | ||
374 | struct gru_thread_state *gts, *ngts; | ||
375 | |||
376 | gts = gru_alloc_gts(vma, vdata, tsid); | ||
377 | if (!gts) | ||
378 | return NULL; | ||
379 | |||
380 | spin_lock(&vdata->vd_lock); | ||
381 | ngts = gru_find_current_gts_nolock(vdata, tsid); | ||
382 | if (ngts) { | ||
383 | gts_drop(gts); | ||
384 | gts = ngts; | ||
385 | STAT(gts_double_allocate); | ||
386 | } else { | ||
387 | list_add(&gts->ts_next, &vdata->vd_head); | ||
388 | } | ||
389 | spin_unlock(&vdata->vd_lock); | ||
390 | gru_dbg(grudev, "vma %p, gts %p\n", vma, gts); | ||
391 | return gts; | ||
392 | } | ||
393 | |||
394 | /* | ||
395 | * Free the GRU context assigned to the thread state. | ||
396 | */ | ||
397 | static void gru_free_gru_context(struct gru_thread_state *gts) | ||
398 | { | ||
399 | struct gru_state *gru; | ||
400 | |||
401 | gru = gts->ts_gru; | ||
402 | gru_dbg(grudev, "gts %p, gru %p\n", gts, gru); | ||
403 | |||
404 | spin_lock(&gru->gs_lock); | ||
405 | gru->gs_gts[gts->ts_ctxnum] = NULL; | ||
406 | free_gru_resources(gru, gts); | ||
407 | BUG_ON(test_bit(gts->ts_ctxnum, &gru->gs_context_map) == 0); | ||
408 | __clear_bit(gts->ts_ctxnum, &gru->gs_context_map); | ||
409 | gts->ts_ctxnum = NULLCTX; | ||
410 | gts->ts_gru = NULL; | ||
411 | spin_unlock(&gru->gs_lock); | ||
412 | |||
413 | gts_drop(gts); | ||
414 | STAT(free_context); | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | * Prefetching cachelines helps hardware performance. | ||
419 | */ | ||
420 | static void prefetch_data(void *p, int num, int stride) | ||
421 | { | ||
422 | while (num-- > 0) { | ||
423 | prefetchw(p); | ||
424 | p += stride; | ||
425 | } | ||
426 | } | ||
427 | |||
428 | static inline long gru_copy_handle(void *d, void *s) | ||
429 | { | ||
430 | memcpy(d, s, GRU_HANDLE_BYTES); | ||
431 | return GRU_HANDLE_BYTES; | ||
432 | } | ||
433 | |||
434 | /* rewrite in assembly & use lots of prefetch */ | ||
435 | static void gru_load_context_data(void *save, void *grubase, int ctxnum, | ||
436 | unsigned long cbrmap, unsigned long dsrmap) | ||
437 | { | ||
438 | void *gseg, *cb, *cbe; | ||
439 | unsigned long length; | ||
440 | int i, scr; | ||
441 | |||
442 | gseg = grubase + ctxnum * GRU_GSEG_STRIDE; | ||
443 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; | ||
444 | prefetch_data(gseg + GRU_DS_BASE, length / GRU_CACHE_LINE_BYTES, | ||
445 | GRU_CACHE_LINE_BYTES); | ||
446 | |||
447 | cb = gseg + GRU_CB_BASE; | ||
448 | cbe = grubase + GRU_CBE_BASE; | ||
449 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { | ||
450 | prefetch_data(cb, 1, GRU_CACHE_LINE_BYTES); | ||
451 | prefetch_data(cbe + i * GRU_HANDLE_STRIDE, 1, | ||
452 | GRU_CACHE_LINE_BYTES); | ||
453 | cb += GRU_HANDLE_STRIDE; | ||
454 | } | ||
455 | |||
456 | cb = gseg + GRU_CB_BASE; | ||
457 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { | ||
458 | save += gru_copy_handle(cb, save); | ||
459 | save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save); | ||
460 | cb += GRU_HANDLE_STRIDE; | ||
461 | } | ||
462 | |||
463 | memcpy(gseg + GRU_DS_BASE, save, length); | ||
464 | } | ||
465 | |||
466 | static void gru_unload_context_data(void *save, void *grubase, int ctxnum, | ||
467 | unsigned long cbrmap, unsigned long dsrmap) | ||
468 | { | ||
469 | void *gseg, *cb, *cbe; | ||
470 | unsigned long length; | ||
471 | int i, scr; | ||
472 | |||
473 | gseg = grubase + ctxnum * GRU_GSEG_STRIDE; | ||
474 | |||
475 | cb = gseg + GRU_CB_BASE; | ||
476 | cbe = grubase + GRU_CBE_BASE; | ||
477 | for_each_cbr_in_allocation_map(i, &cbrmap, scr) { | ||
478 | save += gru_copy_handle(save, cb); | ||
479 | save += gru_copy_handle(save, cbe + i * GRU_HANDLE_STRIDE); | ||
480 | cb += GRU_HANDLE_STRIDE; | ||
481 | } | ||
482 | length = hweight64(dsrmap) * GRU_DSR_AU_BYTES; | ||
483 | memcpy(save, gseg + GRU_DS_BASE, length); | ||
484 | } | ||
485 | |||
486 | void gru_unload_context(struct gru_thread_state *gts, int savestate) | ||
487 | { | ||
488 | struct gru_state *gru = gts->ts_gru; | ||
489 | struct gru_context_configuration_handle *cch; | ||
490 | int ctxnum = gts->ts_ctxnum; | ||
491 | |||
492 | zap_vma_ptes(gts->ts_vma, UGRUADDR(gts), GRU_GSEG_PAGESIZE); | ||
493 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | ||
494 | |||
495 | lock_cch_handle(cch); | ||
496 | if (cch_interrupt_sync(cch)) | ||
497 | BUG(); | ||
498 | gru_dbg(grudev, "gts %p\n", gts); | ||
499 | |||
500 | gru_unload_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); | ||
501 | if (savestate) | ||
502 | gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, | ||
503 | ctxnum, gts->ts_cbr_map, | ||
504 | gts->ts_dsr_map); | ||
505 | |||
506 | if (cch_deallocate(cch)) | ||
507 | BUG(); | ||
508 | gts->ts_force_unload = 0; /* ts_force_unload locked by CCH lock */ | ||
509 | unlock_cch_handle(cch); | ||
510 | |||
511 | gru_free_gru_context(gts); | ||
512 | STAT(unload_context); | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * Load a GRU context by copying it from the thread data structure in memory | ||
517 | * to the GRU. | ||
518 | */ | ||
519 | static void gru_load_context(struct gru_thread_state *gts) | ||
520 | { | ||
521 | struct gru_state *gru = gts->ts_gru; | ||
522 | struct gru_context_configuration_handle *cch; | ||
523 | int err, asid, ctxnum = gts->ts_ctxnum; | ||
524 | |||
525 | gru_dbg(grudev, "gts %p\n", gts); | ||
526 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | ||
527 | |||
528 | lock_cch_handle(cch); | ||
529 | asid = gru_load_mm_tracker(gru, gts->ts_gms, gts->ts_ctxnum); | ||
530 | cch->tfm_fault_bit_enable = | ||
531 | (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL | ||
532 | || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | ||
533 | cch->tlb_int_enable = (gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); | ||
534 | if (cch->tlb_int_enable) { | ||
535 | gts->ts_tlb_int_select = gru_cpu_fault_map_id(); | ||
536 | cch->tlb_int_select = gts->ts_tlb_int_select; | ||
537 | } | ||
538 | cch->tfm_done_bit_enable = 0; | ||
539 | err = cch_allocate(cch, asid, gts->ts_cbr_map, gts->ts_dsr_map); | ||
540 | if (err) { | ||
541 | gru_dbg(grudev, | ||
542 | "err %d: cch %p, gts %p, cbr 0x%lx, dsr 0x%lx\n", | ||
543 | err, cch, gts, gts->ts_cbr_map, gts->ts_dsr_map); | ||
544 | BUG(); | ||
545 | } | ||
546 | |||
547 | gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, | ||
548 | gts->ts_cbr_map, gts->ts_dsr_map); | ||
549 | |||
550 | if (cch_start(cch)) | ||
551 | BUG(); | ||
552 | unlock_cch_handle(cch); | ||
553 | |||
554 | STAT(load_context); | ||
555 | } | ||
556 | |||
557 | /* | ||
558 | * Update fields in an active CCH: | ||
559 | * - retarget interrupts on local blade | ||
560 | * - force a delayed context unload by clearing the CCH asids. This | ||
561 | * forces TLB misses for new GRU instructions. The context is unloaded | ||
562 | * when the next TLB miss occurs. | ||
563 | */ | ||
564 | static int gru_update_cch(struct gru_thread_state *gts, int int_select) | ||
565 | { | ||
566 | struct gru_context_configuration_handle *cch; | ||
567 | struct gru_state *gru = gts->ts_gru; | ||
568 | int i, ctxnum = gts->ts_ctxnum, ret = 0; | ||
569 | |||
570 | cch = get_cch(gru->gs_gru_base_vaddr, ctxnum); | ||
571 | |||
572 | lock_cch_handle(cch); | ||
573 | if (cch->state == CCHSTATE_ACTIVE) { | ||
574 | if (gru->gs_gts[gts->ts_ctxnum] != gts) | ||
575 | goto exit; | ||
576 | if (cch_interrupt(cch)) | ||
577 | BUG(); | ||
578 | if (int_select >= 0) { | ||
579 | gts->ts_tlb_int_select = int_select; | ||
580 | cch->tlb_int_select = int_select; | ||
581 | } else { | ||
582 | for (i = 0; i < 8; i++) | ||
583 | cch->asid[i] = 0; | ||
584 | cch->tfm_fault_bit_enable = 0; | ||
585 | cch->tlb_int_enable = 0; | ||
586 | gts->ts_force_unload = 1; | ||
587 | } | ||
588 | if (cch_start(cch)) | ||
589 | BUG(); | ||
590 | ret = 1; | ||
591 | } | ||
592 | exit: | ||
593 | unlock_cch_handle(cch); | ||
594 | return ret; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * Update CCH tlb interrupt select. Required when all the following is true: | ||
599 | * - task's GRU context is loaded into a GRU | ||
600 | * - task is using interrupt notification for TLB faults | ||
601 | * - task has migrated to a different cpu on the same blade where | ||
602 | * it was previously running. | ||
603 | */ | ||
604 | static int gru_retarget_intr(struct gru_thread_state *gts) | ||
605 | { | ||
606 | if (gts->ts_tlb_int_select < 0 | ||
607 | || gts->ts_tlb_int_select == gru_cpu_fault_map_id()) | ||
608 | return 0; | ||
609 | |||
610 | gru_dbg(grudev, "retarget from %d to %d\n", gts->ts_tlb_int_select, | ||
611 | gru_cpu_fault_map_id()); | ||
612 | return gru_update_cch(gts, gru_cpu_fault_map_id()); | ||
613 | } | ||
614 | |||
615 | |||
616 | /* | ||
617 | * Insufficient GRU resources available on the local blade. Steal a context from | ||
618 | * a process. This is a hack until a _real_ resource scheduler is written.... | ||
619 | */ | ||
620 | #define next_ctxnum(n) ((n) < GRU_NUM_CCH - 2 ? (n) + 1 : 0) | ||
621 | #define next_gru(b, g) (((g) < &(b)->bs_grus[GRU_CHIPLETS_PER_BLADE - 1]) ? \ | ||
622 | ((g)+1) : &(b)->bs_grus[0]) | ||
623 | |||
624 | static void gru_steal_context(struct gru_thread_state *gts) | ||
625 | { | ||
626 | struct gru_blade_state *blade; | ||
627 | struct gru_state *gru, *gru0; | ||
628 | struct gru_thread_state *ngts = NULL; | ||
629 | int ctxnum, ctxnum0, flag = 0, cbr, dsr; | ||
630 | |||
631 | cbr = gts->ts_cbr_au_count; | ||
632 | dsr = gts->ts_dsr_au_count; | ||
633 | |||
634 | preempt_disable(); | ||
635 | blade = gru_base[uv_numa_blade_id()]; | ||
636 | spin_lock(&blade->bs_lock); | ||
637 | |||
638 | ctxnum = next_ctxnum(blade->bs_lru_ctxnum); | ||
639 | gru = blade->bs_lru_gru; | ||
640 | if (ctxnum == 0) | ||
641 | gru = next_gru(blade, gru); | ||
642 | ctxnum0 = ctxnum; | ||
643 | gru0 = gru; | ||
644 | while (1) { | ||
645 | if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH)) | ||
646 | break; | ||
647 | spin_lock(&gru->gs_lock); | ||
648 | for (; ctxnum < GRU_NUM_CCH; ctxnum++) { | ||
649 | if (flag && gru == gru0 && ctxnum == ctxnum0) | ||
650 | break; | ||
651 | ngts = gru->gs_gts[ctxnum]; | ||
652 | /* | ||
653 | * We are grabbing locks out of order, so trylock is | ||
654 | * needed. GTSs are usually not locked, so the odds of | ||
655 | * success are high. If trylock fails, try to steal a | ||
656 | * different GSEG. | ||
657 | */ | ||
658 | if (ngts && mutex_trylock(&ngts->ts_ctxlock)) | ||
659 | break; | ||
660 | ngts = NULL; | ||
661 | flag = 1; | ||
662 | } | ||
663 | spin_unlock(&gru->gs_lock); | ||
664 | if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0)) | ||
665 | break; | ||
666 | ctxnum = 0; | ||
667 | gru = next_gru(blade, gru); | ||
668 | } | ||
669 | blade->bs_lru_gru = gru; | ||
670 | blade->bs_lru_ctxnum = ctxnum; | ||
671 | spin_unlock(&blade->bs_lock); | ||
672 | preempt_enable(); | ||
673 | |||
674 | if (ngts) { | ||
675 | STAT(steal_context); | ||
676 | ngts->ts_steal_jiffies = jiffies; | ||
677 | gru_unload_context(ngts, 1); | ||
678 | mutex_unlock(&ngts->ts_ctxlock); | ||
679 | } else { | ||
680 | STAT(steal_context_failed); | ||
681 | } | ||
682 | gru_dbg(grudev, | ||
683 | "stole gru %x, ctxnum %d from gts %p. Need cb %d, ds %d;" | ||
684 | " avail cb %ld, ds %ld\n", | ||
685 | gru->gs_gid, ctxnum, ngts, cbr, dsr, hweight64(gru->gs_cbr_map), | ||
686 | hweight64(gru->gs_dsr_map)); | ||
687 | } | ||
688 | |||
689 | /* | ||
690 | * Scan the GRUs on the local blade & assign a GRU context. | ||
691 | */ | ||
692 | static struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts) | ||
693 | { | ||
694 | struct gru_state *gru, *grux; | ||
695 | int i, max_active_contexts; | ||
696 | |||
697 | preempt_disable(); | ||
698 | |||
699 | again: | ||
700 | gru = NULL; | ||
701 | max_active_contexts = GRU_NUM_CCH; | ||
702 | for_each_gru_on_blade(grux, uv_numa_blade_id(), i) { | ||
703 | if (check_gru_resources(grux, gts->ts_cbr_au_count, | ||
704 | gts->ts_dsr_au_count, | ||
705 | max_active_contexts)) { | ||
706 | gru = grux; | ||
707 | max_active_contexts = grux->gs_active_contexts; | ||
708 | if (max_active_contexts == 0) | ||
709 | break; | ||
710 | } | ||
711 | } | ||
712 | |||
713 | if (gru) { | ||
714 | spin_lock(&gru->gs_lock); | ||
715 | if (!check_gru_resources(gru, gts->ts_cbr_au_count, | ||
716 | gts->ts_dsr_au_count, GRU_NUM_CCH)) { | ||
717 | spin_unlock(&gru->gs_lock); | ||
718 | goto again; | ||
719 | } | ||
720 | reserve_gru_resources(gru, gts); | ||
721 | gts->ts_gru = gru; | ||
722 | gts->ts_ctxnum = | ||
723 | find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH); | ||
724 | BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH); | ||
725 | atomic_inc(&gts->ts_refcnt); | ||
726 | gru->gs_gts[gts->ts_ctxnum] = gts; | ||
727 | __set_bit(gts->ts_ctxnum, &gru->gs_context_map); | ||
728 | spin_unlock(&gru->gs_lock); | ||
729 | |||
730 | STAT(assign_context); | ||
731 | gru_dbg(grudev, | ||
732 | "gseg %p, gts %p, gru %x, ctx %d, cbr %d, dsr %d\n", | ||
733 | gseg_virtual_address(gts->ts_gru, gts->ts_ctxnum), gts, | ||
734 | gts->ts_gru->gs_gid, gts->ts_ctxnum, | ||
735 | gts->ts_cbr_au_count, gts->ts_dsr_au_count); | ||
736 | } else { | ||
737 | gru_dbg(grudev, "failed to allocate a GTS %s\n", ""); | ||
738 | STAT(assign_context_failed); | ||
739 | } | ||
740 | |||
741 | preempt_enable(); | ||
742 | return gru; | ||
743 | } | ||
744 | |||
745 | /* | ||
746 | * gru_fault | ||
747 | * | ||
748 | * Map the user's GRU segment | ||
749 | */ | ||
750 | int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | ||
751 | { | ||
752 | struct gru_thread_state *gts; | ||
753 | unsigned long paddr, vaddr; | ||
754 | |||
755 | vaddr = (unsigned long)vmf->virtual_address; | ||
756 | gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n", | ||
757 | vma, vaddr, GSEG_BASE(vaddr)); | ||
758 | STAT(nopfn); | ||
759 | |||
760 | gts = gru_find_thread_state(vma, TSID(vaddr, vma)); | ||
761 | if (!gts) | ||
762 | return VM_FAULT_SIGBUS; | ||
763 | |||
764 | again: | ||
765 | preempt_disable(); | ||
766 | mutex_lock(&gts->ts_ctxlock); | ||
767 | if (gts->ts_gru) { | ||
768 | if (gts->ts_gru->gs_blade_id != uv_numa_blade_id()) { | ||
769 | STAT(migrated_nopfn_unload); | ||
770 | gru_unload_context(gts, 1); | ||
771 | } else { | ||
772 | if (gru_retarget_intr(gts)) | ||
773 | STAT(migrated_nopfn_retarget); | ||
774 | } | ||
775 | } | ||
776 | |||
777 | if (!gts->ts_gru) { | ||
778 | while (!gru_assign_gru_context(gts)) { | ||
779 | mutex_unlock(&gts->ts_ctxlock); | ||
780 | preempt_enable(); | ||
781 | schedule_timeout(GRU_ASSIGN_DELAY); /* true hack ZZZ */ | ||
782 | if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies) | ||
783 | gru_steal_context(gts); | ||
784 | goto again; | ||
785 | } | ||
786 | gru_load_context(gts); | ||
787 | paddr = gseg_physical_address(gts->ts_gru, gts->ts_ctxnum); | ||
788 | remap_pfn_range(vma, vaddr & ~(GRU_GSEG_PAGESIZE - 1), | ||
789 | paddr >> PAGE_SHIFT, GRU_GSEG_PAGESIZE, | ||
790 | vma->vm_page_prot); | ||
791 | } | ||
792 | |||
793 | mutex_unlock(&gts->ts_ctxlock); | ||
794 | preempt_enable(); | ||
795 | |||
796 | return VM_FAULT_NOPAGE; | ||
797 | } | ||
798 | |||