about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJack Steiner <steiner@sgi.com>2009-06-17 19:28:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-06-18 16:04:00 -0400
commit940229b9c0dcd9b6e1d64d0d26eba00238ddae98 (patch)
tree62df332cc772d692af3bce50de8b9210dafead3d
parent17b49a67a6a59f0e9f3c22e67ddb602410e8e182 (diff)
gru: check context state on reload
Check whether the gru state being loaded into a gru is from a new context or a previously unloaded context. If new, simply zero out the hardware context; if unloaded and valid, reload the old state. This change is primarily for reloading kernel contexts, where the previous state is not required to be saved. Signed-off-by: Jack Steiner <steiner@sgi.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/misc/sgi-gru/grumain.c32
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
2 files changed, 25 insertions, 9 deletions
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index 4e6e8c3554f0..afc4c473c794 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -307,11 +307,12 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
307 307
308 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count); 308 bytes = DSR_BYTES(dsr_au_count) + CBR_BYTES(cbr_au_count);
309 bytes += sizeof(struct gru_thread_state); 309 bytes += sizeof(struct gru_thread_state);
310 gts = kzalloc(bytes, GFP_KERNEL); 310 gts = kmalloc(bytes, GFP_KERNEL);
311 if (!gts) 311 if (!gts)
312 return NULL; 312 return NULL;
313 313
314 STAT(gts_alloc); 314 STAT(gts_alloc);
315 memset(gts, 0, sizeof(struct gru_thread_state)); /* zero out header */
315 atomic_set(&gts->ts_refcnt, 1); 316 atomic_set(&gts->ts_refcnt, 1);
316 mutex_init(&gts->ts_ctxlock); 317 mutex_init(&gts->ts_ctxlock);
317 gts->ts_cbr_au_count = cbr_au_count; 318 gts->ts_cbr_au_count = cbr_au_count;
@@ -458,7 +459,8 @@ static void gru_prefetch_context(void *gseg, void *cb, void *cbe,
458} 459}
459 460
460static void gru_load_context_data(void *save, void *grubase, int ctxnum, 461static void gru_load_context_data(void *save, void *grubase, int ctxnum,
461 unsigned long cbrmap, unsigned long dsrmap) 462 unsigned long cbrmap, unsigned long dsrmap,
463 int data_valid)
462{ 464{
463 void *gseg, *cb, *cbe; 465 void *gseg, *cb, *cbe;
464 unsigned long length; 466 unsigned long length;
@@ -471,12 +473,22 @@ static void gru_load_context_data(void *save, void *grubase, int ctxnum,
471 gru_prefetch_context(gseg, cb, cbe, cbrmap, length); 473 gru_prefetch_context(gseg, cb, cbe, cbrmap, length);
472 474
473 for_each_cbr_in_allocation_map(i, &cbrmap, scr) { 475 for_each_cbr_in_allocation_map(i, &cbrmap, scr) {
474 save += gru_copy_handle(cb, save); 476 if (data_valid) {
475 save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE, save); 477 save += gru_copy_handle(cb, save);
478 save += gru_copy_handle(cbe + i * GRU_HANDLE_STRIDE,
479 save);
480 } else {
481 memset(cb, 0, GRU_CACHE_LINE_BYTES);
482 memset(cbe + i * GRU_HANDLE_STRIDE, 0,
483 GRU_CACHE_LINE_BYTES);
484 }
476 cb += GRU_HANDLE_STRIDE; 485 cb += GRU_HANDLE_STRIDE;
477 } 486 }
478 487
479 memcpy(gseg + GRU_DS_BASE, save, length); 488 if (data_valid)
489 memcpy(gseg + GRU_DS_BASE, save, length);
490 else
491 memset(gseg + GRU_DS_BASE, 0, length);
480} 492}
481 493
482static void gru_unload_context_data(void *save, void *grubase, int ctxnum, 494static void gru_unload_context_data(void *save, void *grubase, int ctxnum,
@@ -517,10 +529,12 @@ void gru_unload_context(struct gru_thread_state *gts, int savestate)
517 529
518 if (!is_kernel_context(gts)) 530 if (!is_kernel_context(gts))
519 gru_unload_mm_tracker(gru, gts); 531 gru_unload_mm_tracker(gru, gts);
520 if (savestate) 532 if (savestate) {
521 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, 533 gru_unload_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr,
522 ctxnum, gts->ts_cbr_map, 534 ctxnum, gts->ts_cbr_map,
523 gts->ts_dsr_map); 535 gts->ts_dsr_map);
536 gts->ts_data_valid = 1;
537 }
524 538
525 if (cch_deallocate(cch)) 539 if (cch_deallocate(cch))
526 BUG(); 540 BUG();
@@ -576,7 +590,7 @@ void gru_load_context(struct gru_thread_state *gts)
576 } 590 }
577 591
578 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum, 592 gru_load_context_data(gts->ts_gdata, gru->gs_gru_base_vaddr, ctxnum,
579 gts->ts_cbr_map, gts->ts_dsr_map); 593 gts->ts_cbr_map, gts->ts_dsr_map, gts->ts_data_valid);
580 594
581 if (cch_start(cch)) 595 if (cch_start(cch))
582 BUG(); 596 BUG();
@@ -611,8 +625,8 @@ int gru_update_cch(struct gru_thread_state *gts, int force_unload)
611 gts->ts_tlb_int_select = gru_cpu_fault_map_id(); 625 gts->ts_tlb_int_select = gru_cpu_fault_map_id();
612 cch->tlb_int_select = gru_cpu_fault_map_id(); 626 cch->tlb_int_select = gru_cpu_fault_map_id();
613 cch->tfm_fault_bit_enable = 627 cch->tfm_fault_bit_enable =
614 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL 628 (gts->ts_user_options == GRU_OPT_MISS_FMM_POLL
615 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR); 629 || gts->ts_user_options == GRU_OPT_MISS_FMM_INTR);
616 } else { 630 } else {
617 for (i = 0; i < 8; i++) 631 for (i = 0; i < 8; i++)
618 cch->asid[i] = 0; 632 cch->asid[i] = 0;
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 1c85fdcf5d37..5f8f3bda2fa9 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -385,6 +385,8 @@ struct gru_thread_state {
385 after migration */ 385 after migration */
386 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each 386 char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each
387 allocated CB */ 387 allocated CB */
388 int ts_data_valid; /* Indicates if ts_gdata has
389 valid data */
388 unsigned long ts_gdata[0]; /* save area for GRU data (CB, 390 unsigned long ts_gdata[0]; /* save area for GRU data (CB,
389 DS, CBE) */ 391 DS, CBE) */
390}; 392};