aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/misc
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/misc')
-rw-r--r--drivers/misc/sgi-gru/grumain.c44
1 files changed, 30 insertions, 14 deletions
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ade0925eab0e..f8538bbd0bfa 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -53,12 +53,16 @@ struct device *grudev = &gru_device;
  */
 int gru_cpu_fault_map_id(void)
 {
+#ifdef CONFIG_IA64
+	return uv_blade_processor_id() % GRU_NUM_TFM;
+#else
 	int cpu = smp_processor_id();
 	int id, core;
 
 	core = uv_cpu_core_number(cpu);
 	id = core + UV_MAX_INT_CORES * uv_cpu_socket_number(cpu);
 	return id;
+#endif
 }
 
 /*--------- ASID Management -------------------------------------------
@@ -699,15 +703,34 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 }
 
 /*
+ * Check if a GRU context is allowed to use a specific chiplet. By default
+ * a context is assigned to any blade-local chiplet. However, users can
+ * override this.
+ * Returns 1 if assignment allowed, 0 otherwise
+ */
+static int gru_check_chiplet_assignment(struct gru_state *gru,
+	struct gru_thread_state *gts)
+{
+	int blade_id;
+	int chiplet_id;
+
+	blade_id = gts->ts_user_blade_id;
+	if (blade_id < 0)
+		blade_id = uv_numa_blade_id();
+
+	chiplet_id = gts->ts_user_chiplet_id;
+	return gru->gs_blade_id == blade_id &&
+		(chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id);
+}
+
+/*
  * Unload the gru context if it is not assigned to the correct blade or
  * chiplet. Misassignment can occur if the process migrates to a different
  * blade or if the user changes the selected blade/chiplet.
- * Return 0 if context correct placed, otherwise 1
  */
 void gru_check_context_placement(struct gru_thread_state *gts)
 {
 	struct gru_state *gru;
-	int blade_id, chiplet_id;
 
 	/*
 	 * If the current task is the context owner, verify that the
@@ -718,13 +741,7 @@ void gru_check_context_placement(struct gru_thread_state *gts)
 	if (!gru || gts->ts_tgid_owner != current->tgid)
 		return;
 
-	blade_id = gts->ts_user_blade_id;
-	if (blade_id < 0)
-		blade_id = uv_numa_blade_id();
-
-	chiplet_id = gts->ts_user_chiplet_id;
-	if (gru->gs_blade_id != blade_id ||
-	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
+	if (!gru_check_chiplet_assignment(gru, gts)) {
 		STAT(check_context_unload);
 		gru_unload_context(gts, 1);
 	} else if (gru_retarget_intr(gts)) {
@@ -768,9 +785,9 @@ void gru_steal_context(struct gru_thread_state *gts)
 	struct gru_state *gru, *gru0;
 	struct gru_thread_state *ngts = NULL;
 	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
-	int blade_id = gts->ts_user_blade_id;
-	int chiplet_id = gts->ts_user_chiplet_id;
+	int blade_id;
 
+	blade_id = gts->ts_user_blade_id;
 	if (blade_id < 0)
 		blade_id = uv_numa_blade_id();
 	cbr = gts->ts_cbr_au_count;
@@ -788,7 +805,7 @@ void gru_steal_context(struct gru_thread_state *gts)
 	ctxnum0 = ctxnum;
 	gru0 = gru;
 	while (1) {
-		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
+		if (gru_check_chiplet_assignment(gru, gts)) {
 			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
 				break;
 			spin_lock(&gru->gs_lock);
@@ -853,7 +870,6 @@ struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
 	struct gru_state *gru, *grux;
 	int i, max_active_contexts;
 	int blade_id = gts->ts_user_blade_id;
-	int chiplet_id = gts->ts_user_chiplet_id;
 
 	if (blade_id < 0)
 		blade_id = uv_numa_blade_id();
@@ -861,7 +877,7 @@ again:
 	gru = NULL;
 	max_active_contexts = GRU_NUM_CCH;
 	for_each_gru_on_blade(grux, blade_id, i) {
-		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
+		if (!gru_check_chiplet_assignment(grux, gts))
 			continue;
 		if (check_gru_resources(grux, gts->ts_cbr_au_count,
 			gts->ts_dsr_au_count,