author		Jack Steiner <steiner@sgi.com>	2009-12-15 19:48:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-12-16 10:20:15 -0500
commit		55484c45dbeca2eec7642932ec3f60f8a2d4bdbf (patch)
tree		4927e959a656def901bb105605df4ab6977ab2d1 /drivers/misc
parent		518e5cd4aae476042bdee511e0e00c8670c0df42 (diff)
gru: allow users to specify gru chiplet 2
Add support to the GRU driver to allow users to specify the blade & chiplet for allocation of GRU contexts. Add new statistics for context loading/unloading/retargeting. Also delete a few GRU stats that were no longer being used.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
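For context before the diff: the blade/chiplet selection is driven from userspace through the driver's context-option call, whose kernel side is visible in the gru_set_context_option() hunk below. A minimal sketch of a caller follows; the req.val0/req.val1 meanings match the diff, but grulib.h, sco_blade_chiplet, and GRU_SET_CONTEXT_OPTION belong to the driver's private interface (introduced by the parent patch), not to this diff, so treat those names as assumptions:

	#include <sys/ioctl.h>
	#include "grulib.h"	/* driver-private header (assumed): request
				   struct, sco_* enum, GRU_SET_CONTEXT_OPTION */

	/*
	 * Ask the driver to place the GSEG's context on a specific
	 * blade/chiplet. val1/val0 land in ts_user_blade_id and
	 * ts_user_chiplet_id (see gru_set_context_option() below);
	 * -1 means "no preference".
	 */
	static int gru_pin_context(int gru_fd, unsigned long gseg,
				   int blade, int chiplet)
	{
		struct gru_set_context_option_req req = {
			.gseg = gseg,
			.op   = sco_blade_chiplet,
			.val1 = blade,		/* blade id */
			.val0 = chiplet,	/* chiplet within the blade */
		};

		return ioctl(gru_fd, GRU_SET_CONTEXT_OPTION, &req);
	}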
Diffstat (limited to 'drivers/misc')
-rw-r--r--	drivers/misc/sgi-gru/grufault.c		13
-rw-r--r--	drivers/misc/sgi-gru/grufile.c		15
-rw-r--r--	drivers/misc/sgi-gru/grukservices.c	8
-rw-r--r--	drivers/misc/sgi-gru/grumain.c		128
-rw-r--r--	drivers/misc/sgi-gru/gruprocfs.c	9
-rw-r--r--	drivers/misc/sgi-gru/grutables.h	16
6 files changed, 109 insertions(+), 80 deletions(-)
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index a69d119921ff..321ad809f345 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -546,17 +546,7 @@ int gru_handle_user_call_os(unsigned long cb)
 	if (ucbnum >= gts->ts_cbr_au_count * GRU_CBR_AU_SIZE)
 		goto exit;
 
-	/*
-	 * If force_unload is set, the UPM TLB fault is phony. The task
-	 * has migrated to another node and the GSEG must be moved. Just
-	 * unload the context. The task will page fault and assign a new
-	 * context.
-	 */
-	if (gts->ts_tgid_owner == current->tgid && gts->ts_blade >= 0 &&
-				gts->ts_blade != uv_numa_blade_id()) {
-		STAT(call_os_offnode_reference);
-		gts->ts_force_unload = 1;
-	}
+	gru_check_context_placement(gts);
 
 	/*
 	 * CCH may contain stale data if ts_force_cch_reload is set.
@@ -771,6 +761,7 @@ int gru_set_context_option(unsigned long arg)
 		} else {
 			gts->ts_user_blade_id = req.val1;
 			gts->ts_user_chiplet_id = req.val0;
+			gru_check_context_placement(gts);
 		}
 		break;
 	case sco_gseg_owner:
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 2e574fd5d3f7..0a6d2a5a01f3 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -232,23 +232,24 @@ static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
  * system.
  */
 static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
-	void *vaddr, int nid, int bid, int grunum)
+	void *vaddr, int blade_id, int chiplet_id)
 {
 	spin_lock_init(&gru->gs_lock);
 	spin_lock_init(&gru->gs_asid_lock);
 	gru->gs_gru_base_paddr = paddr;
 	gru->gs_gru_base_vaddr = vaddr;
-	gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
-	gru->gs_blade = gru_base[bid];
-	gru->gs_blade_id = bid;
+	gru->gs_gid = blade_id * GRU_CHIPLETS_PER_BLADE + chiplet_id;
+	gru->gs_blade = gru_base[blade_id];
+	gru->gs_blade_id = blade_id;
+	gru->gs_chiplet_id = chiplet_id;
 	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
 	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
 	gru->gs_asid_limit = MAX_ASID;
 	gru_tgh_flush_init(gru);
 	if (gru->gs_gid >= gru_max_gids)
 		gru_max_gids = gru->gs_gid + 1;
-	gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
-		bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
+	gru_dbg(grudev, "bid %d, gid %d, vaddr %p (0x%lx)\n",
+		blade_id, gru->gs_gid, gru->gs_gru_base_vaddr,
 		gru->gs_gru_base_paddr);
 }
 
@@ -283,7 +284,7 @@ static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
 				chip++, gru++) {
 			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
 			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
-			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
+			gru_init_chiplet(gru, paddr, vaddr, bid, chip);
 			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
 			cbrs = max(cbrs, n);
 			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 306855cc80fe..e0d4b53d1fc2 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -160,8 +160,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 	up_read(&bs->bs_kgts_sema);
 	down_write(&bs->bs_kgts_sema);
 
-	if (!bs->bs_kgts)
+	if (!bs->bs_kgts) {
 		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+		bs->bs_kgts->ts_user_blade_id = blade_id;
+	}
 	kgts = bs->bs_kgts;
 
 	if (!kgts->ts_gru) {
@@ -172,9 +174,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
 			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
 			bs->bs_async_dsr_bytes);
-		while (!gru_assign_gru_context(kgts, blade_id)) {
+		while (!gru_assign_gru_context(kgts)) {
 			msleep(1);
-			gru_steal_context(kgts, blade_id);
+			gru_steal_context(kgts);
 		}
 		gru_load_context(kgts);
 		gru = bs->bs_kgts->ts_gru;
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index f449c2dbc1e3..54ad7544a9a0 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -684,6 +684,40 @@ static int gru_retarget_intr(struct gru_thread_state *gts)
 	return gru_update_cch(gts, 0);
 }
 
+/*
+ * Unload the gru context if it is not assigned to the correct blade or
+ * chiplet. Misassignment can occur if the process migrates to a different
+ * blade or if the user changes the selected blade/chiplet.
+ *	Return 0 if context correctly placed, otherwise 1
+ */
+void gru_check_context_placement(struct gru_thread_state *gts)
+{
+	struct gru_state *gru;
+	int blade_id, chiplet_id;
+
+	/*
+	 * If the current task is the context owner, verify that the
+	 * context is correctly placed. This test is skipped for non-owner
+	 * references. Pthread apps use non-owner references to the CBRs.
+	 */
+	gru = gts->ts_gru;
+	if (!gru || gts->ts_tgid_owner != current->tgid)
+		return;
+
+	blade_id = gts->ts_user_blade_id;
+	if (blade_id < 0)
+		blade_id = uv_numa_blade_id();
+
+	chiplet_id = gts->ts_user_chiplet_id;
+	if (gru->gs_blade_id != blade_id ||
+	    (chiplet_id >= 0 && chiplet_id != gru->gs_chiplet_id)) {
+		STAT(check_context_unload);
+		gru_unload_context(gts, 1);
+	} else if (gru_retarget_intr(gts)) {
+		STAT(check_context_retarget_intr);
+	}
+}
+
 
 /*
  * Insufficient GRU resources available on the local blade. Steal a context from
@@ -714,13 +748,17 @@ static void gts_stolen(struct gru_thread_state *gts,
 	}
 }
 
-void gru_steal_context(struct gru_thread_state *gts, int blade_id)
+void gru_steal_context(struct gru_thread_state *gts)
 {
 	struct gru_blade_state *blade;
 	struct gru_state *gru, *gru0;
 	struct gru_thread_state *ngts = NULL;
 	int ctxnum, ctxnum0, flag = 0, cbr, dsr;
+	int blade_id = gts->ts_user_blade_id;
+	int chiplet_id = gts->ts_user_chiplet_id;
 
+	if (blade_id < 0)
+		blade_id = uv_numa_blade_id();
 	cbr = gts->ts_cbr_au_count;
 	dsr = gts->ts_dsr_au_count;
 
@@ -731,35 +769,39 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 	gru = blade->bs_lru_gru;
 	if (ctxnum == 0)
 		gru = next_gru(blade, gru);
+	blade->bs_lru_gru = gru;
+	blade->bs_lru_ctxnum = ctxnum;
 	ctxnum0 = ctxnum;
 	gru0 = gru;
 	while (1) {
-		if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
-			break;
-		spin_lock(&gru->gs_lock);
-		for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
-			if (flag && gru == gru0 && ctxnum == ctxnum0)
+		if (chiplet_id < 0 || chiplet_id == gru->gs_chiplet_id) {
+			if (check_gru_resources(gru, cbr, dsr, GRU_NUM_CCH))
 				break;
-			ngts = gru->gs_gts[ctxnum];
-			/*
-			 * We are grabbing locks out of order, so trylock is
-			 * needed. GTSs are usually not locked, so the odds of
-			 * success are high. If trylock fails, try to steal a
-			 * different GSEG.
-			 */
-			if (ngts && is_gts_stealable(ngts, blade))
+			spin_lock(&gru->gs_lock);
+			for (; ctxnum < GRU_NUM_CCH; ctxnum++) {
+				if (flag && gru == gru0 && ctxnum == ctxnum0)
+					break;
+				ngts = gru->gs_gts[ctxnum];
+				/*
+				 * We are grabbing locks out of order, so trylock is
+				 * needed. GTSs are usually not locked, so the odds of
+				 * success are high. If trylock fails, try to steal a
+				 * different GSEG.
+				 */
+				if (ngts && is_gts_stealable(ngts, blade))
+					break;
+				ngts = NULL;
+			}
+			spin_unlock(&gru->gs_lock);
+			if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
 				break;
-			ngts = NULL;
-			flag = 1;
 		}
-		spin_unlock(&gru->gs_lock);
-		if (ngts || (flag && gru == gru0 && ctxnum == ctxnum0))
+		if (flag && gru == gru0)
 			break;
+		flag = 1;
 		ctxnum = 0;
 		gru = next_gru(blade, gru);
 	}
-	blade->bs_lru_gru = gru;
-	blade->bs_lru_ctxnum = ctxnum;
 	spin_unlock(&blade->bs_lock);
 
 	if (ngts) {
@@ -778,19 +820,35 @@ void gru_steal_context(struct gru_thread_state *gts, int blade_id)
 }
 
 /*
+ * Assign a gru context.
+ */
+static int gru_assign_context_number(struct gru_state *gru)
+{
+	int ctxnum;
+
+	ctxnum = find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
+	__set_bit(ctxnum, &gru->gs_context_map);
+	return ctxnum;
+}
+
+/*
  * Scan the GRUs on the local blade & assign a GRU context.
  */
-struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
-		int blade)
+struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts)
 {
 	struct gru_state *gru, *grux;
 	int i, max_active_contexts;
+	int blade_id = gts->ts_user_blade_id;
+	int chiplet_id = gts->ts_user_chiplet_id;
 
-
+	if (blade_id < 0)
+		blade_id = uv_numa_blade_id();
 again:
 	gru = NULL;
 	max_active_contexts = GRU_NUM_CCH;
-	for_each_gru_on_blade(grux, blade, i) {
+	for_each_gru_on_blade(grux, blade_id, i) {
+		if (chiplet_id >= 0 && chiplet_id != grux->gs_chiplet_id)
+			continue;
 		if (check_gru_resources(grux, gts->ts_cbr_au_count,
 					gts->ts_dsr_au_count,
 					max_active_contexts)) {
@@ -811,12 +869,9 @@ again:
 	reserve_gru_resources(gru, gts);
 	gts->ts_gru = gru;
 	gts->ts_blade = gru->gs_blade_id;
-	gts->ts_ctxnum =
-		find_first_zero_bit(&gru->gs_context_map, GRU_NUM_CCH);
-	BUG_ON(gts->ts_ctxnum == GRU_NUM_CCH);
+	gts->ts_ctxnum = gru_assign_context_number(gru);
 	atomic_inc(&gts->ts_refcnt);
 	gru->gs_gts[gts->ts_ctxnum] = gts;
-	__set_bit(gts->ts_ctxnum, &gru->gs_context_map);
 	spin_unlock(&gru->gs_lock);
 
 	STAT(assign_context);
@@ -844,7 +899,6 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct gru_thread_state *gts;
 	unsigned long paddr, vaddr;
-	int blade_id;
 
 	vaddr = (unsigned long)vmf->virtual_address;
 	gru_dbg(grudev, "vma %p, vaddr 0x%lx (0x%lx)\n",
@@ -859,28 +913,18 @@ int gru_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 again:
 	mutex_lock(&gts->ts_ctxlock);
 	preempt_disable();
-	blade_id = uv_numa_blade_id();
 
-	if (gts->ts_gru) {
-		if (gts->ts_gru->gs_blade_id != blade_id) {
-			STAT(migrated_nopfn_unload);
-			gru_unload_context(gts, 1);
-		} else {
-			if (gru_retarget_intr(gts))
-				STAT(migrated_nopfn_retarget);
-		}
-	}
+	gru_check_context_placement(gts);
 
 	if (!gts->ts_gru) {
 		STAT(load_user_context);
-		if (!gru_assign_gru_context(gts, blade_id)) {
+		if (!gru_assign_gru_context(gts)) {
 			preempt_enable();
 			mutex_unlock(&gts->ts_ctxlock);
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(GRU_ASSIGN_DELAY);  /* true hack ZZZ */
-			blade_id = uv_numa_blade_id();
 			if (gts->ts_steal_jiffies + GRU_STEAL_DELAY < jiffies)
-				gru_steal_context(gts, blade_id);
+				gru_steal_context(gts);
 			goto again;
 		}
 		gru_load_context(gts);
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index cf6b3e3034d6..a8a2e760cca3 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -67,19 +67,14 @@ static int statistics_show(struct seq_file *s, void *p)
 	printstat(s, intr);
 	printstat(s, intr_mm_lock_failed);
 	printstat(s, call_os);
-	printstat(s, call_os_offnode_reference);
 	printstat(s, call_os_check_for_bug);
 	printstat(s, call_os_wait_queue);
 	printstat(s, user_flush_tlb);
 	printstat(s, user_unload_context);
 	printstat(s, user_exception);
 	printstat(s, set_context_option);
-	printstat(s, migrate_check);
-	printstat(s, migrated_retarget);
-	printstat(s, migrated_unload);
-	printstat(s, migrated_unload_delay);
-	printstat(s, migrated_nopfn_retarget);
-	printstat(s, migrated_nopfn_unload);
+	printstat(s, check_context_retarget_intr);
+	printstat(s, check_context_unload);
 	printstat(s, tlb_dropin);
 	printstat(s, tlb_dropin_fail_no_asid);
 	printstat(s, tlb_dropin_fail_upm);
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 27131fb2253f..8a61a8599bac 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -192,19 +192,14 @@ struct gru_stats_s {
 	atomic_long_t intr;
 	atomic_long_t intr_mm_lock_failed;
 	atomic_long_t call_os;
-	atomic_long_t call_os_offnode_reference;
 	atomic_long_t call_os_check_for_bug;
 	atomic_long_t call_os_wait_queue;
 	atomic_long_t user_flush_tlb;
 	atomic_long_t user_unload_context;
 	atomic_long_t user_exception;
 	atomic_long_t set_context_option;
-	atomic_long_t migrate_check;
-	atomic_long_t migrated_retarget;
-	atomic_long_t migrated_unload;
-	atomic_long_t migrated_unload_delay;
-	atomic_long_t migrated_nopfn_retarget;
-	atomic_long_t migrated_nopfn_unload;
+	atomic_long_t check_context_retarget_intr;
+	atomic_long_t check_context_unload;
 	atomic_long_t tlb_dropin;
 	atomic_long_t tlb_dropin_fail_no_asid;
 	atomic_long_t tlb_dropin_fail_upm;
@@ -425,6 +420,7 @@ struct gru_state {
 						 gru segments (64) */
 	unsigned short	gs_gid;			/* unique GRU number */
 	unsigned short	gs_blade_id;		/* blade of GRU */
+	unsigned char	gs_chiplet_id;		/* blade chiplet of GRU */
 	unsigned char	gs_tgh_local_shift;	/* used to pick TGH for
 						   local flush */
 	unsigned char	gs_tgh_first_remote;	/* starting TGH# for
@@ -636,10 +632,9 @@ extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct
 						*vma, int tsid);
 extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct
 						*vma, int tsid);
-extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts,
-		int blade);
+extern struct gru_state *gru_assign_gru_context(struct gru_thread_state *gts);
 extern void gru_load_context(struct gru_thread_state *gts);
-extern void gru_steal_context(struct gru_thread_state *gts, int blade_id);
+extern void gru_steal_context(struct gru_thread_state *gts);
 extern void gru_unload_context(struct gru_thread_state *gts, int savestate);
 extern int gru_update_cch(struct gru_thread_state *gts, int force_unload);
 extern void gts_drop(struct gru_thread_state *gts);
@@ -654,6 +649,7 @@ extern int gru_user_flush_tlb(unsigned long arg);
 extern int gru_user_unload_context(unsigned long arg);
 extern int gru_get_exception_detail(unsigned long arg);
 extern int gru_set_context_option(unsigned long address);
+extern void gru_check_context_placement(struct gru_thread_state *gts);
 extern int gru_cpu_fault_map_id(void);
 extern struct vm_area_struct *gru_find_vma(unsigned long vaddr);
 extern void gru_flush_all_tlb(struct gru_state *gru);