author     Jack Steiner <steiner@sgi.com>                   2008-07-30 01:33:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2008-07-30 12:41:47 -0400
commit     13d19498b0446cad2c394f9fbec8149b44a60c6e (patch)
tree       4bb9820665d8a25d70c28cb48ff8d4edd74e0f6b /drivers
parent     4c921d4d8aa74140597fd8736261837f73ca6e7a (diff)
GRU Driver: driver internal header files
This patch contains header files internal to the GRU driver.
Signed-off-by: Jack Steiner <steiner@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/misc/sgi-gru/gru.h        67
-rw-r--r--  drivers/misc/sgi-gru/grulib.h     97
-rw-r--r--  drivers/misc/sgi-gru/grutables.h  545
3 files changed, 709 insertions, 0 deletions
diff --git a/drivers/misc/sgi-gru/gru.h b/drivers/misc/sgi-gru/gru.h
new file mode 100644
index 000000000000..40df7cb3f0a5
--- /dev/null
+++ b/drivers/misc/sgi-gru/gru.h
@@ -0,0 +1,67 @@
1 | /* | ||
2 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU Lesser General Public License as published by | ||
6 | * the Free Software Foundation; either version 2.1 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU Lesser General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU Lesser General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef __GRU_H__ | ||
20 | #define __GRU_H__ | ||
21 | |||
22 | /* | ||
23 | * GRU architectural definitions | ||
24 | */ | ||
25 | #define GRU_CACHE_LINE_BYTES 64 | ||
26 | #define GRU_HANDLE_STRIDE 256 | ||
27 | #define GRU_CB_BASE 0 | ||
28 | #define GRU_DS_BASE 0x20000 | ||
29 | |||
30 | /* | ||
31 | * Size used to map GRU GSeg | ||
32 | */ | ||
33 | #if defined CONFIG_IA64 | ||
34 | #define GRU_GSEG_PAGESIZE (256 * 1024UL) | ||
35 | #elif defined CONFIG_X86_64 | ||
36 | #define GRU_GSEG_PAGESIZE (256 * 1024UL) /* ZZZ 2MB ??? */ | ||
37 | #else | ||
38 | #error "Unsupported architecture" | ||
39 | #endif | ||
40 | |||
41 | /* | ||
42 | * Structure for obtaining GRU resource information | ||
43 | */ | ||
44 | struct gru_chiplet_info { | ||
45 | int node; | ||
46 | int chiplet; | ||
47 | int blade; | ||
48 | int total_dsr_bytes; | ||
49 | int total_cbr; | ||
50 | int total_user_dsr_bytes; | ||
51 | int total_user_cbr; | ||
52 | int free_user_dsr_bytes; | ||
53 | int free_user_cbr; | ||
54 | }; | ||
55 | |||
56 | /* Flags for GRU options on the gru_create_context() call */ | ||
57 | /* Select one of the following 4 options to specify how TLB misses are handled */
58 | #define GRU_OPT_MISS_DEFAULT 0x0000 /* Use default mode */ | ||
59 | #define GRU_OPT_MISS_USER_POLL 0x0001 /* User will poll CB for faults */ | ||
60 | #define GRU_OPT_MISS_FMM_INTR 0x0002 /* Send interrupt to cpu to | ||
61 | handle fault */ | ||
62 | #define GRU_OPT_MISS_FMM_POLL 0x0003 /* Use system polling thread */ | ||
63 | #define GRU_OPT_MISS_MASK 0x0003 /* Mask for TLB MISS option */ | ||
64 | |||
65 | |||
66 | |||
67 | #endif /* __GRU_H__ */ | ||
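
For reference, a minimal user-space sketch (not part of the patch) of how the GRU_OPT_MISS_* flags defined above are intended to be combined and decoded. Only macros from gru.h are used; the helper function and the chosen option value are illustrative assumptions, since the header does not show the gru_create_context() prototype.

/*
 * Illustrative only -- not part of this commit.  Decodes the TLB-miss
 * handling mode from an option word built with the GRU_OPT_MISS_* flags.
 * Build with -DCONFIG_X86_64 (or -DCONFIG_IA64) so gru.h's page-size
 * selection compiles.
 */
#include <stdio.h>
#include "gru.h"

static const char *tlb_miss_mode(unsigned long options)
{
	switch (options & GRU_OPT_MISS_MASK) {	/* mode lives in the low bits */
	case GRU_OPT_MISS_USER_POLL:
		return "user polls CB for faults";
	case GRU_OPT_MISS_FMM_INTR:
		return "fault sends interrupt to cpu";
	case GRU_OPT_MISS_FMM_POLL:
		return "system polling thread";
	default:
		return "default mode";		/* GRU_OPT_MISS_DEFAULT (0) */
	}
}

int main(void)
{
	unsigned long options = GRU_OPT_MISS_FMM_INTR;

	printf("TLB miss handling: %s\n", tlb_miss_mode(options));
	return 0;
}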
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
new file mode 100644
index 000000000000..e56e196a6998
--- /dev/null
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -0,0 +1,97 @@
1 | /* | ||
2 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU Lesser General Public License as published by | ||
6 | * the Free Software Foundation; either version 2.1 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU Lesser General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU Lesser General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
17 | */ | ||
18 | |||
19 | #ifndef __GRULIB_H__ | ||
20 | #define __GRULIB_H__ | ||
21 | |||
22 | #define GRU_BASENAME "gru" | ||
23 | #define GRU_FULLNAME "/dev/gru" | ||
24 | #define GRU_IOCTL_NUM 'G' | ||
25 | |||
26 | /* | ||
27 | * Maximum number of GRU segments that a user can have open | ||
28 | * ZZZ temp - set high for testing. Revisit. | ||
29 | */ | ||
30 | #define GRU_MAX_OPEN_CONTEXTS 32 | ||
31 | |||
32 | /* Set Number of Request Blocks */ | ||
33 | #define GRU_CREATE_CONTEXT _IOWR(GRU_IOCTL_NUM, 1, void *) | ||
34 | |||
35 | /* Register task as using the slice */ | ||
36 | #define GRU_SET_TASK_SLICE _IOWR(GRU_IOCTL_NUM, 5, void *) | ||
37 | |||
38 | /* Fetch exception detail */ | ||
39 | #define GRU_USER_GET_EXCEPTION_DETAIL _IOWR(GRU_IOCTL_NUM, 6, void *) | ||
40 | |||
41 | /* For user call_os handling - normally a TLB fault */ | ||
42 | #define GRU_USER_CALL_OS _IOWR(GRU_IOCTL_NUM, 8, void *) | ||
43 | |||
44 | /* For user unload context */ | ||
45 | #define GRU_USER_UNLOAD_CONTEXT _IOWR(GRU_IOCTL_NUM, 9, void *) | ||
46 | |||
47 | /* For fetching GRU chiplet status */ | ||
48 | #define GRU_GET_CHIPLET_STATUS _IOWR(GRU_IOCTL_NUM, 10, void *) | ||
49 | |||
50 | /* For user TLB flushing (primarily for tests) */ | ||
51 | #define GRU_USER_FLUSH_TLB _IOWR(GRU_IOCTL_NUM, 50, void *) | ||
52 | |||
53 | /* Get some config options (primarily for tests & emulator) */ | ||
54 | #define GRU_GET_CONFIG_INFO _IOWR(GRU_IOCTL_NUM, 51, void *) | ||
55 | |||
56 | #define CONTEXT_WINDOW_BYTES(th) (GRU_GSEG_PAGESIZE * (th)) | ||
57 | #define THREAD_POINTER(p, th) (p + GRU_GSEG_PAGESIZE * (th)) | ||
58 | |||
59 | /* | ||
60 | * Structure used to pass context creation parameters to the driver
61 | */ | ||
62 | struct gru_create_context_req { | ||
63 | unsigned long gseg; | ||
64 | unsigned int data_segment_bytes; | ||
65 | unsigned int control_blocks; | ||
66 | unsigned int maximum_thread_count; | ||
67 | unsigned int options; | ||
68 | }; | ||
69 | |||
70 | /* | ||
71 | * Structure used to pass unload context parameters to the driver | ||
72 | */ | ||
73 | struct gru_unload_context_req { | ||
74 | unsigned long gseg; | ||
75 | }; | ||
76 | |||
77 | /* | ||
78 | * Structure used to pass TLB flush parameters to the driver | ||
79 | */ | ||
80 | struct gru_flush_tlb_req { | ||
81 | unsigned long gseg; | ||
82 | unsigned long vaddr; | ||
83 | size_t len; | ||
84 | }; | ||
85 | |||
86 | /* | ||
87 | * GRU configuration info (temp - for testing) | ||
88 | */ | ||
89 | struct gru_config_info { | ||
90 | int cpus; | ||
91 | int blades; | ||
92 | int nodes; | ||
93 | int chiplets; | ||
94 | int fill[16]; | ||
95 | }; | ||
96 | |||
97 | #endif /* __GRULIB_H__ */ | ||
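
For reference, a sketch (not part of the patch) of how a user-level library might drive the GRU_CREATE_CONTEXT ioctl declared above. The header does not document the request fields, so the field values, the surrounding function, and the assumption that the GSeg has already been mmapped at gseg_vaddr are all illustrative.

/*
 * Illustrative only -- not part of this commit.  Opens /dev/gru and issues
 * GRU_CREATE_CONTEXT with a gru_create_context_req.  Field values are
 * placeholders; build with -DCONFIG_X86_64 or -DCONFIG_IA64 for gru.h.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gru.h"
#include "grulib.h"

int example_create_context(unsigned long gseg_vaddr)
{
	struct gru_create_context_req req;
	int fd, ret;

	fd = open(GRU_FULLNAME, O_RDWR);	/* "/dev/gru" */
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.gseg = gseg_vaddr;			/* assumed: address of the mmapped GSeg */
	req.control_blocks = 16;		/* placeholder resource request */
	req.data_segment_bytes = 8 * GRU_CACHE_LINE_BYTES;
	req.maximum_thread_count = 1;
	req.options = GRU_OPT_MISS_DEFAULT;

	ret = ioctl(fd, GRU_CREATE_CONTEXT, &req);
	close(fd);
	return ret;
}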
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
new file mode 100644
index 000000000000..f97d84640129
--- /dev/null
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -0,0 +1,545 @@
1 | /* | ||
2 | * SN Platform GRU Driver | ||
3 | * | ||
4 | * GRU DRIVER TABLES, MACROS, externs, etc | ||
5 | * | ||
6 | * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | #ifndef __GRUTABLES_H__ | ||
24 | #define __GRUTABLES_H__ | ||
25 | |||
26 | /* | ||
27 | * Tables: | ||
28 | * | ||
29 | * VDATA-VMA Data - Holds a few parameters. Head of linked list of | ||
30 | * GTS tables for threads using the GSEG | ||
31 | * GTS - Gru Thread State - contains info for managing a GSEG context. A | ||
32 | * GTS is allocated for each thread accessing a | ||
33 | * GSEG. | ||
34 | * GTD - GRU Thread Data - contains shadow copy of GRU data when GSEG is | ||
35 | * not loaded into a GRU | ||
36 | * GMS - GRU Memory Struct - Used to manage TLB shootdowns. Tracks GRUs | ||
37 | * where a GSEG has been loaded. Similar to | ||
38 | * an mm_struct but for GRU. | ||
39 | * | ||
40 | * GS - GRU State - Used to manage the state of a GRU chiplet | ||
41 | * BS - Blade State - Used to manage state of all GRU chiplets | ||
42 | * on a blade | ||
43 | * | ||
44 | * | ||
45 | * Normal task tables for a task using the GRU.
46 | * - 2 threads in process | ||
47 | * - 2 GSEGs open in process | ||
48 | * - GSEG1 is being used by both threads | ||
49 | * - GSEG2 is used only by thread 2 | ||
50 | * | ||
51 | * task -->| | ||
52 | * task ---+---> mm ->------ (notifier) -------+-> gms | ||
53 | * | | | ||
54 | * |--> vma -> vdata ---> gts--->| GSEG1 (thread1) | ||
55 | * | | | | ||
56 | * | +-> gts--->| GSEG1 (thread2) | ||
57 | * | | | ||
58 | * |--> vma -> vdata ---> gts--->| GSEG2 (thread2) | ||
59 | * . | ||
60 | * . | ||
61 | * | ||
62 | * GSEGs are marked DONTCOPY on fork | ||
63 | * | ||
64 | * At open | ||
65 | * file.private_data -> NULL | ||
66 | * | ||
67 | * At mmap, | ||
68 | * vma -> vdata | ||
69 | * | ||
70 | * After gseg reference | ||
71 | * vma -> vdata ->gts | ||
72 | * | ||
73 | * After fork | ||
74 | * parent | ||
75 | * vma -> vdata -> gts | ||
76 | * child | ||
77 | * (vma is not copied) | ||
78 | * | ||
79 | */ | ||
80 | |||
81 | #include <linux/rmap.h> | ||
82 | #include <linux/interrupt.h> | ||
83 | #include <linux/mutex.h> | ||
84 | #include <linux/wait.h> | ||
85 | #include <linux/mmu_notifier.h> | ||
86 | #include "gru.h" | ||
87 | #include "gruhandles.h" | ||
88 | |||
89 | extern struct gru_stats_s gru_stats; | ||
90 | extern struct gru_blade_state *gru_base[]; | ||
91 | extern unsigned long gru_start_paddr, gru_end_paddr; | ||
92 | |||
93 | #define GRU_MAX_BLADES MAX_NUMNODES | ||
94 | #define GRU_MAX_GRUS (GRU_MAX_BLADES * GRU_CHIPLETS_PER_BLADE) | ||
95 | |||
96 | #define GRU_DRIVER_ID_STR "SGI GRU Device Driver" | ||
97 | #define GRU_DRIVER_VERSION_STR "0.80" | ||
98 | |||
99 | /* | ||
100 | * GRU statistics. | ||
101 | */ | ||
102 | struct gru_stats_s { | ||
103 | atomic_long_t vdata_alloc; | ||
104 | atomic_long_t vdata_free; | ||
105 | atomic_long_t gts_alloc; | ||
106 | atomic_long_t gts_free; | ||
107 | atomic_long_t vdata_double_alloc; | ||
108 | atomic_long_t gts_double_allocate; | ||
109 | atomic_long_t assign_context; | ||
110 | atomic_long_t assign_context_failed; | ||
111 | atomic_long_t free_context; | ||
112 | atomic_long_t load_context; | ||
113 | atomic_long_t unload_context; | ||
114 | atomic_long_t steal_context; | ||
115 | atomic_long_t steal_context_failed; | ||
116 | atomic_long_t nopfn; | ||
117 | atomic_long_t break_cow; | ||
118 | atomic_long_t asid_new; | ||
119 | atomic_long_t asid_next; | ||
120 | atomic_long_t asid_wrap; | ||
121 | atomic_long_t asid_reuse; | ||
122 | atomic_long_t intr; | ||
123 | atomic_long_t call_os; | ||
124 | atomic_long_t call_os_check_for_bug; | ||
125 | atomic_long_t call_os_wait_queue; | ||
126 | atomic_long_t user_flush_tlb; | ||
127 | atomic_long_t user_unload_context; | ||
128 | atomic_long_t user_exception; | ||
129 | atomic_long_t set_task_slice; | ||
130 | atomic_long_t migrate_check; | ||
131 | atomic_long_t migrated_retarget; | ||
132 | atomic_long_t migrated_unload; | ||
133 | atomic_long_t migrated_unload_delay; | ||
134 | atomic_long_t migrated_nopfn_retarget; | ||
135 | atomic_long_t migrated_nopfn_unload; | ||
136 | atomic_long_t tlb_dropin; | ||
137 | atomic_long_t tlb_dropin_fail_no_asid; | ||
138 | atomic_long_t tlb_dropin_fail_upm; | ||
139 | atomic_long_t tlb_dropin_fail_invalid; | ||
140 | atomic_long_t tlb_dropin_fail_range_active; | ||
141 | atomic_long_t tlb_dropin_fail_idle; | ||
142 | atomic_long_t tlb_dropin_fail_fmm; | ||
143 | atomic_long_t mmu_invalidate_range; | ||
144 | atomic_long_t mmu_invalidate_page; | ||
145 | atomic_long_t mmu_clear_flush_young; | ||
146 | atomic_long_t flush_tlb; | ||
147 | atomic_long_t flush_tlb_gru; | ||
148 | atomic_long_t flush_tlb_gru_tgh; | ||
149 | atomic_long_t flush_tlb_gru_zero_asid; | ||
150 | |||
151 | atomic_long_t copy_gpa; | ||
152 | |||
153 | atomic_long_t mesq_receive; | ||
154 | atomic_long_t mesq_receive_none; | ||
155 | atomic_long_t mesq_send; | ||
156 | atomic_long_t mesq_send_failed; | ||
157 | atomic_long_t mesq_noop; | ||
158 | atomic_long_t mesq_send_unexpected_error; | ||
159 | atomic_long_t mesq_send_lb_overflow; | ||
160 | atomic_long_t mesq_send_qlimit_reached; | ||
161 | atomic_long_t mesq_send_amo_nacked; | ||
162 | atomic_long_t mesq_send_put_nacked; | ||
163 | atomic_long_t mesq_qf_not_full; | ||
164 | atomic_long_t mesq_qf_locked; | ||
165 | atomic_long_t mesq_qf_noop_not_full; | ||
166 | atomic_long_t mesq_qf_switch_head_failed; | ||
167 | atomic_long_t mesq_qf_unexpected_error; | ||
168 | atomic_long_t mesq_noop_unexpected_error; | ||
169 | atomic_long_t mesq_noop_lb_overflow; | ||
170 | atomic_long_t mesq_noop_qlimit_reached; | ||
171 | atomic_long_t mesq_noop_amo_nacked; | ||
172 | atomic_long_t mesq_noop_put_nacked; | ||
173 | |||
174 | }; | ||
175 | |||
176 | #define OPT_DPRINT 1 | ||
177 | #define OPT_STATS 2 | ||
178 | #define GRU_QUICKLOOK 4 | ||
179 | |||
180 | |||
181 | #define IRQ_GRU 110 /* Starting IRQ number for interrupts */ | ||
182 | |||
183 | /* Delay in jiffies between attempts to assign a GRU context */ | ||
184 | #define GRU_ASSIGN_DELAY ((HZ * 20) / 1000) | ||
185 | |||
186 | /* | ||
187 | * If a process has its context stolen, min delay in jiffies before trying to
188 | * steal a context from another process. | ||
189 | */ | ||
190 | #define GRU_STEAL_DELAY ((HZ * 200) / 1000) | ||
191 | |||
192 | #define STAT(id) do { \ | ||
193 | if (options & OPT_STATS) \ | ||
194 | atomic_long_inc(&gru_stats.id); \ | ||
195 | } while (0) | ||
196 | |||
197 | #ifdef CONFIG_SGI_GRU_DEBUG | ||
198 | #define gru_dbg(dev, fmt, x...) \ | ||
199 | do { \ | ||
200 | if (options & OPT_DPRINT) \ | ||
201 | dev_dbg(dev, "%s: " fmt, __func__, x); \ | ||
202 | } while (0) | ||
203 | #else | ||
204 | #define gru_dbg(x...) | ||
205 | #endif | ||
206 | |||
207 | /*----------------------------------------------------------------------------- | ||
208 | * ASID management | ||
209 | */ | ||
210 | #define MAX_ASID 0xfffff0 | ||
211 | #define MIN_ASID 8 | ||
212 | #define ASID_INC 8 /* number of regions */ | ||
213 | |||
214 | /* Generate a GRU asid value from a GRU base asid & a virtual address. */ | ||
215 | #if defined CONFIG_IA64 | ||
216 | #define VADDR_HI_BIT 64 | ||
217 | #define GRUREGION(addr) ((addr) >> (VADDR_HI_BIT - 3) & 3) | ||
218 | #elif defined __x86_64 | ||
219 | #define VADDR_HI_BIT 48 | ||
220 | #define GRUREGION(addr) (0) /* ZZZ could do better */ | ||
221 | #else | ||
222 | #error "Unsupported architecture" | ||
223 | #endif | ||
224 | #define GRUASID(asid, addr) ((asid) + GRUREGION(addr)) | ||
225 | |||
226 | /*------------------------------------------------------------------------------ | ||
227 | * File & VMS Tables | ||
228 | */ | ||
229 | |||
230 | struct gru_state; | ||
231 | |||
232 | /* | ||
233 | * This structure is pointed to from the mm_struct via the notifier pointer.
234 | * There is one of these per address space. | ||
235 | */ | ||
236 | struct gru_mm_tracker { | ||
237 | unsigned int mt_asid_gen; /* ASID wrap count */ | ||
238 | int mt_asid; /* current base ASID for gru */ | ||
239 | unsigned short mt_ctxbitmap; /* bitmap of contexts using | ||
240 | asid */ | ||
241 | }; | ||
242 | |||
243 | struct gru_mm_struct { | ||
244 | struct mmu_notifier ms_notifier; | ||
245 | atomic_t ms_refcnt; | ||
246 | spinlock_t ms_asid_lock; /* protects ASID assignment */ | ||
247 | atomic_t ms_range_active;/* num range_invals active */ | ||
248 | char ms_released; | ||
249 | wait_queue_head_t ms_wait_queue; | ||
250 | DECLARE_BITMAP(ms_asidmap, GRU_MAX_GRUS); | ||
251 | struct gru_mm_tracker ms_asids[GRU_MAX_GRUS]; | ||
252 | }; | ||
253 | |||
254 | /* | ||
255 | * One of these structures is allocated when a GSEG is mmaped. The | ||
256 | * structure is pointed to by the vma->vm_private_data field in the vma struct. | ||
257 | */ | ||
258 | struct gru_vma_data { | ||
259 | spinlock_t vd_lock; /* Serialize access to vma */ | ||
260 | struct list_head vd_head; /* head of linked list of gts */ | ||
261 | long vd_user_options;/* misc user option flags */ | ||
262 | int vd_cbr_au_count; | ||
263 | int vd_dsr_au_count; | ||
264 | }; | ||
265 | |||
266 | /* | ||
267 | * One of these is allocated for each thread accessing an mmaped GRU. A linked
268 | * list of these structures is hung off the struct gru_vma_data in the mm_struct.
269 | */ | ||
270 | struct gru_thread_state { | ||
271 | struct list_head ts_next; /* list - head at vma-private */ | ||
272 | struct mutex ts_ctxlock; /* load/unload CTX lock */ | ||
273 | struct mm_struct *ts_mm; /* mm currently mapped to | ||
274 | context */ | ||
275 | struct vm_area_struct *ts_vma; /* vma of GRU context */ | ||
276 | struct gru_state *ts_gru; /* GRU where the context is | ||
277 | loaded */ | ||
278 | struct gru_mm_struct *ts_gms; /* asid & ioproc struct */ | ||
279 | unsigned long ts_cbr_map; /* map of allocated CBRs */ | ||
280 | unsigned long ts_dsr_map; /* map of allocated DATA | ||
281 | resources */ | ||
282 | unsigned long ts_steal_jiffies;/* jiffies when context last | ||
283 | stolen */ | ||
284 | long ts_user_options;/* misc user option flags */ | ||
285 | pid_t ts_tgid_owner; /* task that is using the | ||
286 | context - for migration */ | ||
287 | int ts_tsid; /* thread that owns the | ||
288 | structure */ | ||
289 | int ts_tlb_int_select;/* target cpu if interrupts | ||
290 | enabled */ | ||
291 | int ts_ctxnum; /* context number where the | ||
292 | context is loaded */ | ||
293 | atomic_t ts_refcnt; /* reference count GTS */ | ||
294 | unsigned char ts_dsr_au_count;/* Number of DSR resources | ||
295 | required for context */
296 | unsigned char ts_cbr_au_count;/* Number of CBR resources | ||
297 | required for context */
298 | char ts_force_unload;/* force context to be unloaded | ||
299 | after migration */ | ||
300 | char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each | ||
301 | allocated CB */ | ||
302 | unsigned long ts_gdata[0]; /* save area for GRU data (CB, | ||
303 | DS, CBE) */ | ||
304 | }; | ||
305 | |||
306 | /* | ||
307 | * Threaded programs actually allocate an array of GSEGs when a context is | ||
308 | * created. Each thread uses a separate GSEG. TSID is the index into the GSEG | ||
309 | * array. | ||
310 | */ | ||
311 | #define TSID(a, v) (((a) - (v)->vm_start) / GRU_GSEG_PAGESIZE) | ||
312 | #define UGRUADDR(gts) ((gts)->ts_vma->vm_start + \ | ||
313 | (gts)->ts_tsid * GRU_GSEG_PAGESIZE) | ||
314 | |||
315 | #define NULLCTX (-1) /* if context not loaded into GRU */ | ||
316 | |||
317 | /*----------------------------------------------------------------------------- | ||
318 | * GRU State Tables | ||
319 | */ | ||
320 | |||
321 | /* | ||
322 | * One of these exists for each GRU chiplet. | ||
323 | */ | ||
324 | struct gru_state { | ||
325 | struct gru_blade_state *gs_blade; /* GRU state for entire | ||
326 | blade */ | ||
327 | unsigned long gs_gru_base_paddr; /* Physical address of | ||
328 | gru segments (64) */ | ||
329 | void *gs_gru_base_vaddr; /* Virtual address of | ||
330 | gru segments (64) */ | ||
331 | unsigned char gs_gid; /* unique GRU number */ | ||
332 | unsigned char gs_tgh_local_shift; /* used to pick TGH for | ||
333 | local flush */ | ||
334 | unsigned char gs_tgh_first_remote; /* starting TGH# for | ||
335 | remote flush */ | ||
336 | unsigned short gs_blade_id; /* blade of GRU */ | ||
337 | spinlock_t gs_asid_lock; /* lock used for | ||
338 | assigning asids */ | ||
339 | spinlock_t gs_lock; /* lock used for | ||
340 | assigning contexts */ | ||
341 | |||
342 | /* -- the following are protected by the gs_asid_lock spinlock ---- */ | ||
343 | unsigned int gs_asid; /* Next available ASID */
344 | unsigned int gs_asid_limit; /* Limit of available | ||
345 | ASIDs */ | ||
346 | unsigned int gs_asid_gen; /* asid generation. | ||
347 | Inc on wrap */ | ||
348 | |||
349 | /* --- the following fields are protected by the gs_lock spinlock --- */ | ||
350 | unsigned long gs_context_map; /* bitmap to manage | ||
351 | contexts in use */ | ||
352 | unsigned long gs_cbr_map; /* bitmap to manage CB | ||
353 | resources */ | ||
354 | unsigned long gs_dsr_map; /* bitmap used to manage | ||
355 | DATA resources */ | ||
356 | unsigned int gs_reserved_cbrs; /* Number of kernel- | ||
357 | reserved cbrs */ | ||
358 | unsigned int gs_reserved_dsr_bytes; /* Bytes of kernel- | ||
359 | reserved dsrs */ | ||
360 | unsigned short gs_active_contexts; /* number of contexts | ||
361 | in use */ | ||
362 | struct gru_thread_state *gs_gts[GRU_NUM_CCH]; /* GTS currently using | ||
363 | the context */ | ||
364 | }; | ||
365 | |||
366 | /* | ||
367 | * This structure contains the GRU state for all the GRUs on a blade. | ||
368 | */ | ||
369 | struct gru_blade_state { | ||
370 | void *kernel_cb; /* First kernel | ||
371 | reserved cb */ | ||
372 | void *kernel_dsr; /* First kernel | ||
373 | reserved DSR */ | ||
374 | /* ---- the following are protected by the bs_lock spinlock ---- */ | ||
375 | spinlock_t bs_lock; /* lock used for | ||
376 | stealing contexts */ | ||
377 | int bs_lru_ctxnum; /* STEAL - last context | ||
378 | stolen */ | ||
379 | struct gru_state *bs_lru_gru; /* STEAL - last gru | ||
380 | stolen */ | ||
381 | |||
382 | struct gru_state bs_grus[GRU_CHIPLETS_PER_BLADE]; | ||
383 | }; | ||
384 | |||
385 | /*----------------------------------------------------------------------------- | ||
386 | * Address Primitives | ||
387 | */ | ||
388 | #define get_tfm_for_cpu(g, c) \ | ||
389 | ((struct gru_tlb_fault_map *)get_tfm((g)->gs_gru_base_vaddr, (c))) | ||
390 | #define get_tfh_by_index(g, i) \ | ||
391 | ((struct gru_tlb_fault_handle *)get_tfh((g)->gs_gru_base_vaddr, (i))) | ||
392 | #define get_tgh_by_index(g, i) \ | ||
393 | ((struct gru_tlb_global_handle *)get_tgh((g)->gs_gru_base_vaddr, (i))) | ||
394 | #define get_cbe_by_index(g, i) \ | ||
395 | ((struct gru_control_block_extended *)get_cbe((g)->gs_gru_base_vaddr,\ | ||
396 | (i))) | ||
397 | |||
398 | /*----------------------------------------------------------------------------- | ||
399 | * Useful Macros | ||
400 | */ | ||
401 | |||
402 | /* Given a blade# & chiplet#, get a pointer to the GRU */ | ||
403 | #define get_gru(b, c) (&gru_base[b]->bs_grus[c]) | ||
404 | |||
405 | /* Number of bytes to save/restore when unloading/loading GRU contexts */ | ||
406 | #define DSR_BYTES(dsr) ((dsr) * GRU_DSR_AU_BYTES) | ||
407 | #define CBR_BYTES(cbr) ((cbr) * GRU_HANDLE_BYTES * GRU_CBR_AU_SIZE * 2) | ||
408 | |||
409 | /* Convert a user CB number to the actual CBRNUM */ | ||
410 | #define thread_cbr_number(gts, n) ((gts)->ts_cbr_idx[(n) / GRU_CBR_AU_SIZE] \ | ||
411 | * GRU_CBR_AU_SIZE + (n) % GRU_CBR_AU_SIZE) | ||
412 | |||
413 | /* Convert a gid to a pointer to the GRU */ | ||
414 | #define GID_TO_GRU(gid) \ | ||
415 | (gru_base[(gid) / GRU_CHIPLETS_PER_BLADE] ? \ | ||
416 | (&gru_base[(gid) / GRU_CHIPLETS_PER_BLADE]-> \ | ||
417 | bs_grus[(gid) % GRU_CHIPLETS_PER_BLADE]) : \ | ||
418 | NULL) | ||
419 | |||
420 | /* Scan all active GRUs in a GRU bitmap */ | ||
421 | #define for_each_gru_in_bitmap(gid, map) \ | ||
422 | for ((gid) = find_first_bit((map), GRU_MAX_GRUS); (gid) < GRU_MAX_GRUS;\ | ||
423 | (gid)++, (gid) = find_next_bit((map), GRU_MAX_GRUS, (gid))) | ||
424 | |||
425 | /* Scan all active GRUs on a specific blade */ | ||
426 | #define for_each_gru_on_blade(gru, nid, i) \ | ||
427 | for ((gru) = gru_base[nid]->bs_grus, (i) = 0; \ | ||
428 | (i) < GRU_CHIPLETS_PER_BLADE; \ | ||
429 | (i)++, (gru)++) | ||
430 | |||
431 | /* Scan all active GTSs on a gru. Note: must hold gs_lock to use this macro. */
432 | #define for_each_gts_on_gru(gts, gru, ctxnum) \ | ||
433 | for ((ctxnum) = 0; (ctxnum) < GRU_NUM_CCH; (ctxnum)++) \ | ||
434 | if (((gts) = (gru)->gs_gts[ctxnum])) | ||
435 | |||
436 | /* Scan each CBR whose bit is set in a TFM (or copy of) */ | ||
437 | #define for_each_cbr_in_tfm(i, map) \ | ||
438 | for ((i) = find_first_bit(map, GRU_NUM_CBE); \ | ||
439 | (i) < GRU_NUM_CBE; \ | ||
440 | (i)++, (i) = find_next_bit(map, GRU_NUM_CBE, i)) | ||
441 | |||
442 | /* Scan each CBR in a CBR bitmap. Note: multiple CBRs in an allocation unit */ | ||
443 | #define for_each_cbr_in_allocation_map(i, map, k) \ | ||
444 | for ((k) = find_first_bit(map, GRU_CBR_AU); (k) < GRU_CBR_AU; \ | ||
445 | (k) = find_next_bit(map, GRU_CBR_AU, (k) + 1)) \ | ||
446 | for ((i) = (k)*GRU_CBR_AU_SIZE; \ | ||
447 | (i) < ((k) + 1) * GRU_CBR_AU_SIZE; (i)++) | ||
448 | |||
449 | /* Scan each DSR in a DSR bitmap. Note: multiple DSRs in an allocation unit */ | ||
450 | #define for_each_dsr_in_allocation_map(i, map, k) \ | ||
451 | for ((k) = find_first_bit((const unsigned long *)map, GRU_DSR_AU);\ | ||
452 | (k) < GRU_DSR_AU; \ | ||
453 | (k) = find_next_bit((const unsigned long *)map, \ | ||
454 | GRU_DSR_AU, (k) + 1)) \ | ||
455 | for ((i) = (k) * GRU_DSR_AU_CL; \ | ||
456 | (i) < ((k) + 1) * GRU_DSR_AU_CL; (i)++) | ||
457 | |||
458 | #define gseg_physical_address(gru, ctxnum) \ | ||
459 | ((gru)->gs_gru_base_paddr + ctxnum * GRU_GSEG_STRIDE) | ||
460 | #define gseg_virtual_address(gru, ctxnum) \ | ||
461 | ((gru)->gs_gru_base_vaddr + ctxnum * GRU_GSEG_STRIDE) | ||
462 | |||
463 | /*----------------------------------------------------------------------------- | ||
464 | * Lock / Unlock GRU handles | ||
465 | * Use the "delresp" bit in the handle as a "lock" bit. | ||
466 | */ | ||
467 | |||
468 | /* Lock hierarchy checking enabled only in emulator */ | ||
469 | |||
470 | static inline void __lock_handle(void *h) | ||
471 | { | ||
472 | while (test_and_set_bit(1, h)) | ||
473 | cpu_relax(); | ||
474 | } | ||
475 | |||
476 | static inline void __unlock_handle(void *h) | ||
477 | { | ||
478 | clear_bit(1, h); | ||
479 | } | ||
480 | |||
481 | static inline void lock_cch_handle(struct gru_context_configuration_handle *cch) | ||
482 | { | ||
483 | __lock_handle(cch); | ||
484 | } | ||
485 | |||
486 | static inline void unlock_cch_handle(struct gru_context_configuration_handle | ||
487 | *cch) | ||
488 | { | ||
489 | __unlock_handle(cch); | ||
490 | } | ||
491 | |||
492 | static inline void lock_tgh_handle(struct gru_tlb_global_handle *tgh) | ||
493 | { | ||
494 | __lock_handle(tgh); | ||
495 | } | ||
496 | |||
497 | static inline void unlock_tgh_handle(struct gru_tlb_global_handle *tgh) | ||
498 | { | ||
499 | __unlock_handle(tgh); | ||
500 | } | ||
501 | |||
502 | /*----------------------------------------------------------------------------- | ||
503 | * Function prototypes & externs | ||
504 | */ | ||
505 | struct gru_unload_context_req; | ||
506 | |||
507 | extern struct vm_operations_struct gru_vm_ops; | ||
508 | extern struct device *grudev; | ||
509 | |||
510 | extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, | ||
511 | int tsid); | ||
512 | extern struct gru_thread_state *gru_find_thread_state(struct vm_area_struct | ||
513 | *vma, int tsid); | ||
514 | extern struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct | ||
515 | *vma, int tsid); | ||
516 | extern void gru_unload_context(struct gru_thread_state *gts, int savestate); | ||
517 | extern void gts_drop(struct gru_thread_state *gts); | ||
518 | extern void gru_tgh_flush_init(struct gru_state *gru); | ||
519 | extern int gru_kservices_init(struct gru_state *gru); | ||
520 | extern irqreturn_t gru_intr(int irq, void *dev_id); | ||
521 | extern int gru_handle_user_call_os(unsigned long address); | ||
522 | extern int gru_user_flush_tlb(unsigned long arg); | ||
523 | extern int gru_user_unload_context(unsigned long arg); | ||
524 | extern int gru_get_exception_detail(unsigned long arg); | ||
525 | extern int gru_set_task_slice(long address); | ||
526 | extern int gru_cpu_fault_map_id(void); | ||
527 | extern struct vm_area_struct *gru_find_vma(unsigned long vaddr); | ||
528 | extern void gru_flush_all_tlb(struct gru_state *gru); | ||
529 | extern int gru_proc_init(void); | ||
530 | extern void gru_proc_exit(void); | ||
531 | |||
532 | extern unsigned long reserve_gru_cb_resources(struct gru_state *gru, | ||
533 | int cbr_au_count, char *cbmap); | ||
534 | extern unsigned long reserve_gru_ds_resources(struct gru_state *gru, | ||
535 | int dsr_au_count, char *dsmap); | ||
536 | extern int gru_fault(struct vm_area_struct *, struct vm_fault *vmf); | ||
537 | extern struct gru_mm_struct *gru_register_mmu_notifier(void); | ||
538 | extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); | ||
539 | |||
540 | extern void gru_flush_tlb_range(struct gru_mm_struct *gms, unsigned long start, | ||
541 | unsigned long len); | ||
542 | |||
543 | extern unsigned long options; | ||
544 | |||
545 | #endif /* __GRUTABLES_H__ */ | ||
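
For reference, a kernel-side sketch (not part of the patch) of how the iteration and lookup macros in the "Useful Macros" section are meant to be used. gru_dump_gids() is an illustrative helper, not a driver function; it relies only on macros and fields declared in this header.

/*
 * Illustrative only -- not part of this commit.  Walks a bitmap of active
 * GRU gids and prints a line per chiplet, skipping blades that are absent.
 * Assumes the driver's usual includes (grutables.h, linux/kernel.h).
 */
static void gru_dump_gids(unsigned long *gru_map)
{
	struct gru_state *gru;
	int gid;

	for_each_gru_in_bitmap(gid, gru_map) {
		gru = GID_TO_GRU(gid);	/* NULL when gru_base[] has no entry */
		if (!gru)
			continue;
		printk(KERN_DEBUG "gru %d: blade %d, %d active contexts\n",
		       gru->gs_gid, gru->gs_blade_id, gru->gs_active_contexts);
	}
}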