Diffstat (limited to 'drivers/misc/sgi-gru/grukdump.c')
-rw-r--r--  drivers/misc/sgi-gru/grukdump.c  218
1 file changed, 218 insertions(+), 0 deletions(-)
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
new file mode 100644
index 000000000000..27e00931a7b8
--- /dev/null
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -0,0 +1,218 @@
/*
 * SN Platform GRU Driver
 *
 * Dump GRU State
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <asm/uv/uv_hub.h>
#include "gru.h"
#include "grutables.h"
#include "gruhandles.h"
#include "grulib.h"

#define CCH_LOCK_ATTEMPTS	10
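
/*
 * Copy a single GRU handle (GRU_HANDLE_BYTES) to user space and advance
 * the caller's user-buffer pointer past it.  Returns 0 on success,
 * -1 if the copy faults.
 */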
static int gru_user_copy_handle(void __user **dp, void *s)
{
        if (copy_to_user(*dp, s, GRU_HANDLE_BYTES))
                return -1;
        *dp += GRU_HANDLE_BYTES;
        return 0;
}
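
/*
 * Dump the resources of one GRU context to the user buffer: for each CBR
 * allocated to the context, copy the CBR, its matching TFH and CBE,
 * followed by the context's data segment when dsrcnt is non-zero.
 */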
static int gru_dump_context_data(void *grubase,
                        struct gru_context_configuration_handle *cch,
                        void __user *ubuf, int ctxnum, int dsrcnt)
{
        void *cb, *cbe, *tfh, *gseg;
        int i, scr;

        gseg = grubase + ctxnum * GRU_GSEG_STRIDE;
        cb = gseg + GRU_CB_BASE;
        cbe = grubase + GRU_CBE_BASE;
        tfh = grubase + GRU_TFH_BASE;

        for_each_cbr_in_allocation_map(i, &cch->cbr_allocation_map, scr) {
                if (gru_user_copy_handle(&ubuf, cb))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, tfh + i * GRU_HANDLE_STRIDE))
                        goto fail;
                if (gru_user_copy_handle(&ubuf, cbe + i * GRU_HANDLE_STRIDE))
                        goto fail;
                cb += GRU_HANDLE_STRIDE;
        }
        if (dsrcnt &&
            copy_to_user(ubuf, gseg + GRU_DS_BASE, dsrcnt * GRU_HANDLE_STRIDE))
                goto fail;
        return 0;

fail:
        return -EFAULT;
}
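
/*
 * Dump all TLB fault map (TFM) handles of the chiplet.  Returns the
 * number of bytes copied to the user buffer, or a negative errno if the
 * buffer is too small or a copy faults.
 */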
static int gru_dump_tfm(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_fault_map *tfm;
        int i, bytes;

        bytes = GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;
        if (bytes > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TFM; i++) {
                tfm = get_tfm(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tfm))
                        goto fail;
        }
        return GRU_NUM_TFM * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}
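
/*
 * Dump all TLB global handles (TGH) of the chiplet.  Same layout and
 * return convention as gru_dump_tfm() above.
 */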
static int gru_dump_tgh(struct gru_state *gru,
                void __user *ubuf, void __user *ubufend)
{
        struct gru_tlb_global_handle *tgh;
        int i, bytes;

        bytes = GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;
        if (bytes > ubufend - ubuf)
                return -EFBIG;

        for (i = 0; i < GRU_NUM_TGH; i++) {
                tgh = get_tgh(gru->gs_gru_base_vaddr, i);
                if (gru_user_copy_handle(&ubuf, tgh))
                        goto fail;
        }
        return GRU_NUM_TGH * GRU_CACHE_LINE_BYTES;

fail:
        return -EFAULT;
}
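
/*
 * Dump one GRU context: a gru_dump_context_header, the CCH and, when the
 * CCH could be locked (or locking was not requested), the context's
 * CBR/CBE/TFH state plus, optionally, its data segment.  hdr.cch_locked
 * records whether the lock was obtained.  Returns the number of bytes
 * written or a negative errno.
 */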
static int gru_dump_context(struct gru_state *gru, int ctxnum,
                void __user *ubuf, void __user *ubufend, char data_opt,
                char lock_cch)
{
        struct gru_dump_context_header hdr;
        struct gru_dump_context_header __user *uhdr = ubuf;
        struct gru_context_configuration_handle *cch;
        struct gru_thread_state *gts;
        int try, cch_locked, cbrcnt = 0, dsrcnt = 0, bytes = 0, ret = 0;
        void *grubase;

        memset(&hdr, 0, sizeof(hdr));
        grubase = gru->gs_gru_base_vaddr;
        cch = get_cch(grubase, ctxnum);
        for (try = 0; try < CCH_LOCK_ATTEMPTS; try++) {
                cch_locked = trylock_cch_handle(cch);
                if (cch_locked)
                        break;
                msleep(1);
        }

        ubuf += sizeof(hdr);
        if (gru_user_copy_handle(&ubuf, cch))
                goto fail;
        bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;

        if (cch_locked || !lock_cch) {
                gts = gru->gs_gts[ctxnum];
                if (gts) {
                        hdr.pid = gts->ts_tgid_owner;
                        hdr.vaddr = gts->ts_vma->vm_start;
                }
                if (cch->state != CCHSTATE_INACTIVE) {
                        cbrcnt = hweight64(cch->cbr_allocation_map) *
                                                GRU_CBR_AU_SIZE;
                        dsrcnt = data_opt ? hweight32(cch->dsr_allocation_map) *
                                                GRU_DSR_AU_CL : 0;
                }
                bytes += (3 * cbrcnt + dsrcnt) * GRU_CACHE_LINE_BYTES;
                if (bytes > ubufend - ubuf)
                        ret = -EFBIG;
                else
                        ret = gru_dump_context_data(grubase, cch, ubuf, ctxnum,
                                                        dsrcnt);

        }
        if (cch_locked)
                unlock_cch_handle(cch);
        if (ret)
                return ret;

        hdr.magic = GRU_DUMP_MAGIC;
        hdr.ctxnum = ctxnum;
        hdr.cbrcnt = cbrcnt;
        hdr.dsrcnt = dsrcnt;
        hdr.cch_locked = cch_locked;
        if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
                ret = -EFAULT;

        return ret ? ret : bytes;

fail:
        unlock_cch_handle(cch);
        return -EFAULT;
}
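
/*
 * Entry point for the dump-chiplet ioctl: copies the request from user
 * space, then dumps the chiplet's TFMs, TGHs and the selected context(s)
 * (all contexts when req.ctxnum is negative) into the user buffer.
 * Returns the number of contexts dumped or a negative errno.
 */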
int gru_dump_chiplet_request(unsigned long arg)
{
        struct gru_state *gru;
        struct gru_dump_chiplet_state_req req;
        void __user *ubuf;
        void __user *ubufend;
        int ctxnum, ret, cnt = 0;

        if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
                return -EFAULT;

        /* Currently, only dump by gid is implemented */
        if (req.gid >= gru_max_gids || req.gid < 0)
                return -EINVAL;

        gru = GID_TO_GRU(req.gid);
        ubuf = req.buf;
        ubufend = req.buf + req.buflen;

        ret = gru_dump_tfm(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

        ret = gru_dump_tgh(gru, ubuf, ubufend);
        if (ret < 0)
                goto fail;
        ubuf += ret;

        for (ctxnum = 0; ctxnum < GRU_NUM_CCH; ctxnum++) {
                if (req.ctxnum == ctxnum || req.ctxnum < 0) {
                        ret = gru_dump_context(gru, ctxnum, ubuf, ubufend,
                                                req.data_opt, req.lock_cch);
                        if (ret < 0)
                                goto fail;
                        ubuf += ret;
                        cnt++;
                }
        }

        if (copy_to_user((void __user *)arg, &req, sizeof(req)))
                return -EFAULT;
        return cnt;

fail:
        return ret;
}