diff options
Diffstat (limited to 'drivers/crypto/qat/qat_common/qat_uclo.c')
-rw-r--r-- | drivers/crypto/qat/qat_common/qat_uclo.c | 1181 |
1 files changed, 1181 insertions, 0 deletions
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c new file mode 100644 index 000000000000..1e27f9f7fddf --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_uclo.c | |||
@@ -0,0 +1,1181 @@ | |||
1 | /* | ||
2 | This file is provided under a dual BSD/GPLv2 license. When using or | ||
3 | redistributing this file, you may do so under either license. | ||
4 | |||
5 | GPL LICENSE SUMMARY | ||
6 | Copyright(c) 2014 Intel Corporation. | ||
7 | This program is free software; you can redistribute it and/or modify | ||
8 | it under the terms of version 2 of the GNU General Public License as | ||
9 | published by the Free Software Foundation. | ||
10 | |||
11 | This program is distributed in the hope that it will be useful, but | ||
12 | WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | General Public License for more details. | ||
15 | |||
16 | Contact Information: | ||
17 | qat-linux@intel.com | ||
18 | |||
19 | BSD LICENSE | ||
20 | Copyright(c) 2014 Intel Corporation. | ||
21 | Redistribution and use in source and binary forms, with or without | ||
22 | modification, are permitted provided that the following conditions | ||
23 | are met: | ||
24 | |||
25 | * Redistributions of source code must retain the above copyright | ||
26 | notice, this list of conditions and the following disclaimer. | ||
27 | * Redistributions in binary form must reproduce the above copyright | ||
28 | notice, this list of conditions and the following disclaimer in | ||
29 | the documentation and/or other materials provided with the | ||
30 | distribution. | ||
31 | * Neither the name of Intel Corporation nor the names of its | ||
32 | contributors may be used to endorse or promote products derived | ||
33 | from this software without specific prior written permission. | ||
34 | |||
35 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
36 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
37 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
38 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
39 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
40 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
41 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
42 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
43 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
44 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
45 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
46 | */ | ||
47 | #include <linux/slab.h> | ||
48 | #include <linux/ctype.h> | ||
49 | #include <linux/kernel.h> | ||
50 | |||
51 | #include "adf_accel_devices.h" | ||
52 | #include "adf_common_drv.h" | ||
53 | #include "icp_qat_uclo.h" | ||
54 | #include "icp_qat_hal.h" | ||
55 | #include "icp_qat_fw_loader_handle.h" | ||
56 | |||
57 | #define UWORD_CPYBUF_SIZE 1024 | ||
58 | #define INVLD_UWORD 0xffffffffffull | ||
59 | #define PID_MINOR_REV 0xf | ||
60 | #define PID_MAJOR_REV (0xf << 4) | ||
61 | |||
/*
 * Bind UOF image @image_num to accel engine @ae: record the encapsulated
 * image in the AE's next free slice and allocate the region/page
 * bookkeeping structures for that slice.
 * Returns 0 on success, -ENOMEM if an allocation fails.
 */
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		/* image present: take its context mask and assume the full
		 * physical ustore is usable by this AE */
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	/* slice is now in use; advance to the next free slot */
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}
98 | |||
99 | static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) | ||
100 | { | ||
101 | unsigned int i; | ||
102 | |||
103 | if (!ae_data) { | ||
104 | pr_err("QAT: bad argument, ae_data is NULL\n "); | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | for (i = 0; i < ae_data->slice_num; i++) { | ||
109 | kfree(ae_data->ae_slices[i].region); | ||
110 | ae_data->ae_slices[i].region = NULL; | ||
111 | kfree(ae_data->ae_slices[i].page); | ||
112 | ae_data->ae_slices[i].page = NULL; | ||
113 | } | ||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, | ||
118 | unsigned int str_offset) | ||
119 | { | ||
120 | if ((!str_table->table_len) || (str_offset > str_table->table_len)) | ||
121 | return NULL; | ||
122 | return (char *)(((unsigned long)(str_table->strings)) + str_offset); | ||
123 | } | ||
124 | |||
125 | static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr) | ||
126 | { | ||
127 | int maj = hdr->maj_ver & 0xff; | ||
128 | int min = hdr->min_ver & 0xff; | ||
129 | |||
130 | if (hdr->file_id != ICP_QAT_UOF_FID) { | ||
131 | pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); | ||
132 | return -EINVAL; | ||
133 | } | ||
134 | if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { | ||
135 | pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", | ||
136 | maj, min); | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | return 0; | ||
140 | } | ||
141 | |||
/*
 * Copy @num_in_bytes from @val into device SRAM starting at @addr, one
 * 32-bit word at a time via SRAM_WRITE.
 *
 * NOTE(review): the loop decrements by 4 with no remainder handling, so
 * @num_in_bytes is assumed to be a multiple of 4 — a non-multiple would
 * underflow and loop effectively forever. Callers in this file pass 4.
 */
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		/* memcpy avoids unaligned 32-bit loads from the byte stream */
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}
157 | |||
/*
 * Write @num_in_bytes from @val into AE @ae's micro-store memory, one
 * 32-bit word per qat_hal_wr_umem() call. @addr is a byte address and is
 * converted to a uword address before use.
 *
 * NOTE(review): like qat_uclo_wr_sram_by_words(), this assumes
 * @num_in_bytes is a multiple of 4.
 */
static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		/* memcpy avoids unaligned 32-bit loads from the byte stream */
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}
175 | |||
176 | static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, | ||
177 | unsigned char ae, | ||
178 | struct icp_qat_uof_batch_init | ||
179 | *umem_init_header) | ||
180 | { | ||
181 | struct icp_qat_uof_batch_init *umem_init; | ||
182 | |||
183 | if (!umem_init_header) | ||
184 | return; | ||
185 | umem_init = umem_init_header->next; | ||
186 | while (umem_init) { | ||
187 | unsigned int addr, *value, size; | ||
188 | |||
189 | ae = umem_init->ae; | ||
190 | addr = umem_init->addr; | ||
191 | value = umem_init->value; | ||
192 | size = umem_init->size; | ||
193 | qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); | ||
194 | umem_init = umem_init->next; | ||
195 | } | ||
196 | } | ||
197 | |||
198 | static void | ||
199 | qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, | ||
200 | struct icp_qat_uof_batch_init **base) | ||
201 | { | ||
202 | struct icp_qat_uof_batch_init *umem_init; | ||
203 | |||
204 | umem_init = *base; | ||
205 | while (umem_init) { | ||
206 | struct icp_qat_uof_batch_init *pre; | ||
207 | |||
208 | pre = umem_init; | ||
209 | umem_init = umem_init->next; | ||
210 | kfree(pre); | ||
211 | } | ||
212 | *base = NULL; | ||
213 | } | ||
214 | |||
215 | static int qat_uclo_parse_num(char *str, unsigned int *num) | ||
216 | { | ||
217 | char buf[16] = {0}; | ||
218 | unsigned long ae = 0; | ||
219 | int i; | ||
220 | |||
221 | strncpy(buf, str, 15); | ||
222 | for (i = 0; i < 16; i++) { | ||
223 | if (!isdigit(buf[i])) { | ||
224 | buf[i] = '\0'; | ||
225 | break; | ||
226 | } | ||
227 | } | ||
228 | if ((kstrtoul(buf, 10, &ae))) | ||
229 | return -EFAULT; | ||
230 | |||
231 | *num = (unsigned int)ae; | ||
232 | return 0; | ||
233 | } | ||
234 | |||
/*
 * Validate an init-mem entry and resolve which AE it targets.
 * The entry's byte range must fit inside @size_range (given in 32-bit
 * words), its scope must be local, and its symbol name must parse as a
 * decimal AE number below ICP_QAT_UCLO_MAX_AE.
 * On success *@ae holds the AE index and 0 is returned; -EINVAL otherwise.
 */
static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	/* size_range is in words; << 2 converts to the byte limit */
	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("QAT: initmem is out of range");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("QAT: Memory scope for init_mem error\n");
		return -EINVAL;
	}
	/* the symbol name in the string table encodes the target AE number */
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("QAT: Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("QAT: ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}
265 | |||
266 | static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle | ||
267 | *handle, struct icp_qat_uof_initmem | ||
268 | *init_mem, unsigned int ae, | ||
269 | struct icp_qat_uof_batch_init | ||
270 | **init_tab_base) | ||
271 | { | ||
272 | struct icp_qat_uof_batch_init *init_header, *tail; | ||
273 | struct icp_qat_uof_batch_init *mem_init, *tail_old; | ||
274 | struct icp_qat_uof_memvar_attr *mem_val_attr; | ||
275 | unsigned int i, flag = 0; | ||
276 | |||
277 | mem_val_attr = | ||
278 | (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem + | ||
279 | sizeof(struct icp_qat_uof_initmem)); | ||
280 | |||
281 | init_header = *init_tab_base; | ||
282 | if (!init_header) { | ||
283 | init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); | ||
284 | if (!init_header) | ||
285 | return -ENOMEM; | ||
286 | init_header->size = 1; | ||
287 | *init_tab_base = init_header; | ||
288 | flag = 1; | ||
289 | } | ||
290 | tail_old = init_header; | ||
291 | while (tail_old->next) | ||
292 | tail_old = tail_old->next; | ||
293 | tail = tail_old; | ||
294 | for (i = 0; i < init_mem->val_attr_num; i++) { | ||
295 | mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); | ||
296 | if (!mem_init) | ||
297 | goto out_err; | ||
298 | mem_init->ae = ae; | ||
299 | mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; | ||
300 | mem_init->value = &mem_val_attr->value; | ||
301 | mem_init->size = 4; | ||
302 | mem_init->next = NULL; | ||
303 | tail->next = mem_init; | ||
304 | tail = mem_init; | ||
305 | init_header->size += qat_hal_get_ins_num(); | ||
306 | mem_val_attr++; | ||
307 | } | ||
308 | return 0; | ||
309 | out_err: | ||
310 | while (tail_old) { | ||
311 | mem_init = tail_old->next; | ||
312 | kfree(tail_old); | ||
313 | tail_old = mem_init; | ||
314 | } | ||
315 | if (flag) | ||
316 | kfree(*init_tab_base); | ||
317 | return -ENOMEM; | ||
318 | } | ||
319 | |||
/*
 * Handle an init-mem entry targeting AE local memory: resolve the AE and
 * validate the range against ICP_QAT_UCLO_MAX_LMEM_REG, then queue the
 * writes on that AE's local-memory batch-init list.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}
334 | |||
/*
 * Handle an init-mem entry targeting AE micro-store memory: resolve the
 * AE, queue the writes on its umem batch-init list, and raise the
 * recorded uword count of every slice image on that AE if this segment
 * references addresses beyond it.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced (byte addr -> uword) */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
		if (obj_handle->ae_data[ae].ae_slices[i].
		    encap_image->uwords_num < uaddr)
			obj_handle->ae_data[ae].ae_slices[i].
			encap_image->uwords_num = uaddr;
	}
	return 0;
}
357 | |||
#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
/*
 * Dispatch one UOF init-mem entry by memory region:
 *  - SRAM:  write each memvar attribute value directly, bounds-checked
 *           against the DH895xCC PE-SRAM BAR size;
 *  - LMEM / UMEM: defer to the respective batch-init queueing helpers.
 * Returns 0 on success, -EINVAL for out-of-range or unknown regions.
 */
static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	unsigned int i;
	struct icp_qat_uof_memvar_attr *mem_val_attr;

	/* the attribute array immediately follows the initmem record */
	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	switch (init_mem->region) {
	case ICP_QAT_UOF_SRAM_REGION:
		if ((init_mem->addr + init_mem->num_in_bytes) >
		    ICP_DH895XCC_PESRAM_BAR_SIZE) {
			pr_err("QAT: initmem on SRAM is out of range");
			return -EINVAL;
		}
		for (i = 0; i < init_mem->val_attr_num; i++) {
			qat_uclo_wr_sram_by_words(handle,
						  init_mem->addr +
						  mem_val_attr->offset_in_byte,
						  &mem_val_attr->value, 4);
			mem_val_attr++;
		}
		break;
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("QAT: initmem region error. region type=0x%x\n",
		       init_mem->region);
		return -EINVAL;
	}
	return 0;
}
399 | |||
/*
 * Fill the parts of the control store not covered by @image's page with
 * the image's fill pattern, on every AE the image is assigned to: the
 * range before the page start and the range after the page end.
 * Returns 0 on success, -ENOMEM if the pattern buffer allocation fails.
 */
static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	uint64_t *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	/* replicate the 64-bit fill pattern across the whole buffer */
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(uint64_t));
	page = image->page;

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
			continue;
		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		/* first uword past the image page's code */
		patt_pos = page->beg_addr_p + page->micro_words_num;

		/* pattern-fill [0, beg_addr_p) ... */
		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		/* ... and [patt_pos, end of effective ustore] */
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}
437 | |||
/*
 * Run all queued memory initialisation for the loaded UOF object:
 * walk the init-mem table (each entry is followed in the image by its
 * variable-length memvar-attribute array), queueing or performing the
 * writes, then flush and free the per-AE local-memory and micro-store
 * batch-init lists.
 * Returns 0 on success, -EINVAL on any failure.
 */
static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		/* advance past this record and its attribute array */
		initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
			(unsigned long)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}
	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}
471 | |||
472 | static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, | ||
473 | char *chunk_id, void *cur) | ||
474 | { | ||
475 | int i; | ||
476 | struct icp_qat_uof_chunkhdr *chunk_hdr = | ||
477 | (struct icp_qat_uof_chunkhdr *) | ||
478 | ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); | ||
479 | |||
480 | for (i = 0; i < obj_hdr->num_chunks; i++) { | ||
481 | if ((cur < (void *)&chunk_hdr[i]) && | ||
482 | !strncmp(chunk_hdr[i].chunk_id, chunk_id, | ||
483 | ICP_QAT_UOF_OBJID_LEN)) { | ||
484 | return &chunk_hdr[i]; | ||
485 | } | ||
486 | } | ||
487 | return NULL; | ||
488 | } | ||
489 | |||
490 | static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) | ||
491 | { | ||
492 | int i; | ||
493 | unsigned int topbit = 1 << 0xF; | ||
494 | unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); | ||
495 | |||
496 | reg ^= inbyte << 0x8; | ||
497 | for (i = 0; i < 0x8; i++) { | ||
498 | if (reg & topbit) | ||
499 | reg = (reg << 1) ^ 0x1021; | ||
500 | else | ||
501 | reg <<= 1; | ||
502 | } | ||
503 | return reg & 0xFFFF; | ||
504 | } | ||
505 | |||
/*
 * Compute the 16-bit CRC of @num bytes at @ptr by folding each byte via
 * qat_uclo_calc_checksum(). A NULL @ptr yields 0.
 */
static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (!ptr)
		return 0;

	while (num--)
		chksum = qat_uclo_calc_checksum(chksum, *ptr++);

	return chksum;
}
515 | |||
/*
 * Locate the file-level chunk named @chunk_id in @buf, verify its CRC
 * against the checksum stored in the file-chunk header, and wrap it in a
 * freshly allocated icp_qat_uclo_objhdr (caller frees).
 * Returns the wrapper, or NULL on checksum mismatch, allocation failure,
 * or if no chunk with that id exists.
 */
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	/* file-chunk headers follow the file header directly */
	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			/* reject the chunk if its payload CRC disagrees */
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}
546 | |||
547 | static unsigned int | ||
548 | qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
549 | struct icp_qat_uof_image *image) | ||
550 | { | ||
551 | struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; | ||
552 | struct icp_qat_uof_objtable *neigh_reg_tab; | ||
553 | struct icp_qat_uof_code_page *code_page; | ||
554 | |||
555 | code_page = (struct icp_qat_uof_code_page *) | ||
556 | ((char *)image + sizeof(struct icp_qat_uof_image)); | ||
557 | uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
558 | code_page->uc_var_tab_offset); | ||
559 | imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + | ||
560 | code_page->imp_var_tab_offset); | ||
561 | imp_expr_tab = (struct icp_qat_uof_objtable *) | ||
562 | (encap_uof_obj->beg_uof + | ||
563 | code_page->imp_expr_tab_offset); | ||
564 | if (uc_var_tab->entry_num || imp_var_tab->entry_num || | ||
565 | imp_expr_tab->entry_num) { | ||
566 | pr_err("QAT: UOF can't contain imported variable to be parsed"); | ||
567 | return -EINVAL; | ||
568 | } | ||
569 | neigh_reg_tab = (struct icp_qat_uof_objtable *) | ||
570 | (encap_uof_obj->beg_uof + | ||
571 | code_page->neigh_reg_tab_offset); | ||
572 | if (neigh_reg_tab->entry_num) { | ||
573 | pr_err("QAT: UOF can't contain shared control store feature"); | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | if (image->numpages > 1) { | ||
577 | pr_err("QAT: UOF can't contain multiple pages"); | ||
578 | return -EINVAL; | ||
579 | } | ||
580 | if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { | ||
581 | pr_err("QAT: UOF can't use shared control store feature"); | ||
582 | return -EFAULT; | ||
583 | } | ||
584 | if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { | ||
585 | pr_err("QAT: UOF can't use reloadable feature"); | ||
586 | return -EFAULT; | ||
587 | } | ||
588 | return 0; | ||
589 | } | ||
590 | |||
/*
 * Populate @page from the single code page of UOF image @img: copy the
 * page metadata, resolve the code area and its uword-block table, and
 * convert each block's file-relative uword offset into an absolute
 * pointer into the mapped UOF buffer.
 */
static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				    *encap_uof_obj,
				    struct icp_qat_uof_image *img,
				    struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	/* the code page record follows the image header directly */
	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	/* block entries follow the table header; the encap view aliases them */
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}
622 | |||
/*
 * Map up to @max_image IMAG chunks from the UOF object into @ae_uimage:
 * for each image, resolve its register, init-regsym and sbreak tables,
 * verify feature compatibility, and map its single code page.
 * Returns the number of images mapped; 0 on error (all pages mapped so
 * far are freed) or when no image chunk is found.
 */
static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
		&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		/* iterate IMAG chunks; passing the previous header resumes */
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		/* entries follow each objtable header directly */
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	/* free only the pages allocated before the failing image */
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}
680 | |||
/*
 * For every enabled AE (0..@max_ae inclusive), bind each UOF image that
 * lists the AE in its assignment mask via qat_uclo_init_ae_data().
 * Returns 0 on success; -EINVAL if binding fails or if no image maps to
 * any enabled AE at all.
 */
static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;	/* set once any image/AE pairing is made */
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;

	for (ae = 0; ae <= max_ae; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		for (i = 0; i < obj_handle->uimage_num; i++) {
			if (!test_bit(ae, (unsigned long *)
			&obj_handle->ae_uimage[i].img_ptr->ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("QAT: uimage uses AE not set");
		return -EINVAL;
	}
	return 0;
}
706 | |||
/*
 * Locate the string-table chunk named @tab_name in @obj_hdr and fill in
 * @str_table: the table length is copied from the chunk, and 'strings'
 * is pointed at the data following the length field.
 * Returns @str_table on success, NULL if the chunk does not exist.
 */
static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		/* memcpy: the chunk's length field may be unaligned */
		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		/* offset of the 'strings' member within the struct layout */
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (unsigned long)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}
727 | |||
728 | static void | ||
729 | qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, | ||
730 | struct icp_qat_uclo_init_mem_table *init_mem_tab) | ||
731 | { | ||
732 | struct icp_qat_uof_chunkhdr *chunk_hdr; | ||
733 | |||
734 | chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, | ||
735 | ICP_QAT_UOF_IMEM, NULL); | ||
736 | if (chunk_hdr) { | ||
737 | memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + | ||
738 | chunk_hdr->offset, sizeof(unsigned int)); | ||
739 | init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) | ||
740 | (encap_uof_obj->beg_uof + chunk_hdr->offset + | ||
741 | sizeof(unsigned int)); | ||
742 | } | ||
743 | } | ||
744 | |||
745 | static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) | ||
746 | { | ||
747 | unsigned int maj_ver, prod_type = obj_handle->prod_type; | ||
748 | |||
749 | if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) { | ||
750 | pr_err("QAT: UOF type 0x%x not match with cur platform 0x%x\n", | ||
751 | obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type); | ||
752 | return -EINVAL; | ||
753 | } | ||
754 | maj_ver = obj_handle->prod_rev & 0xff; | ||
755 | if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) || | ||
756 | (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) { | ||
757 | pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver); | ||
758 | return -EINVAL; | ||
759 | } | ||
760 | return 0; | ||
761 | } | ||
762 | |||
/*
 * Initialize one AE register of the given type to @value, dispatching to
 * the appropriate HAL helper. For the *_ABS variants the context mask is
 * cleared before deliberately falling through to the corresponding *_REL
 * handler.
 * Returns the HAL helper's result, or -EFAULT for unsupported types.
 */
static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		/* fall through: absolute GPRs use the relative path */
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		/* fall through: absolute read-xfer regs use the relative path */
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		/* fall through: absolute write-xfer regs use the relative path */
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("QAT: UOF uses not supported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	/* not reached: every case above returns */
	return 0;
}
802 | |||
/*
 * Apply the image's init-regsym table to AE @ae: write each symbol's
 * value into the named register, either across the context mask derived
 * from the image's context mode (8-ctx -> 0xff, otherwise 0x55) or into
 * one explicit context for ICP_QAT_UOF_INIT_REG_CTX entries.
 * Expression-based initializers are not supported.
 * Returns 0 on success, -EINVAL on invalid context or unsupported type.
 */
static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;	/* all eight contexts */
	else
		ctx_mask = 0x55;	/* even contexts only (4-ctx mode) */

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("QAT: invalid ctx num = 0x%x\n",
				       init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("QAT: INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}
857 | |||
858 | static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) | ||
859 | { | ||
860 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
861 | unsigned int s, ae; | ||
862 | |||
863 | if (obj_handle->global_inited) | ||
864 | return 0; | ||
865 | if (obj_handle->init_mem_tab.entry_num) { | ||
866 | if (qat_uclo_init_memory(handle)) { | ||
867 | pr_err("QAT: initialize memory failed\n"); | ||
868 | return -EINVAL; | ||
869 | } | ||
870 | } | ||
871 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
872 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
873 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
874 | continue; | ||
875 | if (qat_uclo_init_reg_sym(handle, ae, | ||
876 | obj_handle->ae_data[ae]. | ||
877 | ae_slices[s].encap_image)) | ||
878 | return -EINVAL; | ||
879 | } | ||
880 | } | ||
881 | obj_handle->global_inited = 1; | ||
882 | return 0; | ||
883 | } | ||
884 | |||
885 | static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) | ||
886 | { | ||
887 | unsigned char ae, nn_mode, s; | ||
888 | struct icp_qat_uof_image *uof_image; | ||
889 | struct icp_qat_uclo_aedata *ae_data; | ||
890 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
891 | |||
892 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
893 | if (!test_bit(ae, | ||
894 | (unsigned long *)&handle->hal_handle->ae_mask)) | ||
895 | continue; | ||
896 | ae_data = &obj_handle->ae_data[ae]; | ||
897 | for (s = 0; s < min_t(unsigned int, ae_data->slice_num, | ||
898 | ICP_QAT_UCLO_MAX_CTX); s++) { | ||
899 | if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) | ||
900 | continue; | ||
901 | uof_image = ae_data->ae_slices[s].encap_image->img_ptr; | ||
902 | if (qat_hal_set_ae_ctx_mode(handle, ae, | ||
903 | (char)ICP_QAT_CTX_MODE | ||
904 | (uof_image->ae_mode))) { | ||
905 | pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); | ||
906 | return -EFAULT; | ||
907 | } | ||
908 | nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); | ||
909 | if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { | ||
910 | pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); | ||
911 | return -EFAULT; | ||
912 | } | ||
913 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, | ||
914 | (char)ICP_QAT_LOC_MEM0_MODE | ||
915 | (uof_image->ae_mode))) { | ||
916 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); | ||
917 | return -EFAULT; | ||
918 | } | ||
919 | if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, | ||
920 | (char)ICP_QAT_LOC_MEM1_MODE | ||
921 | (uof_image->ae_mode))) { | ||
922 | pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); | ||
923 | return -EFAULT; | ||
924 | } | ||
925 | } | ||
926 | } | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) | ||
931 | { | ||
932 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
933 | struct icp_qat_uclo_encapme *image; | ||
934 | int a; | ||
935 | |||
936 | for (a = 0; a < obj_handle->uimage_num; a++) { | ||
937 | image = &obj_handle->ae_uimage[a]; | ||
938 | image->uwords_num = image->page->beg_addr_p + | ||
939 | image->page->micro_words_num; | ||
940 | } | ||
941 | } | ||
942 | |||
943 | static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
944 | { | ||
945 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
946 | unsigned int ae; | ||
947 | |||
948 | obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t), | ||
949 | GFP_KERNEL); | ||
950 | if (!obj_handle->uword_buf) | ||
951 | return -ENOMEM; | ||
952 | obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; | ||
953 | obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) | ||
954 | obj_handle->obj_hdr->file_buff; | ||
955 | obj_handle->uword_in_bytes = 6; | ||
956 | obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE; | ||
957 | obj_handle->prod_rev = PID_MAJOR_REV | | ||
958 | (PID_MINOR_REV & handle->hal_handle->revision_id); | ||
959 | if (qat_uclo_check_uof_compat(obj_handle)) { | ||
960 | pr_err("QAT: UOF incompatible\n"); | ||
961 | return -EINVAL; | ||
962 | } | ||
963 | obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; | ||
964 | if (!obj_handle->obj_hdr->file_buff || | ||
965 | !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, | ||
966 | &obj_handle->str_table)) { | ||
967 | pr_err("QAT: UOF doesn't have effective images\n"); | ||
968 | goto out_err; | ||
969 | } | ||
970 | obj_handle->uimage_num = | ||
971 | qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, | ||
972 | ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); | ||
973 | if (!obj_handle->uimage_num) | ||
974 | goto out_err; | ||
975 | if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { | ||
976 | pr_err("QAT: Bad object\n"); | ||
977 | goto out_check_uof_aemask_err; | ||
978 | } | ||
979 | qat_uclo_init_uword_num(handle); | ||
980 | qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, | ||
981 | &obj_handle->init_mem_tab); | ||
982 | if (qat_uclo_set_ae_mode(handle)) | ||
983 | goto out_check_uof_aemask_err; | ||
984 | return 0; | ||
985 | out_check_uof_aemask_err: | ||
986 | for (ae = 0; ae < obj_handle->uimage_num; ae++) | ||
987 | kfree(obj_handle->ae_uimage[ae].page); | ||
988 | out_err: | ||
989 | kfree(obj_handle->uword_buf); | ||
990 | return -EFAULT; | ||
991 | } | ||
992 | |||
993 | int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, | ||
994 | void *addr_ptr, int mem_size) | ||
995 | { | ||
996 | struct icp_qat_uof_filehdr *filehdr; | ||
997 | struct icp_qat_uclo_objhandle *objhdl; | ||
998 | |||
999 | BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= | ||
1000 | (sizeof(handle->hal_handle->ae_mask) * 8)); | ||
1001 | |||
1002 | if (!handle || !addr_ptr || mem_size < 24) | ||
1003 | return -EINVAL; | ||
1004 | objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); | ||
1005 | if (!objhdl) | ||
1006 | return -ENOMEM; | ||
1007 | objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); | ||
1008 | if (!objhdl->obj_buf) | ||
1009 | goto out_objbuf_err; | ||
1010 | filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; | ||
1011 | if (qat_uclo_check_format(filehdr)) | ||
1012 | goto out_objhdr_err; | ||
1013 | objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, | ||
1014 | ICP_QAT_UOF_OBJS); | ||
1015 | if (!objhdl->obj_hdr) { | ||
1016 | pr_err("QAT: object file chunk is null\n"); | ||
1017 | goto out_objhdr_err; | ||
1018 | } | ||
1019 | handle->obj_handle = objhdl; | ||
1020 | if (qat_uclo_parse_uof_obj(handle)) | ||
1021 | goto out_overlay_obj_err; | ||
1022 | return 0; | ||
1023 | |||
1024 | out_overlay_obj_err: | ||
1025 | handle->obj_handle = NULL; | ||
1026 | kfree(objhdl->obj_hdr); | ||
1027 | out_objhdr_err: | ||
1028 | kfree(objhdl->obj_buf); | ||
1029 | out_objbuf_err: | ||
1030 | kfree(objhdl); | ||
1031 | return -ENOMEM; | ||
1032 | } | ||
1033 | |||
1034 | void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle) | ||
1035 | { | ||
1036 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1037 | unsigned int a; | ||
1038 | |||
1039 | if (!obj_handle) | ||
1040 | return; | ||
1041 | |||
1042 | kfree(obj_handle->uword_buf); | ||
1043 | for (a = 0; a < obj_handle->uimage_num; a++) | ||
1044 | kfree(obj_handle->ae_uimage[a].page); | ||
1045 | |||
1046 | for (a = 0; a < handle->hal_handle->ae_max_num; a++) | ||
1047 | qat_uclo_free_ae_data(&obj_handle->ae_data[a]); | ||
1048 | |||
1049 | kfree(obj_handle->obj_hdr); | ||
1050 | kfree(obj_handle->obj_buf); | ||
1051 | kfree(obj_handle); | ||
1052 | handle->obj_handle = NULL; | ||
1053 | } | ||
1054 | |||
1055 | static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, | ||
1056 | struct icp_qat_uclo_encap_page *encap_page, | ||
1057 | uint64_t *uword, unsigned int addr_p, | ||
1058 | unsigned int raddr, uint64_t fill) | ||
1059 | { | ||
1060 | uint64_t uwrd = 0; | ||
1061 | unsigned int i; | ||
1062 | |||
1063 | if (!encap_page) { | ||
1064 | *uword = fill; | ||
1065 | return; | ||
1066 | } | ||
1067 | for (i = 0; i < encap_page->uwblock_num; i++) { | ||
1068 | if (raddr >= encap_page->uwblock[i].start_addr && | ||
1069 | raddr <= encap_page->uwblock[i].start_addr + | ||
1070 | encap_page->uwblock[i].words_num - 1) { | ||
1071 | raddr -= encap_page->uwblock[i].start_addr; | ||
1072 | raddr *= obj_handle->uword_in_bytes; | ||
1073 | memcpy(&uwrd, (void *)(((unsigned long) | ||
1074 | encap_page->uwblock[i].micro_words) + raddr), | ||
1075 | obj_handle->uword_in_bytes); | ||
1076 | uwrd = uwrd & 0xbffffffffffull; | ||
1077 | } | ||
1078 | } | ||
1079 | *uword = uwrd; | ||
1080 | if (*uword == INVLD_UWORD) | ||
1081 | *uword = fill; | ||
1082 | } | ||
1083 | |||
1084 | static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, | ||
1085 | struct icp_qat_uclo_encap_page | ||
1086 | *encap_page, unsigned int ae) | ||
1087 | { | ||
1088 | unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; | ||
1089 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1090 | uint64_t fill_pat; | ||
1091 | |||
1092 | /* load the page starting at appropriate ustore address */ | ||
1093 | /* get fill-pattern from an image -- they are all the same */ | ||
1094 | memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, | ||
1095 | sizeof(uint64_t)); | ||
1096 | uw_physical_addr = encap_page->beg_addr_p; | ||
1097 | uw_relative_addr = 0; | ||
1098 | words_num = encap_page->micro_words_num; | ||
1099 | while (words_num) { | ||
1100 | if (words_num < UWORD_CPYBUF_SIZE) | ||
1101 | cpylen = words_num; | ||
1102 | else | ||
1103 | cpylen = UWORD_CPYBUF_SIZE; | ||
1104 | |||
1105 | /* load the buffer */ | ||
1106 | for (i = 0; i < cpylen; i++) | ||
1107 | qat_uclo_fill_uwords(obj_handle, encap_page, | ||
1108 | &obj_handle->uword_buf[i], | ||
1109 | uw_physical_addr + i, | ||
1110 | uw_relative_addr + i, fill_pat); | ||
1111 | |||
1112 | /* copy the buffer to ustore */ | ||
1113 | qat_hal_wr_uwords(handle, (unsigned char)ae, | ||
1114 | uw_physical_addr, cpylen, | ||
1115 | obj_handle->uword_buf); | ||
1116 | |||
1117 | uw_physical_addr += cpylen; | ||
1118 | uw_relative_addr += cpylen; | ||
1119 | words_num -= cpylen; | ||
1120 | } | ||
1121 | } | ||
1122 | |||
1123 | static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, | ||
1124 | struct icp_qat_uof_image *image) | ||
1125 | { | ||
1126 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1127 | unsigned int ctx_mask, s; | ||
1128 | struct icp_qat_uclo_page *page; | ||
1129 | unsigned char ae; | ||
1130 | int ctx; | ||
1131 | |||
1132 | if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) | ||
1133 | ctx_mask = 0xff; | ||
1134 | else | ||
1135 | ctx_mask = 0x55; | ||
1136 | /* load the default page and set assigned CTX PC | ||
1137 | * to the entrypoint address */ | ||
1138 | for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) { | ||
1139 | if (!test_bit(ae, (unsigned long *)&image->ae_assigned)) | ||
1140 | continue; | ||
1141 | /* find the slice to which this image is assigned */ | ||
1142 | for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) { | ||
1143 | if (image->ctx_assigned & obj_handle->ae_data[ae]. | ||
1144 | ae_slices[s].ctx_mask_assigned) | ||
1145 | break; | ||
1146 | } | ||
1147 | if (s >= obj_handle->ae_data[ae].slice_num) | ||
1148 | continue; | ||
1149 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1150 | if (!page->encap_page->def_page) | ||
1151 | continue; | ||
1152 | qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); | ||
1153 | |||
1154 | page = obj_handle->ae_data[ae].ae_slices[s].page; | ||
1155 | for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) | ||
1156 | obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] = | ||
1157 | (ctx_mask & (1 << ctx)) ? page : NULL; | ||
1158 | qat_hal_set_live_ctx(handle, (unsigned char)ae, | ||
1159 | image->ctx_assigned); | ||
1160 | qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, | ||
1161 | image->entry_address); | ||
1162 | } | ||
1163 | } | ||
1164 | |||
1165 | int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) | ||
1166 | { | ||
1167 | struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; | ||
1168 | unsigned int i; | ||
1169 | |||
1170 | if (qat_uclo_init_globals(handle)) | ||
1171 | return -EINVAL; | ||
1172 | for (i = 0; i < obj_handle->uimage_num; i++) { | ||
1173 | if (!obj_handle->ae_uimage[i].img_ptr) | ||
1174 | return -EINVAL; | ||
1175 | if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) | ||
1176 | return -EINVAL; | ||
1177 | qat_uclo_wr_uimage_page(handle, | ||
1178 | obj_handle->ae_uimage[i].img_ptr); | ||
1179 | } | ||
1180 | return 0; | ||
1181 | } | ||