author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500
commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /drivers/staging/sep
parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Diffstat (limited to 'drivers/staging/sep')
-rw-r--r-- | drivers/staging/sep/sep_driver.c | 2930
1 file changed, 2930 insertions, 0 deletions
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
new file mode 100644
index 00000000000..bf7286e01a3
--- /dev/null
+++ b/drivers/staging/sep/sep_driver.c
@@ -0,0 +1,2930 @@
1 | /* | ||
2 | * | ||
3 | * sep_driver.c - Security Processor Driver main group of functions | ||
4 | * | ||
5 | * Copyright(c) 2009,2010 Intel Corporation. All rights reserved. | ||
6 | * Contributions(c) 2009,2010 Discretix. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the Free | ||
10 | * Software Foundation; version 2 of the License. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., 59 | ||
19 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
20 | * | ||
21 | * CONTACTS: | ||
22 | * | ||
23 | * Mark Allyn mark.a.allyn@intel.com | ||
24 | * Jayant Mangalampalli jayant.mangalampalli@intel.com | ||
25 | * | ||
26 | * CHANGES: | ||
27 | * | ||
28 | * 2009.06.26 Initial publish | ||
29 | * 2010.09.14 Upgrade to Medfield | ||
30 | * | ||
31 | */ | ||
32 | #include <linux/init.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/miscdevice.h> | ||
35 | #include <linux/fs.h> | ||
36 | #include <linux/cdev.h> | ||
37 | #include <linux/kdev_t.h> | ||
38 | #include <linux/mutex.h> | ||
39 | #include <linux/sched.h> | ||
40 | #include <linux/mm.h> | ||
41 | #include <linux/poll.h> | ||
42 | #include <linux/wait.h> | ||
43 | #include <linux/pci.h> | ||
44 | #include <linux/firmware.h> | ||
45 | #include <linux/slab.h> | ||
46 | #include <linux/ioctl.h> | ||
47 | #include <asm/current.h> | ||
48 | #include <linux/ioport.h> | ||
49 | #include <linux/io.h> | ||
50 | #include <linux/interrupt.h> | ||
51 | #include <linux/pagemap.h> | ||
52 | #include <asm/cacheflush.h> | ||
53 | #include <linux/delay.h> | ||
54 | #include <linux/jiffies.h> | ||
55 | #include <linux/rar_register.h> | ||
56 | |||
57 | #include "sep_driver_hw_defs.h" | ||
58 | #include "sep_driver_config.h" | ||
59 | #include "sep_driver_api.h" | ||
60 | #include "sep_dev.h" | ||
61 | |||
62 | /*---------------------------------------- | ||
63 | DEFINES | ||
64 | -----------------------------------------*/ | ||
65 | |||
66 | #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000 | ||
67 | |||
68 | /*-------------------------------------------- | ||
69 | GLOBAL variables | ||
70 | --------------------------------------------*/ | ||
71 | |||
72 | /* Keep this a single static object for now to keep the conversion easy */ | ||
73 | |||
74 | static struct sep_device *sep_dev; | ||
75 | |||
76 | /** | ||
77 | * sep_dump_message - dump the message that is pending | ||
78 | * @sep: SEP device | ||
79 | */ | ||
80 | static void sep_dump_message(struct sep_device *sep) | ||
81 | { | ||
82 | int count; | ||
83 | u32 *p = sep->shared_addr; | ||
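/*
 * The loop below dumps the first 12 32-bit words of the message area;
 * 'count' is the byte offset of the word being printed while 'p' walks
 * the words one at a time.
 */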
84 | for (count = 0; count < 12 * 4; count += 4) | ||
85 | dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n", | ||
86 | count, *p++); | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * sep_map_and_alloc_shared_area - allocate shared block | ||
91 | * @sep: security processor | ||
92 | * Allocates sep->shared_size bytes of DMA-coherent memory for the shared area | ||
93 | */ | ||
94 | static int sep_map_and_alloc_shared_area(struct sep_device *sep) | ||
95 | { | ||
96 | sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, | ||
97 | sep->shared_size, | ||
98 | &sep->shared_bus, GFP_KERNEL); | ||
99 | |||
100 | if (!sep->shared_addr) { | ||
101 | dev_warn(&sep->pdev->dev, | ||
102 | "shared memory dma_alloc_coherent failed\n"); | ||
103 | return -ENOMEM; | ||
104 | } | ||
105 | dev_dbg(&sep->pdev->dev, | ||
106 | "shared_addr %zx bytes @%p (bus %llx)\n", | ||
107 | sep->shared_size, sep->shared_addr, | ||
108 | (unsigned long long)sep->shared_bus); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | /** | ||
113 | * sep_unmap_and_free_shared_area - free shared block | ||
114 | * @sep: security processor | ||
115 | */ | ||
116 | static void sep_unmap_and_free_shared_area(struct sep_device *sep) | ||
117 | { | ||
118 | dma_free_coherent(&sep->pdev->dev, sep->shared_size, | ||
119 | sep->shared_addr, sep->shared_bus); | ||
120 | } | ||
121 | |||
122 | /** | ||
123 | * sep_shared_bus_to_virt - convert bus/virt addresses | ||
124 | * @sep: pointer to struct sep_device | ||
125 | * @bus_address: address to convert | ||
126 | * | ||
127 | * Returns virtual address inside the shared area according | ||
128 | * to the bus address. | ||
129 | */ | ||
130 | static void *sep_shared_bus_to_virt(struct sep_device *sep, | ||
131 | dma_addr_t bus_address) | ||
132 | { | ||
133 | return sep->shared_addr + (bus_address - sep->shared_bus); | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * sep_singleton_open - open the singleton device | ||
138 | * @inode_ptr: struct inode * | ||
139 | * @file_ptr: struct file * | ||
140 | * | ||
141 | * Called when the user opens the singleton device interface | ||
142 | */ | ||
143 | static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr) | ||
144 | { | ||
145 | struct sep_device *sep; | ||
146 | |||
147 | /* | ||
148 | * Get the SEP device structure and use it for the | ||
149 | * private_data field in filp for other methods | ||
150 | */ | ||
151 | sep = sep_dev; | ||
152 | |||
153 | file_ptr->private_data = sep; | ||
154 | |||
155 | if (test_and_set_bit(0, &sep->singleton_access_flag)) | ||
156 | return -EBUSY; | ||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * sep_open - device open method | ||
162 | * @inode: inode of SEP device | ||
163 | * @filp: file handle to SEP device | ||
164 | * | ||
165 | * Open method for the SEP device. Called when userspace opens | ||
166 | * the SEP device node. | ||
167 | * | ||
168 | * Returns zero on success otherwise an error code. | ||
169 | */ | ||
170 | static int sep_open(struct inode *inode, struct file *filp) | ||
171 | { | ||
172 | struct sep_device *sep; | ||
173 | |||
174 | /* | ||
175 | * Get the SEP device structure and use it for the | ||
176 | * private_data field in filp for other methods | ||
177 | */ | ||
178 | sep = sep_dev; | ||
179 | filp->private_data = sep; | ||
180 | |||
181 | /* Anyone can open; locking takes place at transaction level */ | ||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | /** | ||
186 | * sep_singleton_release - close a SEP singleton device | ||
187 | * @inode: inode of SEP device | ||
188 | * @filp: file handle being closed | ||
189 | * | ||
190 | * Called on the final close of a SEP device. As the open protects against | ||
191 | * multiple simultaneous opens, this method is called when the | ||
192 | * final reference to the open handle is dropped. | ||
193 | */ | ||
194 | static int sep_singleton_release(struct inode *inode, struct file *filp) | ||
195 | { | ||
196 | struct sep_device *sep = filp->private_data; | ||
197 | |||
198 | clear_bit(0, &sep->singleton_access_flag); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * sep_request_daemon_open - request daemon open method | ||
204 | * @inode: inode of SEP device | ||
205 | * @filp: file handle to SEP device | ||
206 | * | ||
207 | * Open method for the SEP request daemon. Called when | ||
208 | * request daemon in userspace opens the SEP device node. | ||
209 | * | ||
210 | * Returns zero on success otherwise an error code. | ||
211 | */ | ||
212 | static int sep_request_daemon_open(struct inode *inode, struct file *filp) | ||
213 | { | ||
214 | struct sep_device *sep = sep_dev; | ||
215 | int error = 0; | ||
216 | |||
217 | filp->private_data = sep; | ||
218 | |||
219 | /* There is supposed to be only one request daemon */ | ||
220 | if (test_and_set_bit(0, &sep->request_daemon_open)) | ||
221 | error = -EBUSY; | ||
222 | return error; | ||
223 | } | ||
224 | |||
225 | /** | ||
226 | * sep_request_daemon_release - close a SEP daemon | ||
227 | * @inode: inode of SEP device | ||
228 | * @filp: file handle being closed | ||
229 | * | ||
230 | * Called on the final close of a SEP daemon. | ||
231 | */ | ||
232 | static int sep_request_daemon_release(struct inode *inode, struct file *filp) | ||
233 | { | ||
234 | struct sep_device *sep = filp->private_data; | ||
235 | |||
236 | dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n", | ||
237 | current->pid); | ||
238 | |||
239 | /* Clear the request_daemon_open flag */ | ||
240 | clear_bit(0, &sep->request_daemon_open); | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * sep_req_daemon_send_reply_command_handler - poke the SEP | ||
246 | * @sep: struct sep_device * | ||
247 | * | ||
248 | * This function raises an interrupt to the SEP to signal that it has a | ||
249 | * new command from the HOST | ||
250 | */ | ||
251 | static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep) | ||
252 | { | ||
253 | unsigned long lck_flags; | ||
254 | |||
255 | sep_dump_message(sep); | ||
256 | |||
257 | /* Counters are lockable region */ | ||
258 | spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); | ||
259 | sep->send_ct++; | ||
260 | sep->reply_ct++; | ||
261 | |||
262 | /* Send the interrupt to SEP */ | ||
263 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct); | ||
264 | sep->send_ct++; | ||
265 | |||
266 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
267 | |||
268 | dev_dbg(&sep->pdev->dev, | ||
269 | "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n", | ||
270 | sep->send_ct, sep->reply_ct); | ||
271 | |||
272 | return 0; | ||
273 | } | ||
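/*
 * Note on the GPR usage visible in this file: the host signals a new
 * command to the SEP by writing HW_HOST_HOST_SEP_GPR0_REG_ADDR, the
 * request daemon replies by writing the updated send_ct to
 * HW_HOST_HOST_SEP_GPR2_REG_ADDR, and the poll handlers read SEP-to-host
 * status from HW_HOST_SEP_HOST_GPR2_REG_ADDR (bit 30 = printf request,
 * bit 31 = SEP request) and error status from
 * HW_HOST_SEP_HOST_GPR3_REG_ADDR.
 */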
274 | |||
275 | |||
276 | /** | ||
277 | * sep_free_dma_table_data_handler - free DMA table | ||
278 | * @sep: pointer to struct sep_device | ||
279 | * | ||
280 | * Handles the request to free the DMA tables used for synchronous actions | ||
281 | */ | ||
282 | static int sep_free_dma_table_data_handler(struct sep_device *sep) | ||
283 | { | ||
284 | int count; | ||
285 | int dcb_counter; | ||
286 | /* Pointer to the current dma_resource struct */ | ||
287 | struct sep_dma_resource *dma; | ||
288 | |||
289 | for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) { | ||
290 | dma = &sep->dma_res_arr[dcb_counter]; | ||
291 | |||
292 | /* Unmap and free input map array */ | ||
293 | if (dma->in_map_array) { | ||
294 | for (count = 0; count < dma->in_num_pages; count++) { | ||
295 | dma_unmap_page(&sep->pdev->dev, | ||
296 | dma->in_map_array[count].dma_addr, | ||
297 | dma->in_map_array[count].size, | ||
298 | DMA_TO_DEVICE); | ||
299 | } | ||
300 | kfree(dma->in_map_array); | ||
301 | } | ||
302 | |||
303 | /* Unmap and free the output map array */ | ||
304 | if (dma->out_map_array) { | ||
305 | for (count = 0; count < dma->out_num_pages; count++) { | ||
306 | dma_unmap_page(&sep->pdev->dev, | ||
307 | dma->out_map_array[count].dma_addr, | ||
308 | dma->out_map_array[count].size, | ||
309 | DMA_FROM_DEVICE); | ||
310 | } | ||
311 | kfree(dma->out_map_array); | ||
312 | } | ||
313 | |||
314 | /* Release the page cache references for the input pages */ | ||
315 | if (dma->in_page_array) { | ||
316 | for (count = 0; count < dma->in_num_pages; count++) { | ||
317 | flush_dcache_page(dma->in_page_array[count]); | ||
318 | page_cache_release(dma->in_page_array[count]); | ||
319 | } | ||
320 | kfree(dma->in_page_array); | ||
321 | } | ||
322 | |||
323 | if (dma->out_page_array) { | ||
324 | for (count = 0; count < dma->out_num_pages; count++) { | ||
325 | if (!PageReserved(dma->out_page_array[count])) | ||
326 | SetPageDirty(dma->out_page_array[count]); | ||
327 | flush_dcache_page(dma->out_page_array[count]); | ||
328 | page_cache_release(dma->out_page_array[count]); | ||
329 | } | ||
330 | kfree(dma->out_page_array); | ||
331 | } | ||
332 | |||
333 | /* Reset all the values */ | ||
334 | dma->in_page_array = NULL; | ||
335 | dma->out_page_array = NULL; | ||
336 | dma->in_num_pages = 0; | ||
337 | dma->out_num_pages = 0; | ||
338 | dma->in_map_array = NULL; | ||
339 | dma->out_map_array = NULL; | ||
340 | dma->in_map_num_entries = 0; | ||
341 | dma->out_map_num_entries = 0; | ||
342 | } | ||
343 | |||
344 | sep->nr_dcb_creat = 0; | ||
345 | sep->num_lli_tables_created = 0; | ||
346 | |||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * sep_request_daemon_mmap - maps the shared area to user space | ||
352 | * @filp: pointer to struct file | ||
353 | * @vma: pointer to vm_area_struct | ||
354 | * | ||
355 | * Called by the kernel when the daemon attempts an mmap() syscall | ||
356 | * using our handle. | ||
357 | */ | ||
358 | static int sep_request_daemon_mmap(struct file *filp, | ||
359 | struct vm_area_struct *vma) | ||
360 | { | ||
361 | struct sep_device *sep = filp->private_data; | ||
362 | dma_addr_t bus_address; | ||
363 | int error = 0; | ||
364 | |||
365 | if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { | ||
366 | error = -EINVAL; | ||
367 | goto end_function; | ||
368 | } | ||
369 | |||
370 | /* Get physical address */ | ||
371 | bus_address = sep->shared_bus; | ||
372 | |||
373 | if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT, | ||
374 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) { | ||
375 | |||
376 | dev_warn(&sep->pdev->dev, "remap_page_range failed\n"); | ||
377 | error = -EAGAIN; | ||
378 | goto end_function; | ||
379 | } | ||
380 | |||
381 | end_function: | ||
382 | return error; | ||
383 | } | ||
384 | |||
385 | /** | ||
386 | * sep_request_daemon_poll - poll implementation | ||
387 | * @sep: struct sep_device * for current SEP device | ||
388 | * @filp: struct file * for open file | ||
389 | * @wait: poll_table * for poll | ||
390 | * | ||
391 | * Called when our device is part of a poll() or select() syscall | ||
392 | */ | ||
393 | static unsigned int sep_request_daemon_poll(struct file *filp, | ||
394 | poll_table *wait) | ||
395 | { | ||
396 | u32 mask = 0; | ||
397 | /* GPR2 register */ | ||
398 | u32 retval2; | ||
399 | unsigned long lck_flags; | ||
400 | struct sep_device *sep = filp->private_data; | ||
401 | |||
402 | poll_wait(filp, &sep->event_request_daemon, wait); | ||
403 | |||
404 | dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n", | ||
405 | sep->send_ct, sep->reply_ct); | ||
406 | |||
407 | spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); | ||
408 | /* Check if the data is ready */ | ||
409 | if (sep->send_ct == sep->reply_ct) { | ||
410 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
411 | |||
412 | retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
413 | dev_dbg(&sep->pdev->dev, | ||
414 | "daemon poll: data check (GPR2) is %x\n", retval2); | ||
415 | |||
416 | /* Check if PRINT request */ | ||
417 | if ((retval2 >> 30) & 0x1) { | ||
418 | dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n"); | ||
419 | mask |= POLLIN; | ||
420 | goto end_function; | ||
421 | } | ||
422 | /* Check if NVS request */ | ||
423 | if (retval2 >> 31) { | ||
424 | dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n"); | ||
425 | mask |= POLLPRI | POLLWRNORM; | ||
426 | } | ||
427 | } else { | ||
428 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
429 | dev_dbg(&sep->pdev->dev, | ||
430 | "daemon poll: no reply received; returning 0\n"); | ||
431 | mask = 0; | ||
432 | } | ||
433 | end_function: | ||
434 | return mask; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * sep_release - close a SEP device | ||
439 | * @inode: inode of SEP device | ||
440 | * @filp: file handle being closed | ||
441 | * | ||
442 | * Called on the final close of a SEP device. | ||
443 | */ | ||
444 | static int sep_release(struct inode *inode, struct file *filp) | ||
445 | { | ||
446 | struct sep_device *sep = filp->private_data; | ||
447 | |||
448 | dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid); | ||
449 | |||
450 | mutex_lock(&sep->sep_mutex); | ||
451 | /* Is this the process that has a transaction open? | ||
452 | * If so, let's reset pid_doing_transaction to 0 and | ||
453 | * clear the in use flags, and then wake up sep_event | ||
454 | * so that other processes can do transactions | ||
455 | */ | ||
456 | if (sep->pid_doing_transaction == current->pid) { | ||
457 | clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); | ||
458 | clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags); | ||
459 | sep_free_dma_table_data_handler(sep); | ||
460 | wake_up(&sep->event); | ||
461 | sep->pid_doing_transaction = 0; | ||
462 | } | ||
463 | |||
464 | mutex_unlock(&sep->sep_mutex); | ||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * sep_mmap - maps the shared area to user space | ||
470 | * @filp: pointer to struct file | ||
471 | * @vma: pointer to vm_area_struct | ||
472 | * | ||
473 | * Called on an mmap of our space via the normal SEP device | ||
474 | */ | ||
475 | static int sep_mmap(struct file *filp, struct vm_area_struct *vma) | ||
476 | { | ||
477 | dma_addr_t bus_addr; | ||
478 | struct sep_device *sep = filp->private_data; | ||
479 | unsigned long error = 0; | ||
480 | |||
481 | /* Set the transaction busy (own the device) */ | ||
482 | wait_event_interruptible(sep->event, | ||
483 | test_and_set_bit(SEP_MMAP_LOCK_BIT, | ||
484 | &sep->in_use_flags) == 0); | ||
485 | |||
486 | if (signal_pending(current)) { | ||
487 | error = -EINTR; | ||
488 | goto end_function_with_error; | ||
489 | } | ||
490 | /* | ||
491 | * The pid_doing_transaction indicates that this process | ||
492 | * now owns the facilities to perform a transaction with | ||
493 | * the SEP. While this process is performing a transaction, | ||
494 | * no other process who has the SEP device open can perform | ||
495 | * any transactions. This method allows more than one process | ||
496 | * to have the device open at any given time, which provides | ||
497 | * finer granularity for device utilization by multiple | ||
498 | * processes. | ||
499 | */ | ||
500 | mutex_lock(&sep->sep_mutex); | ||
501 | sep->pid_doing_transaction = current->pid; | ||
502 | mutex_unlock(&sep->sep_mutex); | ||
503 | |||
504 | /* Zero the pools and the number of data pool allocation pointers */ | ||
505 | sep->data_pool_bytes_allocated = 0; | ||
506 | sep->num_of_data_allocations = 0; | ||
507 | |||
508 | /* | ||
509 | * Check that the size of the mapped range does not exceed the size of the message | ||
510 | * shared area | ||
511 | */ | ||
512 | if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) { | ||
513 | error = -EINVAL; | ||
514 | goto end_function_with_error; | ||
515 | } | ||
516 | |||
517 | dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr); | ||
518 | |||
519 | /* Get bus address */ | ||
520 | bus_addr = sep->shared_bus; | ||
521 | |||
522 | if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, | ||
523 | vma->vm_end - vma->vm_start, vma->vm_page_prot)) { | ||
524 | dev_warn(&sep->pdev->dev, "remap_page_range failed\n"); | ||
525 | error = -EAGAIN; | ||
526 | goto end_function_with_error; | ||
527 | } | ||
528 | goto end_function; | ||
529 | |||
530 | end_function_with_error: | ||
531 | /* Clear the bit */ | ||
532 | clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); | ||
533 | mutex_lock(&sep->sep_mutex); | ||
534 | sep->pid_doing_transaction = 0; | ||
535 | mutex_unlock(&sep->sep_mutex); | ||
536 | |||
537 | /* Raise event for stuck contexts */ | ||
538 | |||
539 | wake_up(&sep->event); | ||
540 | |||
541 | end_function: | ||
542 | return error; | ||
543 | } | ||
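/*
 * sep_mmap therefore does two jobs: it maps the shared message area into
 * the caller's address space and it takes transaction ownership
 * (SEP_MMAP_LOCK_BIT plus pid_doing_transaction), which sep_release and
 * the error path above hand back by clearing the bit and waking sep->event.
 */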
544 | |||
545 | /** | ||
546 | * sep_poll - poll handler | ||
547 | * @filp: pointer to struct file | ||
548 | * @wait: pointer to poll_table | ||
549 | * | ||
550 | * Called by the OS when the kernel is asked to do a poll on | ||
551 | * a SEP file handle. | ||
552 | */ | ||
553 | static unsigned int sep_poll(struct file *filp, poll_table *wait) | ||
554 | { | ||
555 | u32 mask = 0; | ||
556 | u32 retval = 0; | ||
557 | u32 retval2 = 0; | ||
558 | unsigned long lck_flags; | ||
559 | |||
560 | struct sep_device *sep = filp->private_data; | ||
561 | |||
562 | /* Am I the process that owns the transaction? */ | ||
563 | mutex_lock(&sep->sep_mutex); | ||
564 | if (current->pid != sep->pid_doing_transaction) { | ||
565 | dev_dbg(&sep->pdev->dev, "poll; wrong pid\n"); | ||
566 | mask = POLLERR; | ||
567 | mutex_unlock(&sep->sep_mutex); | ||
568 | goto end_function; | ||
569 | } | ||
570 | mutex_unlock(&sep->sep_mutex); | ||
571 | |||
572 | /* Check if send command or send_reply were activated previously */ | ||
573 | if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) { | ||
574 | mask = POLLERR; | ||
575 | goto end_function; | ||
576 | } | ||
577 | |||
578 | /* Add the event to the polling wait table */ | ||
579 | dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n"); | ||
580 | |||
581 | poll_wait(filp, &sep->event, wait); | ||
582 | |||
583 | dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n", | ||
584 | sep->send_ct, sep->reply_ct); | ||
585 | |||
586 | /* Check if error occurred during poll */ | ||
587 | retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR); | ||
588 | if (retval2 != 0x0) { | ||
589 | dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2); | ||
590 | mask |= POLLERR; | ||
591 | goto end_function; | ||
592 | } | ||
593 | |||
594 | spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); | ||
595 | |||
596 | if (sep->send_ct == sep->reply_ct) { | ||
597 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
598 | retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
599 | dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n", | ||
600 | retval); | ||
601 | |||
602 | /* Check if printf request */ | ||
603 | if ((retval >> 30) & 0x1) { | ||
604 | dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n"); | ||
605 | wake_up(&sep->event_request_daemon); | ||
606 | goto end_function; | ||
607 | } | ||
608 | |||
609 | /* Check whether this is a SEP reply or a SEP request */ | ||
610 | if (retval >> 31) { | ||
611 | dev_dbg(&sep->pdev->dev, "poll: SEP request\n"); | ||
612 | wake_up(&sep->event_request_daemon); | ||
613 | } else { | ||
614 | dev_dbg(&sep->pdev->dev, "poll: normal return\n"); | ||
615 | /* In case it was set again by send_reply_command */ | ||
616 | clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags); | ||
617 | sep_dump_message(sep); | ||
618 | dev_dbg(&sep->pdev->dev, | ||
619 | "poll; SEP reply POLLIN | POLLRDNORM\n"); | ||
620 | mask |= POLLIN | POLLRDNORM; | ||
621 | } | ||
622 | } else { | ||
623 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
624 | dev_dbg(&sep->pdev->dev, | ||
625 | "poll; no reply received; returning mask of 0\n"); | ||
626 | mask = 0; | ||
627 | } | ||
628 | |||
629 | end_function: | ||
630 | return mask; | ||
631 | } | ||
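/*
 * In sep_poll above, POLLIN | POLLRDNORM is only reported once GPR2 shows
 * a reply addressed to the host (neither the printf bit 30 nor the request
 * bit 31 is set); printf and SEP requests wake the request daemon instead,
 * and a non-zero GPR3 is reported as POLLERR.
 */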
632 | |||
633 | /** | ||
634 | * sep_time_address - address in SEP memory of time | ||
635 | * @sep: SEP device we want the address from | ||
636 | * | ||
637 | * Return the address of the two dwords in memory used for time | ||
638 | * setting. | ||
639 | */ | ||
640 | static u32 *sep_time_address(struct sep_device *sep) | ||
641 | { | ||
642 | return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES; | ||
643 | } | ||
644 | |||
645 | /** | ||
646 | * sep_set_time - set the SEP time | ||
647 | * @sep: the SEP we are setting the time for | ||
648 | * | ||
649 | * Calculates time and sets it at the predefined address. | ||
650 | * Called with the SEP mutex held. | ||
651 | */ | ||
652 | static unsigned long sep_set_time(struct sep_device *sep) | ||
653 | { | ||
654 | struct timeval time; | ||
655 | u32 *time_addr; /* Address of time as seen by the kernel */ | ||
656 | |||
657 | |||
658 | do_gettimeofday(&time); | ||
659 | |||
660 | /* Set value in the SYSTEM MEMORY offset */ | ||
661 | time_addr = sep_time_address(sep); | ||
662 | |||
663 | time_addr[0] = SEP_TIME_VAL_TOKEN; | ||
664 | time_addr[1] = time.tv_sec; | ||
665 | |||
666 | dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec); | ||
667 | dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr); | ||
668 | dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr); | ||
669 | |||
670 | return time.tv_sec; | ||
671 | } | ||
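/*
 * The time is stored as two 32-bit words at the system-time offset of the
 * shared area: SEP_TIME_VAL_TOKEN followed by the current seconds value
 * from do_gettimeofday().
 */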
672 | |||
673 | /** | ||
674 | * sep_set_caller_id_handler - insert caller id entry | ||
675 | * @sep: SEP device | ||
676 | * @arg: pointer to struct caller_id_struct | ||
677 | * | ||
678 | * Inserts the data into the caller id table. Note that this function | ||
679 | * falls under the ioctl lock | ||
680 | */ | ||
681 | static int sep_set_caller_id_handler(struct sep_device *sep, unsigned long arg) | ||
682 | { | ||
683 | void __user *hash; | ||
684 | int error = 0; | ||
685 | int i; | ||
686 | struct caller_id_struct command_args; | ||
687 | |||
688 | for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) { | ||
689 | if (sep->caller_id_table[i].pid == 0) | ||
690 | break; | ||
691 | } | ||
692 | |||
693 | if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) { | ||
694 | dev_dbg(&sep->pdev->dev, "no more caller id entries left\n"); | ||
695 | dev_dbg(&sep->pdev->dev, "maximum number is %d\n", | ||
696 | SEP_CALLER_ID_TABLE_NUM_ENTRIES); | ||
697 | error = -EUSERS; | ||
698 | goto end_function; | ||
699 | } | ||
700 | |||
701 | /* Copy the data */ | ||
702 | if (copy_from_user(&command_args, (void __user *)arg, | ||
703 | sizeof(command_args))) { | ||
704 | error = -EFAULT; | ||
705 | goto end_function; | ||
706 | } | ||
707 | |||
708 | hash = (void __user *)(unsigned long)command_args.callerIdAddress; | ||
709 | |||
710 | if (!command_args.pid || !command_args.callerIdSizeInBytes) { | ||
711 | error = -EINVAL; | ||
712 | goto end_function; | ||
713 | } | ||
714 | |||
715 | dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid); | ||
716 | dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n", | ||
717 | command_args.callerIdSizeInBytes); | ||
718 | |||
719 | if (command_args.callerIdSizeInBytes > | ||
720 | SEP_CALLER_ID_HASH_SIZE_IN_BYTES) { | ||
721 | error = -EMSGSIZE; | ||
722 | goto end_function; | ||
723 | } | ||
724 | |||
725 | sep->caller_id_table[i].pid = command_args.pid; | ||
726 | |||
727 | if (copy_from_user(sep->caller_id_table[i].callerIdHash, | ||
728 | hash, command_args.callerIdSizeInBytes)) | ||
729 | error = -EFAULT; | ||
730 | end_function: | ||
731 | return error; | ||
732 | } | ||
733 | |||
734 | /** | ||
735 | * sep_set_current_caller_id - set the caller id | ||
736 | * @sep: pointer to struct_sep_device | ||
737 | * | ||
738 | * Set the caller ID (if it exists) to the SEP. Note that this | ||
739 | * function falls under the ioctl lock | ||
740 | */ | ||
741 | static int sep_set_current_caller_id(struct sep_device *sep) | ||
742 | { | ||
743 | int i; | ||
744 | u32 *hash_buf_ptr; | ||
745 | |||
746 | /* Zero the previous value */ | ||
747 | memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES, | ||
748 | 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES); | ||
749 | |||
750 | for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) { | ||
751 | if (sep->caller_id_table[i].pid == current->pid) { | ||
752 | dev_dbg(&sep->pdev->dev, "Caller Id found\n"); | ||
753 | |||
754 | memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES, | ||
755 | (void *)(sep->caller_id_table[i].callerIdHash), | ||
756 | SEP_CALLER_ID_HASH_SIZE_IN_BYTES); | ||
757 | break; | ||
758 | } | ||
759 | } | ||
760 | /* Ensure data is in little endian */ | ||
761 | hash_buf_ptr = (u32 *)sep->shared_addr + | ||
762 | SEP_CALLER_ID_OFFSET_BYTES; | ||
763 | |||
764 | for (i = 0; i < SEP_CALLER_ID_HASH_SIZE_IN_WORDS; i++) | ||
765 | hash_buf_ptr[i] = cpu_to_le32(hash_buf_ptr[i]); | ||
766 | |||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * sep_send_command_handler - kick off a command | ||
772 | * @sep: SEP being signalled | ||
773 | * | ||
774 | * This function raises interrupt to SEP that signals that is has a new | ||
775 | * command from the host | ||
776 | * | ||
777 | * Note that this function does fall under the ioctl lock | ||
778 | */ | ||
779 | static int sep_send_command_handler(struct sep_device *sep) | ||
780 | { | ||
781 | unsigned long lck_flags; | ||
782 | int error = 0; | ||
783 | |||
784 | if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) { | ||
785 | error = -EPROTO; | ||
786 | goto end_function; | ||
787 | } | ||
788 | sep_set_time(sep); | ||
789 | |||
790 | sep_set_current_caller_id(sep); | ||
791 | |||
792 | sep_dump_message(sep); | ||
793 | |||
794 | /* Update counter */ | ||
795 | spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); | ||
796 | sep->send_ct++; | ||
797 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
798 | |||
799 | dev_dbg(&sep->pdev->dev, | ||
800 | "sep_send_command_handler send_ct %lx reply_ct %lx\n", | ||
801 | sep->send_ct, sep->reply_ct); | ||
802 | |||
803 | /* Send interrupt to SEP */ | ||
804 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2); | ||
805 | |||
806 | end_function: | ||
807 | return error; | ||
808 | } | ||
809 | |||
810 | /** | ||
811 | * sep_allocate_data_pool_memory_handler -allocate pool memory | ||
812 | * @sep: pointer to struct sep_device | ||
813 | * @arg: pointer to struct alloc_struct | ||
814 | * | ||
815 | * This function handles the allocate data pool memory request | ||
816 | * This function calculates the bus address of the | ||
817 | * allocated memory, and the offset of this area from the mapped address. | ||
818 | * Therefore, the FVOs in user space can calculate the exact virtual | ||
819 | * address of this allocated memory | ||
820 | */ | ||
821 | static int sep_allocate_data_pool_memory_handler(struct sep_device *sep, | ||
822 | unsigned long arg) | ||
823 | { | ||
824 | int error = 0; | ||
825 | struct alloc_struct command_args; | ||
826 | |||
827 | /* Holds the allocated buffer address in the system memory pool */ | ||
828 | u32 *token_addr; | ||
829 | |||
830 | if (copy_from_user(&command_args, (void __user *)arg, | ||
831 | sizeof(struct alloc_struct))) { | ||
832 | error = -EFAULT; | ||
833 | goto end_function; | ||
834 | } | ||
835 | |||
836 | /* Allocate memory */ | ||
837 | if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > | ||
838 | SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) { | ||
839 | error = -ENOMEM; | ||
840 | goto end_function; | ||
841 | } | ||
842 | |||
843 | dev_dbg(&sep->pdev->dev, | ||
844 | "data pool bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated); | ||
845 | dev_dbg(&sep->pdev->dev, | ||
846 | "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES); | ||
847 | /* Set the virtual and bus address */ | ||
848 | command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + | ||
849 | sep->data_pool_bytes_allocated; | ||
850 | |||
851 | /* Place in the shared area that is known by the SEP */ | ||
852 | token_addr = (u32 *)(sep->shared_addr + | ||
853 | SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES + | ||
854 | (sep->num_of_data_allocations)*2*sizeof(u32)); | ||
855 | |||
856 | token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN; | ||
857 | token_addr[1] = (u32)sep->shared_bus + | ||
858 | SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + | ||
859 | sep->data_pool_bytes_allocated; | ||
860 | |||
861 | /* Write the memory back to the user space */ | ||
862 | error = copy_to_user((void *)arg, (void *)&command_args, | ||
863 | sizeof(struct alloc_struct)); | ||
864 | if (error) { | ||
865 | error = -EFAULT; | ||
866 | goto end_function; | ||
867 | } | ||
868 | |||
869 | /* Update the allocation */ | ||
870 | sep->data_pool_bytes_allocated += command_args.num_bytes; | ||
871 | sep->num_of_data_allocations += 1; | ||
872 | |||
873 | end_function: | ||
874 | return error; | ||
875 | } | ||
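/*
 * Each allocation from the data pool leaves two words of bookkeeping in the
 * shared area (SEP_DATA_POOL_POINTERS_VAL_TOKEN and the bus address of the
 * new block) and returns to user space the offset of the block within the
 * mapped shared area.
 */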
876 | |||
877 | /** | ||
878 | * sep_lock_kernel_pages - map kernel pages for DMA | ||
879 | * @sep: pointer to struct sep_device | ||
880 | * @kernel_virt_addr: address of data buffer in kernel | ||
881 | * @data_size: size of data | ||
882 | * @lli_array_ptr: lli array | ||
883 | * @in_out_flag: input into device or output from device | ||
884 | * | ||
885 | * This function locks all the physical pages of the kernel virtual buffer | ||
886 | * and construct a basic lli array, where each entry holds the physical | ||
887 | * page address and the size that application data holds in this page | ||
888 | * This function is used only during kernel crypto mod calls from within | ||
889 | * the kernel (when ioctl is not used) | ||
890 | */ | ||
891 | static int sep_lock_kernel_pages(struct sep_device *sep, | ||
892 | unsigned long kernel_virt_addr, | ||
893 | u32 data_size, | ||
894 | struct sep_lli_entry **lli_array_ptr, | ||
895 | int in_out_flag) | ||
896 | |||
897 | { | ||
898 | int error = 0; | ||
899 | /* Array of lli */ | ||
900 | struct sep_lli_entry *lli_array; | ||
901 | /* Map array */ | ||
902 | struct sep_dma_map *map_array; | ||
903 | |||
904 | dev_dbg(&sep->pdev->dev, "lock kernel pages kernel_virt_addr is %08lx\n", | ||
905 | (unsigned long)kernel_virt_addr); | ||
906 | dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size); | ||
907 | |||
908 | lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC); | ||
909 | if (!lli_array) { | ||
910 | error = -ENOMEM; | ||
911 | goto end_function; | ||
912 | } | ||
913 | map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC); | ||
914 | if (!map_array) { | ||
915 | error = -ENOMEM; | ||
916 | goto end_function_with_error; | ||
917 | } | ||
918 | |||
919 | map_array[0].dma_addr = | ||
920 | dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr, | ||
921 | data_size, DMA_BIDIRECTIONAL); | ||
922 | map_array[0].size = data_size; | ||
923 | |||
924 | |||
925 | /* | ||
926 | * Set the start address of the first page - app data may not start at | ||
927 | * the beginning of the page | ||
928 | */ | ||
929 | lli_array[0].bus_address = (u32)map_array[0].dma_addr; | ||
930 | lli_array[0].block_size = map_array[0].size; | ||
931 | |||
932 | dev_dbg(&sep->pdev->dev, | ||
933 | "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n", | ||
934 | (unsigned long)lli_array[0].bus_address, | ||
935 | lli_array[0].block_size); | ||
936 | |||
937 | /* Set the output parameters */ | ||
938 | if (in_out_flag == SEP_DRIVER_IN_FLAG) { | ||
939 | *lli_array_ptr = lli_array; | ||
940 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1; | ||
941 | sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; | ||
942 | sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array; | ||
943 | sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1; | ||
944 | } else { | ||
945 | *lli_array_ptr = lli_array; | ||
946 | sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1; | ||
947 | sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL; | ||
948 | sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array; | ||
949 | sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1; | ||
950 | } | ||
951 | goto end_function; | ||
952 | |||
953 | end_function_with_error: | ||
954 | kfree(lli_array); | ||
955 | |||
956 | end_function: | ||
957 | return error; | ||
958 | } | ||
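/*
 * Note that sep_lock_kernel_pages treats the kernel buffer as one
 * physically contiguous region: a single dma_map_single() call produces a
 * one-entry lli array and a one-entry map array, and no page array is kept.
 */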
959 | |||
960 | /** | ||
961 | * sep_lock_user_pages - lock and map user pages for DMA | ||
962 | * @sep: pointer to struct sep_device | ||
963 | * @app_virt_addr: user memory data buffer | ||
964 | * @data_size: size of data buffer | ||
965 | * @lli_array_ptr: lli array | ||
966 | * @in_out_flag: input or output to device | ||
967 | * | ||
968 | * This function locks all the physical pages of the application | ||
969 | * virtual buffer and construct a basic lli array, where each entry | ||
970 | * holds the physical page address and the size that application | ||
971 | * data holds in this physical pages | ||
972 | */ | ||
973 | static int sep_lock_user_pages(struct sep_device *sep, | ||
974 | u32 app_virt_addr, | ||
975 | u32 data_size, | ||
976 | struct sep_lli_entry **lli_array_ptr, | ||
977 | int in_out_flag) | ||
978 | |||
979 | { | ||
980 | int error = 0; | ||
981 | u32 count; | ||
982 | int result; | ||
983 | /* The page of the end address of the user space buffer */ | ||
984 | u32 end_page; | ||
985 | /* The page of the start address of the user space buffer */ | ||
986 | u32 start_page; | ||
987 | /* The range in pages */ | ||
988 | u32 num_pages; | ||
989 | /* Array of pointers to page */ | ||
990 | struct page **page_array; | ||
991 | /* Array of lli */ | ||
992 | struct sep_lli_entry *lli_array; | ||
993 | /* Map array */ | ||
994 | struct sep_dma_map *map_array; | ||
995 | /* Direction of the DMA mapping for locked pages */ | ||
996 | enum dma_data_direction dir; | ||
997 | |||
998 | /* Set start and end pages and num pages */ | ||
999 | end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT; | ||
1000 | start_page = app_virt_addr >> PAGE_SHIFT; | ||
1001 | num_pages = end_page - start_page + 1; | ||
1002 | |||
1003 | dev_dbg(&sep->pdev->dev, "lock user pages app_virt_addr is %x\n", app_virt_addr); | ||
1004 | dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size); | ||
1005 | dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page); | ||
1006 | dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page); | ||
1007 | dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages); | ||
1008 | |||
1009 | /* Allocate array of pages structure pointers */ | ||
1010 | page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC); | ||
1011 | if (!page_array) { | ||
1012 | error = -ENOMEM; | ||
1013 | goto end_function; | ||
1014 | } | ||
1015 | map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC); | ||
1016 | if (!map_array) { | ||
1017 | dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n"); | ||
1018 | error = -ENOMEM; | ||
1019 | goto end_function_with_error1; | ||
1020 | } | ||
1021 | |||
1022 | lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages, | ||
1023 | GFP_ATOMIC); | ||
1024 | |||
1025 | if (!lli_array) { | ||
1026 | dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n"); | ||
1027 | error = -ENOMEM; | ||
1028 | goto end_function_with_error2; | ||
1029 | } | ||
1030 | |||
1031 | /* Convert the application virtual address into a set of physical pages */ | ||
1032 | down_read(¤t->mm->mmap_sem); | ||
1033 | result = get_user_pages(current, current->mm, app_virt_addr, | ||
1034 | num_pages, | ||
1035 | ((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1), | ||
1036 | 0, page_array, NULL); | ||
1037 | |||
1038 | up_read(¤t->mm->mmap_sem); | ||
1039 | |||
1040 | /* Check the number of pages locked - if not all then exit with error */ | ||
1041 | if (result != num_pages) { | ||
1042 | dev_warn(&sep->pdev->dev, | ||
1043 | "not all pages locked by get_user_pages\n"); | ||
1044 | error = -ENOMEM; | ||
1045 | goto end_function_with_error3; | ||
1046 | } | ||
1047 | |||
1048 | dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n"); | ||
1049 | |||
1050 | /* Set direction */ | ||
1051 | if (in_out_flag == SEP_DRIVER_IN_FLAG) | ||
1052 | dir = DMA_TO_DEVICE; | ||
1053 | else | ||
1054 | dir = DMA_FROM_DEVICE; | ||
1055 | |||
1056 | /* | ||
1057 | * Fill the array using page array data and | ||
1058 | * map the pages - this action will also flush the cache as needed | ||
1059 | */ | ||
1060 | for (count = 0; count < num_pages; count++) { | ||
1061 | /* Fill the map array */ | ||
1062 | map_array[count].dma_addr = | ||
1063 | dma_map_page(&sep->pdev->dev, page_array[count], | ||
1064 | 0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL); | ||
1065 | |||
1066 | map_array[count].size = PAGE_SIZE; | ||
1067 | |||
1068 | /* Fill the lli array entry */ | ||
1069 | lli_array[count].bus_address = (u32)map_array[count].dma_addr; | ||
1070 | lli_array[count].block_size = PAGE_SIZE; | ||
1071 | |||
1072 | dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n", | ||
1073 | count, (unsigned long)lli_array[count].bus_address, | ||
1074 | count, lli_array[count].block_size); | ||
1075 | } | ||
1076 | |||
1077 | /* Check the offset for the first page */ | ||
1078 | lli_array[0].bus_address = | ||
1079 | lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK)); | ||
1080 | |||
1081 | /* Check whether all the data is in the first page only */ | ||
1082 | if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size) | ||
1083 | lli_array[0].block_size = data_size; | ||
1084 | else | ||
1085 | lli_array[0].block_size = | ||
1086 | PAGE_SIZE - (app_virt_addr & (~PAGE_MASK)); | ||
1087 | |||
1088 | dev_dbg(&sep->pdev->dev, | ||
1089 | "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n", | ||
1090 | (unsigned long)lli_array[0].bus_address, | ||
1091 | lli_array[0].block_size); | ||
1092 | |||
1093 | /* Check the size of the last page */ | ||
1094 | if (num_pages > 1) { | ||
1095 | lli_array[num_pages - 1].block_size = | ||
1096 | (app_virt_addr + data_size) & (~PAGE_MASK); | ||
1097 | if (lli_array[num_pages - 1].block_size == 0) | ||
1098 | lli_array[num_pages - 1].block_size = PAGE_SIZE; | ||
1099 | |||
1100 | dev_warn(&sep->pdev->dev, | ||
1101 | "lli_array[%x].bus_address is " | ||
1102 | "%08lx, lli_array[%x].block_size is %x\n", | ||
1103 | num_pages - 1, | ||
1104 | (unsigned long)lli_array[num_pages - 1].bus_address, | ||
1105 | num_pages - 1, | ||
1106 | lli_array[num_pages - 1].block_size); | ||
1107 | } | ||
1108 | |||
1109 | /* Set output params according to the in_out flag */ | ||
1110 | if (in_out_flag == SEP_DRIVER_IN_FLAG) { | ||
1111 | *lli_array_ptr = lli_array; | ||
1112 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages; | ||
1113 | sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array; | ||
1114 | sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array; | ||
1115 | sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = | ||
1116 | num_pages; | ||
1117 | } else { | ||
1118 | *lli_array_ptr = lli_array; | ||
1119 | sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages; | ||
1120 | sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = | ||
1121 | page_array; | ||
1122 | sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array; | ||
1123 | sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = | ||
1124 | num_pages; | ||
1125 | } | ||
1126 | goto end_function; | ||
1127 | |||
1128 | end_function_with_error3: | ||
1129 | /* Free lli array */ | ||
1130 | kfree(lli_array); | ||
1131 | |||
1132 | end_function_with_error2: | ||
1133 | kfree(map_array); | ||
1134 | |||
1135 | end_function_with_error1: | ||
1136 | /* Free page array */ | ||
1137 | kfree(page_array); | ||
1138 | |||
1139 | end_function: | ||
1140 | return error; | ||
1141 | } | ||
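/*
 * After the pages are mapped, the first lli entry is adjusted for the
 * offset of the buffer within its first page and the last entry for the
 * size of the tail, so the entries cover exactly data_size bytes starting
 * at app_virt_addr. The error labels unwind the allocations in reverse
 * order.
 */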
1142 | |||
1143 | /** | ||
1144 | * u32 sep_calculate_lli_table_max_size - size the LLI table | ||
1145 | * @sep: pointer to struct sep_device | ||
1146 | * @lli_in_array_ptr: input lli array | ||
1147 | * @num_array_entries: number of entries in the lli array | ||
1148 | * @last_table_flag: set to 1 when this is the last table that can be built | ||
1149 | * | ||
1150 | * This function calculates the size of data that can be inserted into | ||
1151 | * the lli table from this array, such that either the table is full | ||
1152 | * (all entries are entered), or there are no more entries in the | ||
1153 | * lli array | ||
1154 | */ | ||
1155 | static u32 sep_calculate_lli_table_max_size(struct sep_device *sep, | ||
1156 | struct sep_lli_entry *lli_in_array_ptr, | ||
1157 | u32 num_array_entries, | ||
1158 | u32 *last_table_flag) | ||
1159 | { | ||
1160 | u32 counter; | ||
1161 | /* Table data size */ | ||
1162 | u32 table_data_size = 0; | ||
1163 | /* Data size for the next table */ | ||
1164 | u32 next_table_data_size; | ||
1165 | |||
1166 | *last_table_flag = 0; | ||
1167 | |||
1168 | /* | ||
1169 | * Calculate the data in the out lli table till we fill the whole | ||
1170 | * table or till the data has ended | ||
1171 | */ | ||
1172 | for (counter = 0; | ||
1173 | (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && | ||
1174 | (counter < num_array_entries); counter++) | ||
1175 | table_data_size += lli_in_array_ptr[counter].block_size; | ||
1176 | |||
1177 | /* | ||
1178 | * Check if we reached the last entry, | ||
1179 | * meaning this is the last table to build, | ||
1180 | * and no need to check the block alignment | ||
1181 | */ | ||
1182 | if (counter == num_array_entries) { | ||
1183 | /* Set the last table flag */ | ||
1184 | *last_table_flag = 1; | ||
1185 | goto end_function; | ||
1186 | } | ||
1187 | |||
1188 | /* | ||
1189 | * Calculate the data size of the next table. | ||
1190 | * Stop if no entries left or if data size is more the DMA restriction | ||
1191 | */ | ||
1192 | next_table_data_size = 0; | ||
1193 | for (; counter < num_array_entries; counter++) { | ||
1194 | next_table_data_size += lli_in_array_ptr[counter].block_size; | ||
1195 | if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) | ||
1196 | break; | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * Check if the next table data size is less than the DMA restriction. | ||
1201 | * If it is, recalculate the current table size, so that the next | ||
1202 | * table data size will be adequate for DMA | ||
1203 | */ | ||
1204 | if (next_table_data_size && | ||
1205 | next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) | ||
1206 | |||
1207 | table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE - | ||
1208 | next_table_data_size); | ||
1209 | |||
1210 | end_function: | ||
1211 | return table_data_size; | ||
1212 | } | ||
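/*
 * The adjustment above keeps the following table DMA-worthy: if the data
 * left over for the next table would be smaller than
 * SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE, the current table is shrunk so that
 * the remainder reaches that minimum.
 */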
1213 | |||
1214 | /** | ||
1215 | * sep_build_lli_table - build an lli array for the given table | ||
1216 | * @sep: pointer to struct sep_device | ||
1217 | * @lli_array_ptr: pointer to lli array | ||
1218 | * @lli_table_ptr: pointer to lli table | ||
1219 | * @num_processed_entries_ptr: pointer to number of entries | ||
1220 | * @num_table_entries_ptr: pointer to number of tables | ||
1221 | * @table_data_size: total data size | ||
1222 | * | ||
1223 | * Builds an lli table from the lli_array according to | ||
1224 | * the given size of data | ||
1225 | */ | ||
1226 | static void sep_build_lli_table(struct sep_device *sep, | ||
1227 | struct sep_lli_entry *lli_array_ptr, | ||
1228 | struct sep_lli_entry *lli_table_ptr, | ||
1229 | u32 *num_processed_entries_ptr, | ||
1230 | u32 *num_table_entries_ptr, | ||
1231 | u32 table_data_size) | ||
1232 | { | ||
1233 | /* Current table data size */ | ||
1234 | u32 curr_table_data_size; | ||
1235 | /* Counter of lli array entry */ | ||
1236 | u32 array_counter; | ||
1237 | |||
1238 | /* Init current table data size and lli array entry counter */ | ||
1239 | curr_table_data_size = 0; | ||
1240 | array_counter = 0; | ||
1241 | *num_table_entries_ptr = 1; | ||
1242 | |||
1243 | dev_dbg(&sep->pdev->dev, "build lli table table_data_size is %x\n", table_data_size); | ||
1244 | |||
1245 | /* Fill the table till table size reaches the needed amount */ | ||
1246 | while (curr_table_data_size < table_data_size) { | ||
1247 | /* Update the number of entries in table */ | ||
1248 | (*num_table_entries_ptr)++; | ||
1249 | |||
1250 | lli_table_ptr->bus_address = | ||
1251 | cpu_to_le32(lli_array_ptr[array_counter].bus_address); | ||
1252 | |||
1253 | lli_table_ptr->block_size = | ||
1254 | cpu_to_le32(lli_array_ptr[array_counter].block_size); | ||
1255 | |||
1256 | curr_table_data_size += lli_array_ptr[array_counter].block_size; | ||
1257 | |||
1258 | dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", | ||
1259 | lli_table_ptr); | ||
1260 | dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n", | ||
1261 | (unsigned long)lli_table_ptr->bus_address); | ||
1262 | dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n", | ||
1263 | lli_table_ptr->block_size); | ||
1264 | |||
1265 | /* Check for overflow of the table data */ | ||
1266 | if (curr_table_data_size > table_data_size) { | ||
1267 | dev_dbg(&sep->pdev->dev, | ||
1268 | "curr_table_data_size too large\n"); | ||
1269 | |||
1270 | /* Update the size of block in the table */ | ||
1271 | lli_table_ptr->block_size -= | ||
1272 | cpu_to_le32((curr_table_data_size - table_data_size)); | ||
1273 | |||
1274 | /* Update the physical address in the lli array */ | ||
1275 | lli_array_ptr[array_counter].bus_address += | ||
1276 | cpu_to_le32(lli_table_ptr->block_size); | ||
1277 | |||
1278 | /* Update the block size left in the lli array */ | ||
1279 | lli_array_ptr[array_counter].block_size = | ||
1280 | (curr_table_data_size - table_data_size); | ||
1281 | } else | ||
1282 | /* Advance to the next entry in the lli_array */ | ||
1283 | array_counter++; | ||
1284 | |||
1285 | dev_dbg(&sep->pdev->dev, | ||
1286 | "lli_table_ptr->bus_address is %08lx\n", | ||
1287 | (unsigned long)lli_table_ptr->bus_address); | ||
1288 | dev_dbg(&sep->pdev->dev, | ||
1289 | "lli_table_ptr->block_size is %x\n", | ||
1290 | lli_table_ptr->block_size); | ||
1291 | |||
1292 | /* Move to the next entry in table */ | ||
1293 | lli_table_ptr++; | ||
1294 | } | ||
1295 | |||
1296 | /* Set the info entry to default */ | ||
1297 | lli_table_ptr->bus_address = 0xffffffff; | ||
1298 | lli_table_ptr->block_size = 0; | ||
1299 | |||
1300 | /* Set the output parameter */ | ||
1301 | *num_processed_entries_ptr += array_counter; | ||
1302 | |||
1303 | } | ||
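/*
 * Every table built here ends with an "info" entry (bus_address 0xffffffff,
 * block_size 0). When an lli array entry straddles the table boundary it is
 * split: the table keeps the part that fits and the array entry keeps the
 * remainder for the next table.
 */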
1304 | |||
1305 | /** | ||
1306 | * sep_shared_area_virt_to_bus - map shared area to bus address | ||
1307 | * @sep: pointer to struct sep_device | ||
1308 | * @virt_address: virtual address to convert | ||
1309 | * | ||
1310 | * This function returns the physical address inside the shared area according | ||
1311 | * to the virtual address. It can be either on the external RAM device | ||
1312 | * (ioremapped), or on the system RAM | ||
1313 | * This implementation is for the external RAM | ||
1314 | */ | ||
1315 | static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep, | ||
1316 | void *virt_address) | ||
1317 | { | ||
1318 | dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address); | ||
1319 | dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n", | ||
1320 | (unsigned long) | ||
1321 | sep->shared_bus + (virt_address - sep->shared_addr)); | ||
1322 | |||
1323 | return sep->shared_bus + (size_t)(virt_address - sep->shared_addr); | ||
1324 | } | ||
1325 | |||
1326 | /** | ||
1327 | * sep_shared_area_bus_to_virt - map shared area bus address to kernel | ||
1328 | * @sep: pointer to struct sep_device | ||
1329 | * @bus_address: bus address to convert | ||
1330 | * | ||
1331 | * This function returns the virtual address inside the shared area | ||
1332 | * according to the physical address. It can be either on the | ||
1333 | * external RAM device (ioremapped), or on the system RAM | ||
1334 | * This implementation is for the external RAM | ||
1335 | */ | ||
1336 | static void *sep_shared_area_bus_to_virt(struct sep_device *sep, | ||
1337 | dma_addr_t bus_address) | ||
1338 | { | ||
1339 | dev_dbg(&sep->pdev->dev, "shared bus to virt b=%lx v=%lx\n", | ||
1340 | (unsigned long)bus_address, (unsigned long)(sep->shared_addr + | ||
1341 | (size_t)(bus_address - sep->shared_bus))); | ||
1342 | |||
1343 | return sep->shared_addr + (size_t)(bus_address - sep->shared_bus); | ||
1344 | } | ||
1345 | |||
1346 | /** | ||
1347 | * sep_debug_print_lli_tables - dump LLI table | ||
1348 | * @sep: pointer to struct sep_device | ||
1349 | * @lli_table_ptr: pointer to sep_lli_entry | ||
1350 | * @num_table_entries: number of entries | ||
1351 | * @table_data_size: total data size | ||
1352 | * | ||
1353 | * Walk the list of created tables and print all the data | ||
1354 | */ | ||
1355 | static void sep_debug_print_lli_tables(struct sep_device *sep, | ||
1356 | struct sep_lli_entry *lli_table_ptr, | ||
1357 | unsigned long num_table_entries, | ||
1358 | unsigned long table_data_size) | ||
1359 | { | ||
1360 | unsigned long table_count = 1; | ||
1361 | unsigned long entries_count = 0; | ||
1362 | |||
1363 | dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n"); | ||
1364 | |||
1365 | while ((unsigned long) lli_table_ptr->bus_address != 0xffffffff) { | ||
1366 | dev_dbg(&sep->pdev->dev, | ||
1367 | "lli table %08lx, table_data_size is %lu\n", | ||
1368 | table_count, table_data_size); | ||
1369 | dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n", | ||
1370 | num_table_entries); | ||
1371 | |||
1372 | /* Print entries of the table (without info entry) */ | ||
1373 | for (entries_count = 0; entries_count < num_table_entries; | ||
1374 | entries_count++, lli_table_ptr++) { | ||
1375 | |||
1376 | dev_dbg(&sep->pdev->dev, | ||
1377 | "lli_table_ptr address is %08lx\n", | ||
1378 | (unsigned long) lli_table_ptr); | ||
1379 | |||
1380 | dev_dbg(&sep->pdev->dev, | ||
1381 | "phys address is %08lx block size is %x\n", | ||
1382 | (unsigned long)lli_table_ptr->bus_address, | ||
1383 | lli_table_ptr->block_size); | ||
1384 | } | ||
1385 | /* Point to the info entry */ | ||
1386 | lli_table_ptr--; | ||
1387 | |||
1388 | dev_dbg(&sep->pdev->dev, | ||
1389 | "phys lli_table_ptr->block_size is %x\n", | ||
1390 | lli_table_ptr->block_size); | ||
1391 | |||
1392 | dev_dbg(&sep->pdev->dev, | ||
1393 | "phys lli_table_ptr->physical_address is %08lx\n", | ||
1394 | (unsigned long)lli_table_ptr->bus_address); | ||
1395 | |||
1396 | |||
1397 | table_data_size = lli_table_ptr->block_size & 0xffffff; | ||
1398 | num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff; | ||
1399 | |||
1400 | dev_dbg(&sep->pdev->dev, | ||
1401 | "phys table_data_size is %lu num_table_entries is" | ||
1402 | " %lu bus_address is %lu\n", table_data_size, | ||
1403 | num_table_entries, (unsigned long)lli_table_ptr->bus_address); | ||
1404 | |||
1405 | if ((unsigned long)lli_table_ptr->bus_address != 0xffffffff) | ||
1406 | lli_table_ptr = (struct sep_lli_entry *) | ||
1407 | sep_shared_bus_to_virt(sep, | ||
1408 | (unsigned long)lli_table_ptr->bus_address); | ||
1409 | |||
1410 | table_count++; | ||
1411 | } | ||
1412 | dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n"); | ||
1413 | } | ||
1414 | |||
1415 | |||
1416 | /** | ||
1417 | * sep_prepare_empty_lli_table - create a blank LLI table | ||
1418 | * @sep: pointer to struct sep_device | ||
1419 | * @lli_table_addr_ptr: pointer to lli table | ||
1420 | * @num_entries_ptr: pointer to number of entries | ||
1421 | * @table_data_size_ptr: point to table data size | ||
1422 | * | ||
1423 | * This function creates empty lli tables when there is no data | ||
1424 | */ | ||
1425 | static void sep_prepare_empty_lli_table(struct sep_device *sep, | ||
1426 | dma_addr_t *lli_table_addr_ptr, | ||
1427 | u32 *num_entries_ptr, | ||
1428 | u32 *table_data_size_ptr) | ||
1429 | { | ||
1430 | struct sep_lli_entry *lli_table_ptr; | ||
1431 | |||
1432 | /* Find the area for new table */ | ||
1433 | lli_table_ptr = | ||
1434 | (struct sep_lli_entry *)(sep->shared_addr + | ||
1435 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1436 | sep->num_lli_tables_created * sizeof(struct sep_lli_entry) * | ||
1437 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); | ||
1438 | |||
1439 | lli_table_ptr->bus_address = 0; | ||
1440 | lli_table_ptr->block_size = 0; | ||
1441 | |||
1442 | lli_table_ptr++; | ||
1443 | lli_table_ptr->bus_address = 0xFFFFFFFF; | ||
1444 | lli_table_ptr->block_size = 0; | ||
1445 | |||
1446 | /* Set the output parameter value */ | ||
1447 | *lli_table_addr_ptr = sep->shared_bus + | ||
1448 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1449 | sep->num_lli_tables_created * | ||
1450 | sizeof(struct sep_lli_entry) * | ||
1451 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; | ||
1452 | |||
1453 | /* Set the num of entries and table data size for empty table */ | ||
1454 | *num_entries_ptr = 2; | ||
1455 | *table_data_size_ptr = 0; | ||
1456 | |||
1457 | /* Update the number of created tables */ | ||
1458 | sep->num_lli_tables_created++; | ||
1459 | } | ||
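/*
 * The empty table is just a zero entry followed by the 0xFFFFFFFF info
 * entry, so the data_size == 0 case can still hand the SEP a table with the
 * expected layout.
 */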
1460 | |||
1461 | /** | ||
1462 | * sep_prepare_input_dma_table - prepare input DMA mappings | ||
1463 | * @sep: pointer to struct sep_device | ||
1464 | * @data_size: size of the data buffer | ||
1465 | * @block_size: block size of the operation | ||
1466 | * @lli_table_ptr: returned bus address of the first lli table | ||
1467 | * @num_entries_ptr: returned number of entries in the first table | ||
1468 | * @table_data_size_ptr: returned total data size of the first table | ||
1469 | * @is_kva: set for kernel data (kernel crypto call) | ||
1470 | * | ||
1471 | * This function prepares only the input DMA table for synchronous symmetric | ||
1472 | * operations (HASH) | ||
1473 | * Note that all bus addresses that are passed to the SEP | ||
1474 | * are in 32 bit format; the SEP is a 32 bit device | ||
1475 | */ | ||
1476 | static int sep_prepare_input_dma_table(struct sep_device *sep, | ||
1477 | unsigned long app_virt_addr, | ||
1478 | u32 data_size, | ||
1479 | u32 block_size, | ||
1480 | dma_addr_t *lli_table_ptr, | ||
1481 | u32 *num_entries_ptr, | ||
1482 | u32 *table_data_size_ptr, | ||
1483 | bool is_kva) | ||
1484 | { | ||
1485 | int error = 0; | ||
1486 | /* Pointer to the info entry of the table - the last entry */ | ||
1487 | struct sep_lli_entry *info_entry_ptr; | ||
1488 | /* Array of pointers to page */ | ||
1489 | struct sep_lli_entry *lli_array_ptr; | ||
1490 | /* Points to the first entry to be processed in the lli_in_array */ | ||
1491 | u32 current_entry = 0; | ||
1492 | /* Num entries in the virtual buffer */ | ||
1493 | u32 sep_lli_entries = 0; | ||
1494 | /* Lli table pointer */ | ||
1495 | struct sep_lli_entry *in_lli_table_ptr; | ||
1496 | /* The total data in one table */ | ||
1497 | u32 table_data_size = 0; | ||
1498 | /* Flag for last table */ | ||
1499 | u32 last_table_flag = 0; | ||
1500 | /* Number of entries in lli table */ | ||
1501 | u32 num_entries_in_table = 0; | ||
1502 | /* Next table address */ | ||
1503 | void *lli_table_alloc_addr = 0; | ||
1504 | |||
1505 | dev_dbg(&sep->pdev->dev, "prepare intput dma table data_size is %x\n", data_size); | ||
1506 | dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size); | ||
1507 | |||
1508 | /* Initialize the pages pointers */ | ||
1509 | sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; | ||
1510 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0; | ||
1511 | |||
1512 | /* Set the kernel address for first table to be allocated */ | ||
1513 | lli_table_alloc_addr = (void *)(sep->shared_addr + | ||
1514 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1515 | sep->num_lli_tables_created * sizeof(struct sep_lli_entry) * | ||
1516 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); | ||
1517 | |||
1518 | if (data_size == 0) { | ||
1519 | /* Special case - create empty table - 2 entries, zero data */ | ||
1520 | sep_prepare_empty_lli_table(sep, lli_table_ptr, | ||
1521 | num_entries_ptr, table_data_size_ptr); | ||
1522 | goto update_dcb_counter; | ||
1523 | } | ||
1524 | |||
1525 | /* Check if the pages are in Kernel Virtual Address layout */ | ||
1526 | if (is_kva == true) | ||
1527 | /* Lock the pages in the kernel */ | ||
1528 | error = sep_lock_kernel_pages(sep, app_virt_addr, | ||
1529 | data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG); | ||
1530 | else | ||
1531 | /* | ||
1532 | * Lock the pages of the user buffer | ||
1533 | * and translate them to an lli array | ||
1534 | */ | ||
1535 | error = sep_lock_user_pages(sep, app_virt_addr, | ||
1536 | data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG); | ||
1537 | |||
1538 | if (error) | ||
1539 | goto end_function; | ||
1540 | |||
1541 | dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n", | ||
1542 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages); | ||
1543 | |||
1544 | current_entry = 0; | ||
1545 | info_entry_ptr = NULL; | ||
1546 | |||
1547 | sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages; | ||
1548 | |||
1549 | /* Loop until all the entries in the in array have been processed */ | ||
1550 | while (current_entry < sep_lli_entries) { | ||
1551 | |||
1552 | /* Set the new input and output tables */ | ||
1553 | in_lli_table_ptr = | ||
1554 | (struct sep_lli_entry *)lli_table_alloc_addr; | ||
1555 | |||
1556 | lli_table_alloc_addr += sizeof(struct sep_lli_entry) * | ||
1557 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; | ||
1558 | |||
1559 | if (lli_table_alloc_addr > | ||
1560 | ((void *)sep->shared_addr + | ||
1561 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1562 | SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { | ||
1563 | |||
1564 | error = -ENOMEM; | ||
1565 | goto end_function_error; | ||
1566 | |||
1567 | } | ||
1568 | |||
1569 | /* Update the number of created tables */ | ||
1570 | sep->num_lli_tables_created++; | ||
1571 | |||
1572 | /* Calculate the maximum size of data for input table */ | ||
1573 | table_data_size = sep_calculate_lli_table_max_size(sep, | ||
1574 | &lli_array_ptr[current_entry], | ||
1575 | (sep_lli_entries - current_entry), | ||
1576 | &last_table_flag); | ||
1577 | |||
1578 | /* | ||
1579 | * If this is not the last table - | ||
1580 | * then align it to the block size | ||
1581 | */ | ||
1582 | if (!last_table_flag) | ||
1583 | table_data_size = | ||
1584 | (table_data_size / block_size) * block_size; | ||
1585 | |||
1586 | dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n", | ||
1587 | table_data_size); | ||
1588 | |||
1589 | /* Construct input lli table */ | ||
1590 | sep_build_lli_table(sep, &lli_array_ptr[current_entry], | ||
1591 | in_lli_table_ptr, | ||
1592 | ¤t_entry, &num_entries_in_table, table_data_size); | ||
1593 | |||
1594 | if (info_entry_ptr == NULL) { | ||
1595 | |||
1596 | /* Set the output parameters to physical addresses */ | ||
1597 | *lli_table_ptr = sep_shared_area_virt_to_bus(sep, | ||
1598 | in_lli_table_ptr); | ||
1599 | *num_entries_ptr = num_entries_in_table; | ||
1600 | *table_data_size_ptr = table_data_size; | ||
1601 | |||
1602 | dev_dbg(&sep->pdev->dev, | ||
1603 | "output lli_table_in_ptr is %08lx\n", | ||
1604 | (unsigned long)*lli_table_ptr); | ||
1605 | |||
1606 | } else { | ||
1607 | /* Update the info entry of the previous in table */ | ||
1608 | info_entry_ptr->bus_address = | ||
1609 | sep_shared_area_virt_to_bus(sep, | ||
1610 | in_lli_table_ptr); | ||
1611 | info_entry_ptr->block_size = | ||
1612 | ((num_entries_in_table) << 24) | | ||
1613 | (table_data_size); | ||
1614 | } | ||
1615 | /* Save the pointer to the info entry of the current tables */ | ||
1616 | info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1; | ||
1617 | } | ||
1618 | /* Print input tables */ | ||
1619 | sep_debug_print_lli_tables(sep, (struct sep_lli_entry *) | ||
1620 | sep_shared_area_bus_to_virt(sep, *lli_table_ptr), | ||
1621 | *num_entries_ptr, *table_data_size_ptr); | ||
1622 | /* The array of the pages */ | ||
1623 | kfree(lli_array_ptr); | ||
1624 | |||
1625 | update_dcb_counter: | ||
1626 | /* Update DCB counter */ | ||
1627 | sep->nr_dcb_creat++; | ||
1628 | goto end_function; | ||
1629 | |||
1630 | end_function_error: | ||
1631 | /* Free all the allocated resources */ | ||
1632 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array); | ||
1633 | kfree(lli_array_ptr); | ||
1634 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array); | ||
1635 | |||
1636 | end_function: | ||
1637 | return error; | ||
1638 | |||
1639 | } | ||
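/*
 * Editorial sketch (not part of the original driver): for every table
 * except the last one, the table data size is rounded down to a multiple
 * of the block size, so no table splits a cipher/hash block. The rounding
 * above is plain integer truncation; a hypothetical helper would be:
 */
static inline u32 sep_sketch_round_down_to_block(u32 size, u32 block_size)
{
	/* e.g. size 4100, block_size 16 -> 4096 */
	return (size / block_size) * block_size;
}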
1640 | /** | ||
1641 | * sep_construct_dma_tables_from_lli - prepare AES/DES mappings | ||
1642 | * @sep: pointer to struct sep_device | ||
1643 | * @lli_in_array: array of input LLI entries (one per locked page) | ||
1644 | * @sep_in_lli_entries: number of entries in lli_in_array | ||
1645 | * @lli_out_array: array of output LLI entries (one per locked page) | ||
1646 | * @sep_out_lli_entries: number of entries in lli_out_array | ||
1647 | * @block_size: block size of the algorithm | ||
1648 | * @lli_table_in_ptr: returns the bus address of the first input LLI table | ||
1649 | * @lli_table_out_ptr: returns the bus address of the first output LLI table | ||
1650 | * @in_num_entries_ptr: returns the number of entries in the first input table | ||
1651 | * @out_num_entries_ptr: returns the number of entries in the first output table | ||
1652 | * @table_data_size_ptr: returns the data size of the first table | ||
1653 | * | ||
1654 | * This function creates the input and output DMA tables for | ||
1655 | * symmetric operations (AES/DES) according to the block | ||
1656 | * size from LLI arrays | ||
1657 | * Note that all bus addresses that are passed to the SEP | ||
1658 | * are in 32 bit format; the SEP is a 32 bit device | ||
1659 | */ | ||
1660 | static int sep_construct_dma_tables_from_lli( | ||
1661 | struct sep_device *sep, | ||
1662 | struct sep_lli_entry *lli_in_array, | ||
1663 | u32 sep_in_lli_entries, | ||
1664 | struct sep_lli_entry *lli_out_array, | ||
1665 | u32 sep_out_lli_entries, | ||
1666 | u32 block_size, | ||
1667 | dma_addr_t *lli_table_in_ptr, | ||
1668 | dma_addr_t *lli_table_out_ptr, | ||
1669 | u32 *in_num_entries_ptr, | ||
1670 | u32 *out_num_entries_ptr, | ||
1671 | u32 *table_data_size_ptr) | ||
1672 | { | ||
1673 | /* Points to the area where next lli table can be allocated */ | ||
1674 | void *lli_table_alloc_addr = 0; | ||
1675 | /* Input lli table */ | ||
1676 | struct sep_lli_entry *in_lli_table_ptr = NULL; | ||
1677 | /* Output lli table */ | ||
1678 | struct sep_lli_entry *out_lli_table_ptr = NULL; | ||
1679 | /* Pointer to the info entry of the table - the last entry */ | ||
1680 | struct sep_lli_entry *info_in_entry_ptr = NULL; | ||
1681 | /* Pointer to the info entry of the table - the last entry */ | ||
1682 | struct sep_lli_entry *info_out_entry_ptr = NULL; | ||
1683 | /* Points to the first entry to be processed in the lli_in_array */ | ||
1684 | u32 current_in_entry = 0; | ||
1685 | /* Points to the first entry to be processed in the lli_out_array */ | ||
1686 | u32 current_out_entry = 0; | ||
1687 | /* Max size of the input table */ | ||
1688 | u32 in_table_data_size = 0; | ||
1689 | /* Max size of the output table */ | ||
1690 | u32 out_table_data_size = 0; | ||
1691 | /* Flag that signifies if this is the last table to be built */ | ||
1692 | u32 last_table_flag = 0; | ||
1693 | /* The data size that should be in table */ | ||
1694 | u32 table_data_size = 0; | ||
1695 | /* Number of entries in the input table */ | ||
1696 | u32 num_entries_in_table = 0; | ||
1697 | /* Number of entries in the output table */ | ||
1698 | u32 num_entries_out_table = 0; | ||
1699 | |||
1700 | /* Initialize to point past the message area */ | ||
1701 | lli_table_alloc_addr = (void *)(sep->shared_addr + | ||
1702 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1703 | (sep->num_lli_tables_created * | ||
1704 | (sizeof(struct sep_lli_entry) * | ||
1705 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP))); | ||
1706 | |||
1707 | /* Loop until all the entries in the in array have been processed */ | ||
1708 | while (current_in_entry < sep_in_lli_entries) { | ||
1709 | /* Set the new input and output tables */ | ||
1710 | in_lli_table_ptr = | ||
1711 | (struct sep_lli_entry *)lli_table_alloc_addr; | ||
1712 | |||
1713 | lli_table_alloc_addr += sizeof(struct sep_lli_entry) * | ||
1714 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; | ||
1715 | |||
1716 | /* Set the first output tables */ | ||
1717 | out_lli_table_ptr = | ||
1718 | (struct sep_lli_entry *)lli_table_alloc_addr; | ||
1719 | |||
1720 | /* Check if the DMA table area limit was overrun */ | ||
1721 | if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) * | ||
1722 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) > | ||
1723 | ((void *)sep->shared_addr + | ||
1724 | SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES + | ||
1725 | SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) { | ||
1726 | |||
1727 | dev_warn(&sep->pdev->dev, "dma table limit overrun\n"); | ||
1728 | return -ENOMEM; | ||
1729 | } | ||
1730 | |||
1731 | /* Update the number of the lli tables created */ | ||
1732 | sep->num_lli_tables_created += 2; | ||
1733 | |||
1734 | lli_table_alloc_addr += sizeof(struct sep_lli_entry) * | ||
1735 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP; | ||
1736 | |||
1737 | /* Calculate the maximum size of data for input table */ | ||
1738 | in_table_data_size = | ||
1739 | sep_calculate_lli_table_max_size(sep, | ||
1740 | &lli_in_array[current_in_entry], | ||
1741 | (sep_in_lli_entries - current_in_entry), | ||
1742 | &last_table_flag); | ||
1743 | |||
1744 | /* Calculate the maximum size of data for output table */ | ||
1745 | out_table_data_size = | ||
1746 | sep_calculate_lli_table_max_size(sep, | ||
1747 | &lli_out_array[current_out_entry], | ||
1748 | (sep_out_lli_entries - current_out_entry), | ||
1749 | &last_table_flag); | ||
1750 | |||
1751 | dev_dbg(&sep->pdev->dev, | ||
1752 | "construct tables from lli in_table_data_size is %x\n", | ||
1753 | in_table_data_size); | ||
1754 | |||
1755 | dev_dbg(&sep->pdev->dev, | ||
1756 | "construct tables from lli out_table_data_size is %x\n", | ||
1757 | out_table_data_size); | ||
1758 | |||
1759 | table_data_size = in_table_data_size; | ||
1760 | |||
1761 | if (!last_table_flag) { | ||
1762 | /* | ||
1763 | * If this is not the last table, | ||
1764 | * then we must take the smaller of the two data sizes | ||
1765 | * and then align it to the block size | ||
1766 | */ | ||
1767 | if (table_data_size > out_table_data_size) | ||
1768 | table_data_size = out_table_data_size; | ||
1769 | |||
1770 | /* | ||
1771 | * Now calculate the table size so that | ||
1772 | * it will be a multiple of the block size | ||
1773 | */ | ||
1774 | table_data_size = (table_data_size / block_size) * | ||
1775 | block_size; | ||
1776 | } | ||
1777 | |||
1778 | /* Construct input lli table */ | ||
1779 | sep_build_lli_table(sep, &lli_in_array[current_in_entry], | ||
1780 | in_lli_table_ptr, | ||
1781 | ¤t_in_entry, | ||
1782 | &num_entries_in_table, | ||
1783 | table_data_size); | ||
1784 | |||
1785 | /* Construct output lli table */ | ||
1786 | sep_build_lli_table(sep, &lli_out_array[current_out_entry], | ||
1787 | out_lli_table_ptr, | ||
1788 | ¤t_out_entry, | ||
1789 | &num_entries_out_table, | ||
1790 | table_data_size); | ||
1791 | |||
1792 | /* If info entry is null - this is the first table built */ | ||
1793 | if (info_in_entry_ptr == NULL) { | ||
1794 | /* Set the output parameters to physical addresses */ | ||
1795 | *lli_table_in_ptr = | ||
1796 | sep_shared_area_virt_to_bus(sep, in_lli_table_ptr); | ||
1797 | |||
1798 | *in_num_entries_ptr = num_entries_in_table; | ||
1799 | |||
1800 | *lli_table_out_ptr = | ||
1801 | sep_shared_area_virt_to_bus(sep, | ||
1802 | out_lli_table_ptr); | ||
1803 | |||
1804 | *out_num_entries_ptr = num_entries_out_table; | ||
1805 | *table_data_size_ptr = table_data_size; | ||
1806 | |||
1807 | dev_dbg(&sep->pdev->dev, | ||
1808 | "output lli_table_in_ptr is %08lx\n", | ||
1809 | (unsigned long)*lli_table_in_ptr); | ||
1810 | dev_dbg(&sep->pdev->dev, | ||
1811 | "output lli_table_out_ptr is %08lx\n", | ||
1812 | (unsigned long)*lli_table_out_ptr); | ||
1813 | } else { | ||
1814 | /* Update the info entry of the previous in table */ | ||
1815 | info_in_entry_ptr->bus_address = | ||
1816 | sep_shared_area_virt_to_bus(sep, | ||
1817 | in_lli_table_ptr); | ||
1818 | |||
1819 | info_in_entry_ptr->block_size = | ||
1820 | ((num_entries_in_table) << 24) | | ||
1821 | (table_data_size); | ||
1822 | |||
1823 | /* Update the info entry of the previous in table */ | ||
1824 | info_out_entry_ptr->bus_address = | ||
1825 | sep_shared_area_virt_to_bus(sep, | ||
1826 | out_lli_table_ptr); | ||
1827 | |||
1828 | info_out_entry_ptr->block_size = | ||
1829 | ((num_entries_out_table) << 24) | | ||
1830 | (table_data_size); | ||
1831 | |||
1832 | dev_dbg(&sep->pdev->dev, | ||
1833 | "output lli_table_in_ptr:%08lx %08x\n", | ||
1834 | (unsigned long)info_in_entry_ptr->bus_address, | ||
1835 | info_in_entry_ptr->block_size); | ||
1836 | |||
1837 | dev_dbg(&sep->pdev->dev, | ||
1838 | "output lli_table_out_ptr:%08lx %08x\n", | ||
1839 | (unsigned long)info_out_entry_ptr->bus_address, | ||
1840 | info_out_entry_ptr->block_size); | ||
1841 | } | ||
1842 | |||
1843 | /* Save the pointer to the info entry of the current tables */ | ||
1844 | info_in_entry_ptr = in_lli_table_ptr + | ||
1845 | num_entries_in_table - 1; | ||
1846 | info_out_entry_ptr = out_lli_table_ptr + | ||
1847 | num_entries_out_table - 1; | ||
1848 | |||
1849 | dev_dbg(&sep->pdev->dev, | ||
1850 | "output num_entries_out_table is %x\n", | ||
1851 | (u32)num_entries_out_table); | ||
1852 | dev_dbg(&sep->pdev->dev, | ||
1853 | "output info_in_entry_ptr is %lx\n", | ||
1854 | (unsigned long)info_in_entry_ptr); | ||
1855 | dev_dbg(&sep->pdev->dev, | ||
1856 | "output info_out_entry_ptr is %lx\n", | ||
1857 | (unsigned long)info_out_entry_ptr); | ||
1858 | } | ||
1859 | |||
1860 | /* Print input tables */ | ||
1861 | sep_debug_print_lli_tables(sep, | ||
1862 | (struct sep_lli_entry *) | ||
1863 | sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr), | ||
1864 | *in_num_entries_ptr, | ||
1865 | *table_data_size_ptr); | ||
1866 | |||
1867 | /* Print output tables */ | ||
1868 | sep_debug_print_lli_tables(sep, | ||
1869 | (struct sep_lli_entry *) | ||
1870 | sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr), | ||
1871 | *out_num_entries_ptr, | ||
1872 | *table_data_size_ptr); | ||
1873 | |||
1874 | return 0; | ||
1875 | } | ||
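/*
 * Editorial sketch (not part of the original driver): tables are chained
 * by rewriting the info (last) entry of the previous table so that its
 * bus_address points at the next table and its block_size carries the
 * packed (entries << 24 | data size) word, exactly as done above for both
 * the input and output chains. A hypothetical helper for one link:
 */
static inline void sep_sketch_link_tables(struct sep_lli_entry *prev_info,
					  dma_addr_t next_table_bus,
					  u32 next_num_entries,
					  u32 next_data_size)
{
	prev_info->bus_address = next_table_bus;
	prev_info->block_size = (next_num_entries << 24) | next_data_size;
}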
1876 | |||
1877 | /** | ||
1878 | * sep_prepare_input_output_dma_table - prepare DMA I/O table | ||
1879 | * @app_virt_in_addr: virtual address of the input buffer | ||
1880 | * @app_virt_out_addr: virtual address of the output buffer | ||
1881 | * @data_size: size of the data in bytes | ||
1882 | * @block_size: block size of the algorithm | ||
1883 | * @lli_table_in_ptr: returns the bus address of the first input LLI table | ||
1884 | * @lli_table_out_ptr: returns the bus address of the first output LLI table | ||
1885 | * @in_num_entries_ptr: returns the number of entries in the first input table | ||
1886 | * @out_num_entries_ptr: returns the number of entries in the first output table | ||
1887 | * @table_data_size_ptr: returns the data size of the first table | ||
1888 | * @is_kva: set for kernel data; used only for kernel crypto module | ||
1889 | * | ||
1890 | * This function builds input and output DMA tables for synchronous | ||
1891 | * symmetric operations (AES, DES, HASH). It also ensures that each table | ||
1892 | * holds a multiple of the block size | ||
1893 | * Note that all bus addresses that are passed to the SEP | ||
1894 | * are in 32 bit format; the SEP is a 32 bit device | ||
1895 | */ | ||
1896 | static int sep_prepare_input_output_dma_table(struct sep_device *sep, | ||
1897 | unsigned long app_virt_in_addr, | ||
1898 | unsigned long app_virt_out_addr, | ||
1899 | u32 data_size, | ||
1900 | u32 block_size, | ||
1901 | dma_addr_t *lli_table_in_ptr, | ||
1902 | dma_addr_t *lli_table_out_ptr, | ||
1903 | u32 *in_num_entries_ptr, | ||
1904 | u32 *out_num_entries_ptr, | ||
1905 | u32 *table_data_size_ptr, | ||
1906 | bool is_kva) | ||
1907 | |||
1908 | { | ||
1909 | int error = 0; | ||
1910 | /* Array of pointers of page */ | ||
1911 | struct sep_lli_entry *lli_in_array; | ||
1912 | /* Array of pointers of page */ | ||
1913 | struct sep_lli_entry *lli_out_array; | ||
1914 | |||
1915 | if (data_size == 0) { | ||
1916 | /* Prepare empty table for input and output */ | ||
1917 | sep_prepare_empty_lli_table(sep, lli_table_in_ptr, | ||
1918 | in_num_entries_ptr, table_data_size_ptr); | ||
1919 | |||
1920 | sep_prepare_empty_lli_table(sep, lli_table_out_ptr, | ||
1921 | out_num_entries_ptr, table_data_size_ptr); | ||
1922 | |||
1923 | goto update_dcb_counter; | ||
1924 | } | ||
1925 | |||
1926 | /* Initialize the pages pointers */ | ||
1927 | sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL; | ||
1928 | sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL; | ||
1929 | |||
1930 | /* Lock the pages of the buffer and translate them to lli arrays */ | ||
1931 | if (is_kva == true) { | ||
1932 | error = sep_lock_kernel_pages(sep, app_virt_in_addr, | ||
1933 | data_size, &lli_in_array, SEP_DRIVER_IN_FLAG); | ||
1934 | |||
1935 | if (error) { | ||
1936 | dev_warn(&sep->pdev->dev, | ||
1937 | "lock kernel for in failed\n"); | ||
1938 | goto end_function; | ||
1939 | } | ||
1940 | |||
1941 | error = sep_lock_kernel_pages(sep, app_virt_out_addr, | ||
1942 | data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG); | ||
1943 | |||
1944 | if (error) { | ||
1945 | dev_warn(&sep->pdev->dev, | ||
1946 | "lock kernel for out failed\n"); | ||
1947 | goto end_function; | ||
1948 | } | ||
1949 | } | ||
1950 | |||
1951 | else { | ||
1952 | error = sep_lock_user_pages(sep, app_virt_in_addr, | ||
1953 | data_size, &lli_in_array, SEP_DRIVER_IN_FLAG); | ||
1954 | if (error) { | ||
1955 | dev_warn(&sep->pdev->dev, | ||
1956 | "sep_lock_user_pages for input virtual buffer failed\n"); | ||
1957 | goto end_function; | ||
1958 | } | ||
1959 | |||
1960 | error = sep_lock_user_pages(sep, app_virt_out_addr, | ||
1961 | data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG); | ||
1962 | |||
1963 | if (error) { | ||
1964 | dev_warn(&sep->pdev->dev, | ||
1965 | "sep_lock_user_pages for output virtual buffer failed\n"); | ||
1966 | goto end_function_free_lli_in; | ||
1967 | } | ||
1968 | } | ||
1969 | |||
1970 | dev_dbg(&sep->pdev->dev, "prep input output dma table sep_in_num_pages is %x\n", | ||
1971 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages); | ||
1972 | dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n", | ||
1973 | sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages); | ||
1974 | dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", | ||
1975 | SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP); | ||
1976 | |||
1977 | /* Call the function that creates table from the lli arrays */ | ||
1978 | error = sep_construct_dma_tables_from_lli(sep, lli_in_array, | ||
1979 | sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages, | ||
1980 | lli_out_array, | ||
1981 | sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages, | ||
1982 | block_size, lli_table_in_ptr, lli_table_out_ptr, | ||
1983 | in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr); | ||
1984 | |||
1985 | if (error) { | ||
1986 | dev_warn(&sep->pdev->dev, | ||
1987 | "sep_construct_dma_tables_from_lli failed\n"); | ||
1988 | goto end_function_with_error; | ||
1989 | } | ||
1990 | |||
1991 | kfree(lli_out_array); | ||
1992 | kfree(lli_in_array); | ||
1993 | |||
1994 | update_dcb_counter: | ||
1995 | /* Update DCB counter */ | ||
1996 | sep->nr_dcb_creat++; | ||
1997 | |||
1998 | goto end_function; | ||
1999 | |||
2000 | end_function_with_error: | ||
2001 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array); | ||
2002 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array); | ||
2003 | kfree(lli_out_array); | ||
2004 | |||
2005 | |||
2006 | end_function_free_lli_in: | ||
2007 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array); | ||
2008 | kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array); | ||
2009 | kfree(lli_in_array); | ||
2010 | |||
2011 | end_function: | ||
2012 | |||
2013 | return error; | ||
2014 | |||
2015 | } | ||
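/*
 * Editorial sketch (not part of the original driver): both preparation
 * paths above carve their LLI tables out of a fixed region of the shared
 * area, starting at SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES and advancing
 * by one full table (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP entries) per
 * table already created; they fail with -ENOMEM once the region of
 * SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES would be overrun. A hypothetical
 * helper computing the next free slot could look like:
 */
static inline void *sep_sketch_next_table_slot(struct sep_device *sep)
{
	/* Caller must still check the slot against the area size limit */
	return (void *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
}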
2016 | |||
2017 | /** | ||
2018 | * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks | ||
2019 | * @app_in_address: unsigned long; for data buffer in (user space) | ||
2020 | * @app_out_address: unsigned long; for data buffer out (user space) | ||
2021 | * @data_in_size: u32; for size of data | ||
2022 | * @block_size: u32; for block size | ||
2023 | * @tail_block_size: u32; for size of tail block | ||
2024 | * @isapplet: bool; to indicate external app | ||
2025 | * @is_kva: bool; kernel buffer; only used for kernel crypto module | ||
2026 | * | ||
2027 | * This function prepares the linked DMA tables and puts the | ||
2028 | * address of the linked list of tables into a DCB (data control | ||
2029 | * block), the address of which is known by the SEP hardware | ||
2030 | * Note that all bus addresses that are passed to the SEP | ||
2031 | * are in 32 bit format; the SEP is a 32 bit device | ||
2032 | */ | ||
2033 | static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep, | ||
2034 | unsigned long app_in_address, | ||
2035 | unsigned long app_out_address, | ||
2036 | u32 data_in_size, | ||
2037 | u32 block_size, | ||
2038 | u32 tail_block_size, | ||
2039 | bool isapplet, | ||
2040 | bool is_kva) | ||
2041 | { | ||
2042 | int error = 0; | ||
2043 | /* Size of tail */ | ||
2044 | u32 tail_size = 0; | ||
2045 | /* Address of the created DCB table */ | ||
2046 | struct sep_dcblock *dcb_table_ptr = NULL; | ||
2047 | /* The physical address of the first input DMA table */ | ||
2048 | dma_addr_t in_first_mlli_address = 0; | ||
2049 | /* Number of entries in the first input DMA table */ | ||
2050 | u32 in_first_num_entries = 0; | ||
2051 | /* The physical address of the first output DMA table */ | ||
2052 | dma_addr_t out_first_mlli_address = 0; | ||
2053 | /* Number of entries in the first output DMA table */ | ||
2054 | u32 out_first_num_entries = 0; | ||
2055 | /* Data in the first input/output table */ | ||
2056 | u32 first_data_size = 0; | ||
2057 | |||
2058 | if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) { | ||
2059 | /* No more DCBs to allocate */ | ||
2060 | dev_warn(&sep->pdev->dev, "no more DCBs available\n"); | ||
2061 | error = -ENOSPC; | ||
2062 | goto end_function; | ||
2063 | } | ||
2064 | |||
2065 | /* Allocate new DCB */ | ||
2066 | dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr + | ||
2067 | SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES + | ||
2068 | (sep->nr_dcb_creat * sizeof(struct sep_dcblock))); | ||
2069 | |||
2070 | /* Set the default values in the DCB */ | ||
2071 | dcb_table_ptr->input_mlli_address = 0; | ||
2072 | dcb_table_ptr->input_mlli_num_entries = 0; | ||
2073 | dcb_table_ptr->input_mlli_data_size = 0; | ||
2074 | dcb_table_ptr->output_mlli_address = 0; | ||
2075 | dcb_table_ptr->output_mlli_num_entries = 0; | ||
2076 | dcb_table_ptr->output_mlli_data_size = 0; | ||
2077 | dcb_table_ptr->tail_data_size = 0; | ||
2078 | dcb_table_ptr->out_vr_tail_pt = 0; | ||
2079 | |||
2080 | if (isapplet == true) { | ||
2081 | |||
2082 | /* Check if there is enough data for DMA operation */ | ||
2083 | if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) { | ||
2084 | if (is_kva == true) { | ||
2085 | memcpy(dcb_table_ptr->tail_data, | ||
2086 | (void *)app_in_address, data_in_size); | ||
2087 | } else { | ||
2088 | if (copy_from_user(dcb_table_ptr->tail_data, | ||
2089 | (void __user *)app_in_address, | ||
2090 | data_in_size)) { | ||
2091 | error = -EFAULT; | ||
2092 | goto end_function; | ||
2093 | } | ||
2094 | } | ||
2095 | |||
2096 | dcb_table_ptr->tail_data_size = data_in_size; | ||
2097 | |||
2098 | /* Set the output user-space address for mem2mem op */ | ||
2099 | if (app_out_address) | ||
2100 | dcb_table_ptr->out_vr_tail_pt = | ||
2101 | (aligned_u64)app_out_address; | ||
2102 | |||
2103 | /* | ||
2104 | * Update both data length parameters in order to avoid | ||
2105 | * second data copy and allow building of empty mlli | ||
2106 | * tables | ||
2107 | */ | ||
2108 | tail_size = 0x0; | ||
2109 | data_in_size = 0x0; | ||
2110 | |||
2111 | } else { | ||
2112 | if (!app_out_address) { | ||
2113 | tail_size = data_in_size % block_size; | ||
2114 | if (!tail_size) { | ||
2115 | if (tail_block_size == block_size) | ||
2116 | tail_size = block_size; | ||
2117 | } | ||
2118 | } else { | ||
2119 | tail_size = 0; | ||
2120 | } | ||
2121 | } | ||
2122 | if (tail_size) { | ||
2123 | if (is_kva == true) { | ||
2124 | memcpy(dcb_table_ptr->tail_data, | ||
2125 | (void *)(app_in_address + data_in_size - | ||
2126 | tail_size), tail_size); | ||
2127 | } else { | ||
2128 | /* We have tail data - copy it to DCB */ | ||
2129 | if (copy_from_user(dcb_table_ptr->tail_data, | ||
2130 | (void *)(app_in_address + | ||
2131 | data_in_size - tail_size), tail_size)) { | ||
2132 | error = -EFAULT; | ||
2133 | goto end_function; | ||
2134 | } | ||
2135 | } | ||
2136 | if (app_out_address) | ||
2137 | /* | ||
2138 | * Calculate the output address | ||
2139 | * according to tail data size | ||
2140 | */ | ||
2141 | dcb_table_ptr->out_vr_tail_pt = | ||
2142 | (aligned_u64)app_out_address + data_in_size | ||
2143 | - tail_size; | ||
2144 | |||
2145 | /* Save the real tail data size */ | ||
2146 | dcb_table_ptr->tail_data_size = tail_size; | ||
2147 | /* | ||
2148 | * Update the data size without the tail | ||
2149 | * data size AKA data for the dma | ||
2150 | */ | ||
2151 | data_in_size = (data_in_size - tail_size); | ||
2152 | } | ||
2153 | } | ||
2154 | /* Check if we need to build only input table or input/output */ | ||
2155 | if (app_out_address) { | ||
2156 | /* Prepare input/output tables */ | ||
2157 | error = sep_prepare_input_output_dma_table(sep, | ||
2158 | app_in_address, | ||
2159 | app_out_address, | ||
2160 | data_in_size, | ||
2161 | block_size, | ||
2162 | &in_first_mlli_address, | ||
2163 | &out_first_mlli_address, | ||
2164 | &in_first_num_entries, | ||
2165 | &out_first_num_entries, | ||
2166 | &first_data_size, | ||
2167 | is_kva); | ||
2168 | } else { | ||
2169 | /* Prepare input tables */ | ||
2170 | error = sep_prepare_input_dma_table(sep, | ||
2171 | app_in_address, | ||
2172 | data_in_size, | ||
2173 | block_size, | ||
2174 | &in_first_mlli_address, | ||
2175 | &in_first_num_entries, | ||
2176 | &first_data_size, | ||
2177 | is_kva); | ||
2178 | } | ||
2179 | |||
2180 | if (error) { | ||
2181 | dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n"); | ||
2182 | goto end_function; | ||
2183 | } | ||
2184 | |||
2185 | /* Set the DCB values */ | ||
2186 | dcb_table_ptr->input_mlli_address = in_first_mlli_address; | ||
2187 | dcb_table_ptr->input_mlli_num_entries = in_first_num_entries; | ||
2188 | dcb_table_ptr->input_mlli_data_size = first_data_size; | ||
2189 | dcb_table_ptr->output_mlli_address = out_first_mlli_address; | ||
2190 | dcb_table_ptr->output_mlli_num_entries = out_first_num_entries; | ||
2191 | dcb_table_ptr->output_mlli_data_size = first_data_size; | ||
2192 | |||
2193 | end_function: | ||
2194 | return error; | ||
2195 | |||
2196 | } | ||
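/*
 * Editorial sketch (not part of the original driver): for an input-only
 * operation the tail is whatever does not fill a whole block, and when the
 * data is already block aligned but tail_block_size equals block_size one
 * full block is still kept back as tail; mem2mem operations (an output
 * address is given) carry no tail. A hypothetical helper mirroring the
 * logic above:
 */
static inline u32 sep_sketch_tail_size(u32 data_in_size, u32 block_size,
				       u32 tail_block_size,
				       int have_out_address)
{
	u32 tail;

	if (have_out_address)
		return 0;

	tail = data_in_size % block_size;
	if (!tail && tail_block_size == block_size)
		tail = block_size;

	return tail;
}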
2197 | |||
2198 | /** | ||
2199 | * sep_free_dma_tables_and_dcb - free DMA tables and DCBs | ||
2200 | * @sep: pointer to struct sep_device | ||
2201 | * @isapplet: indicates external application (used for kernel access) | ||
2202 | * @is_kva: indicates kernel addresses (only used for kernel crypto) | ||
2203 | * | ||
2204 | * This function frees the DMA tables and DCB | ||
2205 | */ | ||
2206 | static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet, | ||
2207 | bool is_kva) | ||
2208 | { | ||
2209 | int i = 0; | ||
2210 | int error = 0; | ||
2211 | int error_temp = 0; | ||
2212 | struct sep_dcblock *dcb_table_ptr; | ||
2213 | unsigned long pt_hold; | ||
2214 | void *tail_pt; | ||
2215 | |||
2216 | if (isapplet == true) { | ||
2217 | /* Set pointer to first DCB table */ | ||
2218 | dcb_table_ptr = (struct sep_dcblock *) | ||
2219 | (sep->shared_addr + | ||
2220 | SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES); | ||
2221 | |||
2222 | /* Go over each DCB and see if tail pointer must be updated */ | ||
2223 | for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) { | ||
2224 | if (dcb_table_ptr->out_vr_tail_pt) { | ||
2225 | pt_hold = (unsigned long)dcb_table_ptr->out_vr_tail_pt; | ||
2226 | tail_pt = (void *)pt_hold; | ||
2227 | if (is_kva == true) { | ||
2228 | memcpy(tail_pt, | ||
2229 | dcb_table_ptr->tail_data, | ||
2230 | dcb_table_ptr->tail_data_size); | ||
2231 | } else { | ||
2232 | error_temp = copy_to_user( | ||
2233 | tail_pt, | ||
2234 | dcb_table_ptr->tail_data, | ||
2235 | dcb_table_ptr->tail_data_size); | ||
2236 | } | ||
2237 | if (error_temp) { | ||
2238 | /* Release the DMA resource */ | ||
2239 | error = -EFAULT; | ||
2240 | break; | ||
2241 | } | ||
2242 | } | ||
2243 | } | ||
2244 | } | ||
2245 | /* Free the output pages, if any */ | ||
2246 | sep_free_dma_table_data_handler(sep); | ||
2247 | |||
2248 | return error; | ||
2249 | } | ||
2250 | |||
2251 | /** | ||
2252 | * sep_get_static_pool_addr_handler - get static pool address | ||
2253 | * @sep: pointer to struct sep_device | ||
2254 | * | ||
2255 | * This function sets the bus and virtual addresses of the static pool | ||
2256 | */ | ||
2257 | static int sep_get_static_pool_addr_handler(struct sep_device *sep) | ||
2258 | { | ||
2259 | u32 *static_pool_addr = NULL; | ||
2260 | |||
2261 | static_pool_addr = (u32 *)(sep->shared_addr + | ||
2262 | SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES); | ||
2263 | |||
2264 | static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN; | ||
2265 | static_pool_addr[1] = (u32)sep->shared_bus + | ||
2266 | SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES; | ||
2267 | |||
2268 | dev_dbg(&sep->pdev->dev, "static pool segment: physical %x\n", | ||
2269 | (u32)static_pool_addr[1]); | ||
2270 | |||
2271 | return 0; | ||
2272 | } | ||
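/*
 * Editorial sketch (not part of the original driver): the static pool
 * handler above, like the RAR message handler further down, publishes a
 * two-word record in the system area of the shared memory - a token the
 * SEP firmware recognizes followed by a 32 bit bus address. Written out
 * as a hypothetical helper:
 */
static inline void sep_sketch_write_token_pair(u32 *slot, u32 token,
					       u32 bus_address)
{
	slot[0] = token;	/* e.g. SEP_STATIC_POOL_VAL_TOKEN */
	slot[1] = bus_address;	/* 32 bit bus address for the SEP */
}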
2273 | |||
2274 | /** | ||
2275 | * sep_end_transaction_handler - end transaction | ||
2276 | * @sep: pointer to struct sep_device | ||
2277 | * | ||
2278 | * This API handles the end transaction request | ||
2279 | */ | ||
2280 | static int sep_end_transaction_handler(struct sep_device *sep) | ||
2281 | { | ||
2282 | /* Clear the data pool pointers Token */ | ||
2283 | memset((void *)(sep->shared_addr + | ||
2284 | SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES), | ||
2285 | 0, sep->num_of_data_allocations*2*sizeof(u32)); | ||
2286 | |||
2287 | /* Check that all the DMA resources were freed */ | ||
2288 | sep_free_dma_table_data_handler(sep); | ||
2289 | |||
2290 | clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags); | ||
2291 | |||
2292 | /* | ||
2293 | * We are now through with the transaction. Let's | ||
2294 | * allow other processes who have the device open | ||
2295 | * to perform transactions | ||
2296 | */ | ||
2297 | mutex_lock(&sep->sep_mutex); | ||
2298 | sep->pid_doing_transaction = 0; | ||
2299 | mutex_unlock(&sep->sep_mutex); | ||
2300 | /* Raise event for stuck contexts */ | ||
2301 | wake_up(&sep->event); | ||
2302 | |||
2303 | return 0; | ||
2304 | } | ||
2305 | |||
2306 | /** | ||
2307 | * sep_prepare_dcb_handler - prepare a control block | ||
2308 | * @sep: pointer to struct sep_device | ||
2309 | * @arg: pointer to user parameters | ||
2310 | * | ||
2311 | * This function will retrieve the RAR buffer physical addresses, type | ||
2312 | * & size corresponding to the RAR handles provided in the buffers vector. | ||
2313 | */ | ||
2314 | static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg) | ||
2315 | { | ||
2316 | int error; | ||
2317 | /* Command arguments */ | ||
2318 | struct build_dcb_struct command_args; | ||
2319 | |||
2320 | /* Get the command arguments */ | ||
2321 | if (copy_from_user(&command_args, (void __user *)arg, | ||
2322 | sizeof(struct build_dcb_struct))) { | ||
2323 | error = -EFAULT; | ||
2324 | goto end_function; | ||
2325 | } | ||
2326 | |||
2327 | dev_dbg(&sep->pdev->dev, "prep dcb handler app_in_address is %08llx\n", | ||
2328 | command_args.app_in_address); | ||
2329 | dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n", | ||
2330 | command_args.app_out_address); | ||
2331 | dev_dbg(&sep->pdev->dev, "data_size is %x\n", | ||
2332 | command_args.data_in_size); | ||
2333 | dev_dbg(&sep->pdev->dev, "block_size is %x\n", | ||
2334 | command_args.block_size); | ||
2335 | dev_dbg(&sep->pdev->dev, "tail block_size is %x\n", | ||
2336 | command_args.tail_block_size); | ||
2337 | |||
2338 | error = sep_prepare_input_output_dma_table_in_dcb(sep, | ||
2339 | (unsigned long)command_args.app_in_address, | ||
2340 | (unsigned long)command_args.app_out_address, | ||
2341 | command_args.data_in_size, command_args.block_size, | ||
2342 | command_args.tail_block_size, true, false); | ||
2343 | |||
2344 | end_function: | ||
2345 | return error; | ||
2346 | |||
2347 | } | ||
2348 | |||
2349 | /** | ||
2350 | * sep_free_dcb_handler - free control block resources | ||
2351 | * @sep: pointer to struct sep_device | ||
2352 | * | ||
2353 | * This function frees the DCB resources and updates the needed | ||
2354 | * user-space buffers. | ||
2355 | */ | ||
2356 | static int sep_free_dcb_handler(struct sep_device *sep) | ||
2357 | { | ||
2358 | return sep_free_dma_tables_and_dcb(sep, false, false); | ||
2359 | } | ||
2360 | |||
2361 | /** | ||
2362 | * sep_rar_prepare_output_msg_handler - prepare an output message | ||
2363 | * @sep: pointer to struct sep_device | ||
2364 | * @arg: pointer to user parameters | ||
2365 | * | ||
2366 | * This function will retrieve the RAR buffer physical addresses, type | ||
2367 | * & size corresponding to the RAR handles provided in the buffers vector. | ||
2368 | */ | ||
2369 | static int sep_rar_prepare_output_msg_handler(struct sep_device *sep, | ||
2370 | unsigned long arg) | ||
2371 | { | ||
2372 | int error = 0; | ||
2373 | /* Command args */ | ||
2374 | struct rar_hndl_to_bus_struct command_args; | ||
2375 | /* Bus address */ | ||
2376 | dma_addr_t rar_bus = 0; | ||
2377 | /* Holds the RAR address in the system memory offset */ | ||
2378 | u32 *rar_addr; | ||
2379 | |||
2380 | /* Copy the data */ | ||
2381 | if (copy_from_user(&command_args, (void __user *)arg, | ||
2382 | sizeof(command_args))) { | ||
2383 | error = -EFAULT; | ||
2384 | goto end_function; | ||
2385 | } | ||
2386 | |||
2387 | /* Call to translation function only if user handle is not NULL */ | ||
2388 | if (command_args.rar_handle) | ||
2389 | return -EOPNOTSUPP; | ||
2390 | dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus); | ||
2391 | |||
2392 | /* Set value in the SYSTEM MEMORY offset */ | ||
2393 | rar_addr = (u32 *)(sep->shared_addr + | ||
2394 | SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES); | ||
2395 | |||
2396 | /* Copy the physical address to the System Area for the SEP */ | ||
2397 | rar_addr[0] = SEP_RAR_VAL_TOKEN; | ||
2398 | rar_addr[1] = rar_bus; | ||
2399 | |||
2400 | end_function: | ||
2401 | return error; | ||
2402 | } | ||
2403 | |||
2404 | /** | ||
2405 | * sep_ioctl - ioctl api | ||
2406 | * @filp: pointer to struct file | ||
2407 | * @cmd: command | ||
2408 | * @arg: pointer to argument structure | ||
2409 | * | ||
2410 | * Implement the ioctl methods available on the SEP device. | ||
2411 | */ | ||
2412 | static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | ||
2413 | { | ||
2414 | int error = 0; | ||
2415 | struct sep_device *sep = filp->private_data; | ||
2416 | |||
2417 | /* Make sure we own this device */ | ||
2418 | mutex_lock(&sep->sep_mutex); | ||
2419 | if ((current->pid != sep->pid_doing_transaction) && | ||
2420 | (sep->pid_doing_transaction != 0)) { | ||
2421 | dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n"); | ||
2422 | error = -EACCES; | ||
2423 | goto end_function; | ||
2424 | } | ||
2425 | |||
2426 | mutex_unlock(&sep->sep_mutex); | ||
2427 | |||
2428 | if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) | ||
2429 | return -ENOTTY; | ||
2430 | |||
2431 | /* Lock to prevent the daemon from interfering with the operation */ | ||
2432 | mutex_lock(&sep->ioctl_mutex); | ||
2433 | |||
2434 | switch (cmd) { | ||
2435 | case SEP_IOCSENDSEPCOMMAND: | ||
2436 | /* Send command to SEP */ | ||
2437 | error = sep_send_command_handler(sep); | ||
2438 | break; | ||
2439 | case SEP_IOCALLOCDATAPOLL: | ||
2440 | /* Allocate data pool */ | ||
2441 | error = sep_allocate_data_pool_memory_handler(sep, arg); | ||
2442 | break; | ||
2443 | case SEP_IOCGETSTATICPOOLADDR: | ||
2444 | /* Inform the SEP the bus address of the static pool */ | ||
2445 | error = sep_get_static_pool_addr_handler(sep); | ||
2446 | break; | ||
2447 | case SEP_IOCENDTRANSACTION: | ||
2448 | error = sep_end_transaction_handler(sep); | ||
2449 | break; | ||
2450 | case SEP_IOCRARPREPAREMESSAGE: | ||
2451 | error = sep_rar_prepare_output_msg_handler(sep, arg); | ||
2452 | break; | ||
2453 | case SEP_IOCPREPAREDCB: | ||
2454 | error = sep_prepare_dcb_handler(sep, arg); | ||
2455 | break; | ||
2456 | case SEP_IOCFREEDCB: | ||
2457 | error = sep_free_dcb_handler(sep); | ||
2458 | break; | ||
2459 | default: | ||
2460 | error = -ENOTTY; | ||
2461 | break; | ||
2462 | } | ||
2463 | |||
2464 | end_function: | ||
2465 | mutex_unlock(&sep->ioctl_mutex); | ||
2466 | return error; | ||
2467 | } | ||
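/*
 * Editorial sketch (not part of the original driver): from user space the
 * DCB preparation ioctl takes a struct build_dcb_struct describing the in
 * and out buffers. The device node path below is hypothetical (the real
 * name comes from SEP_DEV_NAME), and the field layout is assumed from how
 * the handler above reads it:
 *
 *	struct build_dcb_struct args = {
 *		.app_in_address  = (unsigned long)in_buf,
 *		.app_out_address = (unsigned long)out_buf,
 *		.data_in_size    = len,
 *		.block_size      = 16,
 *		.tail_block_size = 16,
 *	};
 *	int fd = open("/dev/sep", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, SEP_IOCPREPAREDCB, &args) < 0)
 *		perror("SEP_IOCPREPAREDCB");
 */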
2468 | |||
2469 | /** | ||
2470 | * sep_singleton_ioctl - ioctl api for singleton interface | ||
2471 | * @filp: pointer to struct file | ||
2472 | * @cmd: command | ||
2473 | * @arg: pointer to argument structure | ||
2474 | * | ||
2475 | * Implement the additional ioctls for the singleton device | ||
2476 | */ | ||
2477 | static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg) | ||
2478 | { | ||
2479 | long error = 0; | ||
2480 | struct sep_device *sep = filp->private_data; | ||
2481 | |||
2482 | /* Check that the command is for the SEP device */ | ||
2483 | if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) | ||
2484 | return -ENOTTY; | ||
2485 | |||
2486 | /* Make sure we own this device */ | ||
2487 | mutex_lock(&sep->sep_mutex); | ||
2488 | if ((current->pid != sep->pid_doing_transaction) && | ||
2489 | (sep->pid_doing_transaction != 0)) { | ||
2490 | dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n"); | ||
2491 | mutex_unlock(&sep->sep_mutex); | ||
2492 | return -EACCES; | ||
2493 | } | ||
2494 | |||
2495 | mutex_unlock(&sep->sep_mutex); | ||
2496 | |||
2497 | switch (cmd) { | ||
2498 | case SEP_IOCTLSETCALLERID: | ||
2499 | mutex_lock(&sep->ioctl_mutex); | ||
2500 | error = sep_set_caller_id_handler(sep, arg); | ||
2501 | mutex_unlock(&sep->ioctl_mutex); | ||
2502 | break; | ||
2503 | default: | ||
2504 | error = sep_ioctl(filp, cmd, arg); | ||
2505 | break; | ||
2506 | } | ||
2507 | return error; | ||
2508 | } | ||
2509 | |||
2510 | /** | ||
2511 | * sep_request_daemon_ioctl - ioctl for daemon | ||
2512 | * @filp: pointer to struct file | ||
2513 | * @cmd: command | ||
2514 | * @arg: pointer to argument structure | ||
2515 | * | ||
2516 | * Called by the request daemon to perform ioctls on the daemon device | ||
2517 | */ | ||
2518 | static long sep_request_daemon_ioctl(struct file *filp, u32 cmd, | ||
2519 | unsigned long arg) | ||
2520 | { | ||
2521 | |||
2522 | long error; | ||
2523 | struct sep_device *sep = filp->private_data; | ||
2524 | |||
2525 | /* Check that the command is for SEP device */ | ||
2526 | if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) | ||
2527 | return -ENOTTY; | ||
2528 | |||
2529 | /* Only one process can access ioctl at any given time */ | ||
2530 | mutex_lock(&sep->ioctl_mutex); | ||
2531 | |||
2532 | switch (cmd) { | ||
2533 | case SEP_IOCSENDSEPRPLYCOMMAND: | ||
2534 | /* Send reply command to SEP */ | ||
2535 | error = sep_req_daemon_send_reply_command_handler(sep); | ||
2536 | break; | ||
2537 | case SEP_IOCENDTRANSACTION: | ||
2538 | /* | ||
2539 | * End req daemon transaction, do nothing; | ||
2540 | * will be removed upon update in the middleware | ||
2541 | * API library | ||
2542 | */ | ||
2543 | error = 0; | ||
2544 | break; | ||
2545 | default: | ||
2546 | error = -ENOTTY; | ||
2547 | } | ||
2548 | mutex_unlock(&sep->ioctl_mutex); | ||
2549 | return error; | ||
2550 | } | ||
2551 | |||
2552 | /** | ||
2553 | * sep_inthandler - interrupt handler | ||
2554 | * @irq: interrupt | ||
2555 | * @dev_id: device id | ||
2556 | */ | ||
2557 | static irqreturn_t sep_inthandler(int irq, void *dev_id) | ||
2558 | { | ||
2559 | irqreturn_t int_error = IRQ_HANDLED; | ||
2560 | unsigned long lck_flags; | ||
2561 | u32 reg_val, reg_val2 = 0; | ||
2562 | struct sep_device *sep = dev_id; | ||
2563 | |||
2564 | /* Read the IRR register to check if this is SEP interrupt */ | ||
2565 | reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR); | ||
2566 | |||
2567 | if (reg_val & (0x1 << 13)) { | ||
2568 | /* Lock and update the counter of reply messages */ | ||
2569 | spin_lock_irqsave(&sep->snd_rply_lck, lck_flags); | ||
2570 | sep->reply_ct++; | ||
2571 | spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags); | ||
2572 | |||
2573 | dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n", | ||
2574 | sep->send_ct, sep->reply_ct); | ||
2575 | |||
2576 | /* Is this a printf or daemon request? */ | ||
2577 | reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
2578 | dev_dbg(&sep->pdev->dev, | ||
2579 | "SEP Interrupt - reg2 is %08x\n", reg_val2); | ||
2580 | |||
2581 | if ((reg_val2 >> 30) & 0x1) { | ||
2582 | dev_dbg(&sep->pdev->dev, "int: printf request\n"); | ||
2583 | wake_up(&sep->event_request_daemon); | ||
2584 | } else if (reg_val2 >> 31) { | ||
2585 | dev_dbg(&sep->pdev->dev, "int: daemon request\n"); | ||
2586 | wake_up(&sep->event_request_daemon); | ||
2587 | } else { | ||
2588 | dev_dbg(&sep->pdev->dev, "int: SEP reply\n"); | ||
2589 | wake_up(&sep->event); | ||
2590 | } | ||
2591 | } else { | ||
2592 | dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n"); | ||
2593 | int_error = IRQ_NONE; | ||
2594 | } | ||
2595 | if (int_error == IRQ_HANDLED) | ||
2596 | sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val); | ||
2597 | |||
2598 | return int_error; | ||
2599 | } | ||
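/*
 * Editorial sketch (not part of the original driver): the handler above
 * classifies a SEP interrupt by the top two bits of GPR2 - bit 30 set
 * means a printf request, bit 31 set means a daemon request, anything
 * else is an ordinary reply. A hypothetical classifier for the two cases
 * that wake the daemon wait queue:
 */
static inline int sep_sketch_gpr2_is_daemon_event(u32 reg_val2)
{
	/* Either printf (bit 30) or daemon (bit 31) wakes the daemon queue */
	return ((reg_val2 >> 30) & 0x1) || (reg_val2 >> 31);
}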
2600 | |||
2601 | /** | ||
2602 | * sep_reconfig_shared_area - reconfigure shared area | ||
2603 | * @sep: pointer to struct sep_device | ||
2604 | * | ||
2605 | * Reconfig the shared area between HOST and SEP - needed in case | ||
2606 | * the DX_CC_Init function was called before OS loading. | ||
2607 | */ | ||
2608 | static int sep_reconfig_shared_area(struct sep_device *sep) | ||
2609 | { | ||
2610 | int ret_val; | ||
2611 | |||
2612 | /* used to limit waiting for SEP */ | ||
2613 | unsigned long end_time; | ||
2614 | |||
2615 | /* Send the new SHARED MESSAGE AREA to the SEP */ | ||
2616 | dev_dbg(&sep->pdev->dev, "reconfig shared; sending %08llx to sep\n", | ||
2617 | (unsigned long long)sep->shared_bus); | ||
2618 | |||
2619 | sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus); | ||
2620 | |||
2621 | /* Poll for SEP response */ | ||
2622 | ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); | ||
2623 | |||
2624 | end_time = jiffies + (WAIT_TIME * HZ); | ||
2625 | |||
2626 | while ((time_before(jiffies, end_time)) && (ret_val != 0xffffffff) && | ||
2627 | (ret_val != sep->shared_bus)) | ||
2628 | ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR); | ||
2629 | |||
2630 | /* Check the return value (register) */ | ||
2631 | if (ret_val != sep->shared_bus) { | ||
2632 | dev_warn(&sep->pdev->dev, "could not reconfig shared area\n"); | ||
2633 | dev_warn(&sep->pdev->dev, "result was %x\n", ret_val); | ||
2634 | ret_val = -ENOMEM; | ||
2635 | } else | ||
2636 | ret_val = 0; | ||
2637 | |||
2638 | dev_dbg(&sep->pdev->dev, "reconfig shared area end\n"); | ||
2639 | return ret_val; | ||
2640 | } | ||
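/*
 * Editorial sketch (not part of the original driver): the reconfig above
 * uses a bounded busy-poll - read GPR1 until it echoes the shared bus
 * address, returns the error pattern, or WAIT_TIME seconds of jiffies
 * pass. The same pattern, written as a hypothetical helper:
 */
static inline u32 sep_sketch_poll_gpr1(struct sep_device *sep, u32 expected)
{
	unsigned long end_time = jiffies + (WAIT_TIME * HZ);
	u32 val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	while (time_before(jiffies, end_time) && val != 0xffffffff &&
	       val != expected)
		val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);

	return val;
}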
2641 | |||
2642 | /* File operation for singleton SEP operations */ | ||
2643 | static const struct file_operations singleton_file_operations = { | ||
2644 | .owner = THIS_MODULE, | ||
2645 | .unlocked_ioctl = sep_singleton_ioctl, | ||
2646 | .poll = sep_poll, | ||
2647 | .open = sep_singleton_open, | ||
2648 | .release = sep_singleton_release, | ||
2649 | .mmap = sep_mmap, | ||
2650 | }; | ||
2651 | |||
2652 | /* File operation for daemon operations */ | ||
2653 | static const struct file_operations daemon_file_operations = { | ||
2654 | .owner = THIS_MODULE, | ||
2655 | .unlocked_ioctl = sep_request_daemon_ioctl, | ||
2656 | .poll = sep_request_daemon_poll, | ||
2657 | .open = sep_request_daemon_open, | ||
2658 | .release = sep_request_daemon_release, | ||
2659 | .mmap = sep_request_daemon_mmap, | ||
2660 | }; | ||
2661 | |||
2662 | /* The files operations structure of the driver */ | ||
2663 | static const struct file_operations sep_file_operations = { | ||
2664 | .owner = THIS_MODULE, | ||
2665 | .unlocked_ioctl = sep_ioctl, | ||
2666 | .poll = sep_poll, | ||
2667 | .open = sep_open, | ||
2668 | .release = sep_release, | ||
2669 | .mmap = sep_mmap, | ||
2670 | }; | ||
2671 | |||
2672 | /** | ||
2673 | * sep_register_driver_with_fs - register misc devices | ||
2674 | * @sep: pointer to struct sep_device | ||
2675 | * | ||
2676 | * This function registers the driver with the file system | ||
2677 | */ | ||
2678 | static int sep_register_driver_with_fs(struct sep_device *sep) | ||
2679 | { | ||
2680 | int ret_val; | ||
2681 | |||
2682 | sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR; | ||
2683 | sep->miscdev_sep.name = SEP_DEV_NAME; | ||
2684 | sep->miscdev_sep.fops = &sep_file_operations; | ||
2685 | |||
2686 | sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR; | ||
2687 | sep->miscdev_singleton.name = SEP_DEV_SINGLETON; | ||
2688 | sep->miscdev_singleton.fops = &singleton_file_operations; | ||
2689 | |||
2690 | sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR; | ||
2691 | sep->miscdev_daemon.name = SEP_DEV_DAEMON; | ||
2692 | sep->miscdev_daemon.fops = &daemon_file_operations; | ||
2693 | |||
2694 | ret_val = misc_register(&sep->miscdev_sep); | ||
2695 | if (ret_val) { | ||
2696 | dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n", | ||
2697 | ret_val); | ||
2698 | return ret_val; | ||
2699 | } | ||
2700 | |||
2701 | ret_val = misc_register(&sep->miscdev_singleton); | ||
2702 | if (ret_val) { | ||
2703 | dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n", | ||
2704 | ret_val); | ||
2705 | misc_deregister(&sep->miscdev_sep); | ||
2706 | return ret_val; | ||
2707 | } | ||
2708 | |||
2709 | ret_val = misc_register(&sep->miscdev_daemon); | ||
2710 | if (ret_val) { | ||
2711 | dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n", | ||
2712 | ret_val); | ||
2713 | misc_deregister(&sep->miscdev_sep); | ||
2714 | misc_deregister(&sep->miscdev_singleton); | ||
2715 | |||
2716 | return ret_val; | ||
2717 | } | ||
2718 | return ret_val; | ||
2719 | } | ||
2720 | |||
2721 | |||
2722 | /** | ||
2723 | * sep_probe - probe a matching PCI device | ||
2724 | * @pdev: pci_device | ||
2725 | * @end: pci_device_id | ||
2726 | * | ||
2727 | * Attempt to set up and configure a SEP device that has been | ||
2728 | * discovered by the PCI layer. | ||
2729 | */ | ||
2730 | static int __devinit sep_probe(struct pci_dev *pdev, | ||
2731 | const struct pci_device_id *ent) | ||
2732 | { | ||
2733 | int error = 0; | ||
2734 | struct sep_device *sep; | ||
2735 | |||
2736 | if (sep_dev != NULL) { | ||
2737 | dev_warn(&pdev->dev, "only one SEP supported.\n"); | ||
2738 | return -EBUSY; | ||
2739 | } | ||
2740 | |||
2741 | /* Enable the device */ | ||
2742 | error = pci_enable_device(pdev); | ||
2743 | if (error) { | ||
2744 | dev_warn(&pdev->dev, "error enabling pci device\n"); | ||
2745 | goto end_function; | ||
2746 | } | ||
2747 | |||
2748 | /* Allocate the sep_device structure for this device */ | ||
2749 | sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC); | ||
2750 | if (sep_dev == NULL) { | ||
2751 | dev_warn(&pdev->dev, | ||
2752 | "can't kmalloc the sep_device structure\n"); | ||
2753 | error = -ENOMEM; | ||
2754 | goto end_function_disable_device; | ||
2755 | } | ||
2756 | |||
2757 | /* | ||
2758 | * We're going to use another variable for actually | ||
2759 | * working with the device; this way, if we have | ||
2760 | * multiple devices in the future, it would be easier | ||
2761 | * to make appropriate changes | ||
2762 | */ | ||
2763 | sep = sep_dev; | ||
2764 | |||
2765 | sep->pdev = pci_dev_get(pdev); | ||
2766 | |||
2767 | init_waitqueue_head(&sep->event); | ||
2768 | init_waitqueue_head(&sep->event_request_daemon); | ||
2769 | spin_lock_init(&sep->snd_rply_lck); | ||
2770 | mutex_init(&sep->sep_mutex); | ||
2771 | mutex_init(&sep->ioctl_mutex); | ||
2772 | |||
2773 | dev_dbg(&sep->pdev->dev, "sep probe: PCI obtained, device being prepared\n"); | ||
2774 | dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision); | ||
2775 | |||
2776 | /* Set up our register area */ | ||
2777 | sep->reg_physical_addr = pci_resource_start(sep->pdev, 0); | ||
2778 | if (!sep->reg_physical_addr) { | ||
2779 | dev_warn(&sep->pdev->dev, "Error getting register start\n"); | ||
2780 | error = -ENODEV; | ||
2781 | goto end_function_free_sep_dev; | ||
2782 | } | ||
2783 | |||
2784 | sep->reg_physical_end = pci_resource_end(sep->pdev, 0); | ||
2785 | if (!sep->reg_physical_end) { | ||
2786 | dev_warn(&sep->pdev->dev, "Error getting register end\n"); | ||
2787 | error = -ENODEV; | ||
2788 | goto end_function_free_sep_dev; | ||
2789 | } | ||
2790 | |||
2791 | sep->reg_addr = ioremap_nocache(sep->reg_physical_addr, | ||
2792 | (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1)); | ||
2793 | if (!sep->reg_addr) { | ||
2794 | dev_warn(&sep->pdev->dev, "Error getting register virtual\n"); | ||
2795 | error = -ENODEV; | ||
2796 | goto end_function_free_sep_dev; | ||
2797 | } | ||
2798 | |||
2799 | dev_dbg(&sep->pdev->dev, | ||
2800 | "Register area start %llx end %llx virtual %p\n", | ||
2801 | (unsigned long long)sep->reg_physical_addr, | ||
2802 | (unsigned long long)sep->reg_physical_end, | ||
2803 | sep->reg_addr); | ||
2804 | |||
2805 | /* Allocate the shared area */ | ||
2806 | sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + | ||
2807 | SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES + | ||
2808 | SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + | ||
2809 | SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + | ||
2810 | SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES; | ||
2811 | |||
2812 | if (sep_map_and_alloc_shared_area(sep)) { | ||
2813 | error = -ENOMEM; | ||
2814 | /* Allocation failed */ | ||
2815 | goto end_function_error; | ||
2816 | } | ||
2817 | |||
2818 | /* Clear ICR register */ | ||
2819 | sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF); | ||
2820 | |||
2821 | /* Set the IMR register - open only GPR 2 */ | ||
2822 | sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13))); | ||
2823 | |||
2824 | /* Read send/receive counters from SEP */ | ||
2825 | sep->reply_ct = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR); | ||
2826 | sep->reply_ct &= 0x3FFFFFFF; | ||
2827 | sep->send_ct = sep->reply_ct; | ||
2828 | |||
2829 | /* Get the interrupt line */ | ||
2830 | error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, | ||
2831 | "sep_driver", sep); | ||
2832 | |||
2833 | if (error) | ||
2834 | goto end_function_deallocate_sep_shared_area; | ||
2835 | |||
2836 | /* The new chip requires a shared area reconfigure */ | ||
2837 | if (sep->pdev->revision == 4) { /* Only for new chip */ | ||
2838 | error = sep_reconfig_shared_area(sep); | ||
2839 | if (error) | ||
2840 | goto end_function_free_irq; | ||
2841 | } | ||
2842 | /* Finally magic up the device nodes */ | ||
2843 | /* Register driver with the fs */ | ||
2844 | error = sep_register_driver_with_fs(sep); | ||
2845 | if (error == 0) | ||
2846 | /* Success */ | ||
2847 | return 0; | ||
2848 | |||
2849 | end_function_free_irq: | ||
2850 | free_irq(pdev->irq, sep); | ||
2851 | |||
2852 | end_function_deallocate_sep_shared_area: | ||
2853 | /* De-allocate shared area */ | ||
2854 | sep_unmap_and_free_shared_area(sep); | ||
2855 | |||
2856 | end_function_error: | ||
2857 | iounmap(sep->reg_addr); | ||
2858 | |||
2859 | end_function_free_sep_dev: | ||
2860 | pci_dev_put(sep_dev->pdev); | ||
2861 | kfree(sep_dev); | ||
2862 | sep_dev = NULL; | ||
2863 | |||
2864 | end_function_disable_device: | ||
2865 | pci_disable_device(pdev); | ||
2866 | |||
2867 | end_function: | ||
2868 | return error; | ||
2869 | } | ||
2870 | |||
2871 | static void sep_remove(struct pci_dev *pdev) | ||
2872 | { | ||
2873 | struct sep_device *sep = sep_dev; | ||
2874 | |||
2875 | /* Unregister from fs */ | ||
2876 | misc_deregister(&sep->miscdev_sep); | ||
2877 | misc_deregister(&sep->miscdev_singleton); | ||
2878 | misc_deregister(&sep->miscdev_daemon); | ||
2879 | |||
2880 | /* Free the irq */ | ||
2881 | free_irq(sep->pdev->irq, sep); | ||
2882 | |||
2883 | /* Free the shared area */ | ||
2884 | sep_unmap_and_free_shared_area(sep_dev); | ||
2885 | iounmap((void *) sep_dev->reg_addr); | ||
2886 | } | ||
2887 | |||
2888 | static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = { | ||
2889 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)}, | ||
2890 | {0} | ||
2891 | }; | ||
2892 | |||
2893 | MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl); | ||
2894 | |||
2895 | /* Field for registering driver to PCI device */ | ||
2896 | static struct pci_driver sep_pci_driver = { | ||
2897 | .name = "sep_sec_driver", | ||
2898 | .id_table = sep_pci_id_tbl, | ||
2899 | .probe = sep_probe, | ||
2900 | .remove = sep_remove | ||
2901 | }; | ||
2902 | |||
2903 | |||
2904 | /** | ||
2905 | * sep_init - init function | ||
2906 | * | ||
2907 | * Module load time. Register the PCI device driver. | ||
2908 | */ | ||
2909 | static int __init sep_init(void) | ||
2910 | { | ||
2911 | return pci_register_driver(&sep_pci_driver); | ||
2912 | } | ||
2913 | |||
2914 | |||
2915 | /** | ||
2916 | * sep_exit - called to unload driver | ||
2917 | * | ||
2918 | * Drop the misc devices then remove and unmap the various resources | ||
2919 | * that are not released by the driver remove method. | ||
2920 | */ | ||
2921 | static void __exit sep_exit(void) | ||
2922 | { | ||
2923 | pci_unregister_driver(&sep_pci_driver); | ||
2924 | } | ||
2925 | |||
2926 | |||
2927 | module_init(sep_init); | ||
2928 | module_exit(sep_exit); | ||
2929 | |||
2930 | MODULE_LICENSE("GPL"); | ||