author     Greg Kroah-Hartman <gregkh@suse.de>   2010-08-23 13:28:31 -0400
committer  Greg Kroah-Hartman <gregkh@suse.de>   2010-08-23 21:15:38 -0400
commit     d49824c06778830c82906884b94d94354c3bbdc8 (patch)
tree       38cc9ffcf12e37c0a7d7459866bffb0239d109f3 /drivers/staging
parent     f86b9984250fa2b71ce36d4693a939a58579583b (diff)
Staging: sep: remove driver
It's currently stalled and the original submitter recommended that it just be dropped at this point in time.

Cc: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging')
-rw-r--r--   drivers/staging/Kconfig                      2
-rw-r--r--   drivers/staging/Makefile                     1
-rw-r--r--   drivers/staging/sep/Kconfig                 10
-rw-r--r--   drivers/staging/sep/Makefile                 2
-rw-r--r--   drivers/staging/sep/TODO                     8
-rw-r--r--   drivers/staging/sep/sep_dev.h              110
-rw-r--r--   drivers/staging/sep/sep_driver.c          2742
-rw-r--r--   drivers/staging/sep/sep_driver_api.h       425
-rw-r--r--   drivers/staging/sep/sep_driver_config.h    225
-rw-r--r--   drivers/staging/sep/sep_driver_hw_defs.h   232
10 files changed, 0 insertions, 3757 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4a7a7a7f11b6..335311a98fdc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -113,8 +113,6 @@ source "drivers/staging/vme/Kconfig"
 
 source "drivers/staging/memrar/Kconfig"
 
-source "drivers/staging/sep/Kconfig"
-
 source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/zram/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index ca5c03eb3ce3..e3f1e1b6095e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_FB_UDL) += udlfb/
 obj-$(CONFIG_HYPERV)           += hv/
 obj-$(CONFIG_VME_BUS)          += vme/
 obj-$(CONFIG_MRST_RAR_HANDLER) += memrar/
-obj-$(CONFIG_DX_SEP)           += sep/
 obj-$(CONFIG_IIO)              += iio/
 obj-$(CONFIG_ZRAM)             += zram/
 obj-$(CONFIG_WLAGS49_H2)       += wlags49_h2/
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
deleted file mode 100644
index 0a9c39c7f2bd..000000000000
--- a/drivers/staging/sep/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-config DX_SEP
-	tristate "Discretix SEP driver"
-#	depends on MRST
-	depends on RAR_REGISTER && PCI
-	default y
-	help
-	  Discretix SEP driver
-
-	  If unsure say M. The compiled module will be
-	  called sep_driver.ko
diff --git a/drivers/staging/sep/Makefile b/drivers/staging/sep/Makefile
deleted file mode 100644
index 628d5f919414..000000000000
--- a/drivers/staging/sep/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_DX_SEP) := sep_driver.o
-
diff --git a/drivers/staging/sep/TODO b/drivers/staging/sep/TODO
deleted file mode 100644
index ff0e931dab64..000000000000
--- a/drivers/staging/sep/TODO
+++ /dev/null
@@ -1,8 +0,0 @@
-Todo's so far (from Alan Cox)
-- Fix firmware loading
-- Get firmware into firmware git tree
-- Review and tidy each algorithm function
-- Check whether it can be plugged into any of the kernel crypto API
-  interfaces
-- Do something about the magic shared memory interface and replace it
-  with something saner (in Linux terms)
diff --git a/drivers/staging/sep/sep_dev.h b/drivers/staging/sep/sep_dev.h
deleted file mode 100644
index 9200524bb64d..000000000000
--- a/drivers/staging/sep/sep_dev.h
+++ /dev/null
@@ -1,110 +0,0 @@
-#ifndef __SEP_DEV_H__
-#define __SEP_DEV_H__
-
-/*
- *
- * sep_dev.h - Security Processor Device Structures
- *
- * Copyright(c) 2009 Intel Corporation. All rights reserved.
- * Copyright(c) 2009 Discretix. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * CONTACTS:
- *
- * Alan Cox		alan@linux.intel.com
- *
- */
-
-struct sep_device {
-	/* pointer to pci dev */
-	struct pci_dev *pdev;
-
-	unsigned long in_use;
-
-	/* address of the shared memory allocated during init for SEP driver
-	   (coherent alloc) */
-	void *shared_addr;
-	/* the physical address of the shared area */
-	dma_addr_t shared_bus;
-
-	/* restricted access region (coherent alloc) */
-	dma_addr_t rar_bus;
-	void *rar_addr;
-	/* firmware regions: cache is at rar_addr */
-	unsigned long cache_size;
-
-	/* follows the cache */
-	dma_addr_t resident_bus;
-	unsigned long resident_size;
-	void *resident_addr;
-
-	/* start address of the access to the SEP registers from driver */
-	void __iomem *reg_addr;
-	/* transaction counter that coordinates the transactions between SEP and HOST */
-	unsigned long send_ct;
-	/* counter for the messages from sep */
-	unsigned long reply_ct;
-	/* counter for the number of bytes allocated in the pool for the current
-	   transaction */
-	unsigned long data_pool_bytes_allocated;
-
-	/* array of pointers to the pages that represent input data for the synchronic
-	   DMA action */
-	struct page **in_page_array;
-
-	/* array of pointers to the pages that represent out data for the synchronic
-	   DMA action */
-	struct page **out_page_array;
-
-	/* number of pages in the sep_in_page_array */
-	unsigned long in_num_pages;
-
-	/* number of pages in the sep_out_page_array */
-	unsigned long out_num_pages;
-
-	/* global data for every flow */
-	struct sep_flow_context_t flows[SEP_DRIVER_NUM_FLOWS];
-
-	/* pointer to the workqueue that handles the flow done interrupts */
-	struct workqueue_struct *flow_wq;
-
-};
-
-static struct sep_device *sep_dev;
-
-static inline void sep_write_reg(struct sep_device *dev, int reg, u32 value)
-{
-	void __iomem *addr = dev->reg_addr + reg;
-	writel(value, addr);
-}
-
-static inline u32 sep_read_reg(struct sep_device *dev, int reg)
-{
-	void __iomem *addr = dev->reg_addr + reg;
-	return readl(addr);
-}
-
-/* wait for SRAM write complete(indirect write */
-static inline void sep_wait_sram_write(struct sep_device *dev)
-{
-	u32 reg_val;
-	do
-		reg_val = sep_read_reg(dev, HW_SRAM_DATA_READY_REG_ADDR);
-	while (!(reg_val & 1));
-}
-
-
-#endif
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
deleted file mode 100644
index ecbde3467b1b..000000000000
--- a/drivers/staging/sep/sep_driver.c
+++ /dev/null
@@ -1,2742 +0,0 @@
1/*
2 *
3 * sep_driver.c - Security Processor Driver main group of functions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34#include <linux/fs.h>
35#include <linux/cdev.h>
36#include <linux/kdev_t.h>
37#include <linux/mutex.h>
38#include <linux/sched.h>
39#include <linux/mm.h>
40#include <linux/poll.h>
41#include <linux/wait.h>
42#include <linux/pci.h>
43#include <linux/firmware.h>
44#include <linux/slab.h>
45#include <asm/ioctl.h>
46#include <linux/ioport.h>
47#include <asm/io.h>
48#include <linux/interrupt.h>
49#include <linux/pagemap.h>
50#include <asm/cacheflush.h>
51#include "sep_driver_hw_defs.h"
52#include "sep_driver_config.h"
53#include "sep_driver_api.h"
54#include "sep_dev.h"
55
56#if SEP_DRIVER_ARM_DEBUG_MODE
57
58#define CRYS_SEP_ROM_length 0x4000
59#define CRYS_SEP_ROM_start_address 0x8000C000UL
60#define CRYS_SEP_ROM_start_address_offset 0xC000UL
61#define SEP_ROM_BANK_register 0x80008420UL
62#define SEP_ROM_BANK_register_offset 0x8420UL
63#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
64
65/*
66 * THESE 2 definitions are specific to the board - must be
67 * defined during integration
68 */
69#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
70
71/* 2M size */
72
73static void sep_load_rom_code(struct sep_device *sep)
74{
75 /* Index variables */
76 unsigned long i, k, j;
77 u32 reg;
78 u32 error;
79 u32 warning;
80
81 /* Loading ROM from SEP_ROM_image.h file */
82 k = sizeof(CRYS_SEP_ROM);
83
84 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
85
86 edbg("SEP Driver: k is %lu\n", k);
87 edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
88 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
89
90 for (i = 0; i < 4; i++) {
91 /* write bank */
92 sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
93
94 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
95 sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
96
97 k = k - 4;
98
99 if (k == 0) {
100 j = CRYS_SEP_ROM_length;
101 i = 4;
102 }
103 }
104 }
105
106 /* reset the SEP */
107 sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
108
109 /* poll for SEP ROM boot finish */
110 do
111 reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
112 while (!reg);
113
114 edbg("SEP Driver: ROM polling ended\n");
115
116 switch (reg) {
117 case 0x1:
118 /* fatal error - read erro status from GPRO */
119 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
120 edbg("SEP Driver: ROM polling case 1\n");
121 break;
122 case 0x4:
123 /* Cold boot ended successfully */
124 case 0x8:
125 /* Warmboot ended successfully */
126 case 0x10:
127 /* ColdWarm boot ended successfully */
128 error = 0;
129 case 0x2:
130 /* Boot First Phase ended */
131 warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
132 case 0x20:
133 edbg("SEP Driver: ROM polling case %d\n", reg);
134 break;
135 }
136
137}
138
139#else
140static void sep_load_rom_code(struct sep_device *sep) { }
141#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
142
143
144
145/*----------------------------------------
146 DEFINES
147-----------------------------------------*/
148
149#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
150#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
151
152/*--------------------------------------------
153 GLOBAL variables
154--------------------------------------------*/
155
156/* debug messages level */
157static int debug;
158module_param(debug, int , 0);
159MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");
160
161/* Keep this a single static object for now to keep the conversion easy */
162
163static struct sep_device sep_instance;
164static struct sep_device *sep_dev = &sep_instance;
165
166/*
167 mutex for the access to the internals of the sep driver
168*/
169static DEFINE_MUTEX(sep_mutex);
170
171
172/* wait queue head (event) of the driver */
173static DECLARE_WAIT_QUEUE_HEAD(sep_event);
174
175/**
176 * sep_load_firmware - copy firmware cache/resident
177 * @sep: device we are loading
178 *
179 * This functions copies the cache and resident from their source
180 * location into destination shared memory.
181 */
182
183static int sep_load_firmware(struct sep_device *sep)
184{
185 const struct firmware *fw;
186 char *cache_name = "sep/cache.image.bin";
187 char *res_name = "sep/resident.image.bin";
188 int error;
189
190 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
191 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192
193 /* load cache */
194 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
195 if (error) {
196 edbg("SEP Driver:cant request cache fw\n");
197 return error;
198 }
199 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200
201 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
202 sep->cache_size = fw->size;
203 release_firmware(fw);
204
205 sep->resident_bus = sep->rar_bus + sep->cache_size;
206 sep->resident_addr = sep->rar_addr + sep->cache_size;
207
208 /* load resident */
209 error = request_firmware(&fw, res_name, &sep->pdev->dev);
210 if (error) {
211 edbg("SEP Driver:cant request res fw\n");
212 return error;
213 }
214 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215
216 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
217 sep->resident_size = fw->size;
218 release_firmware(fw);
219
220 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
221 sep->resident_addr, (unsigned long long)sep->resident_bus,
222 sep->rar_addr, (unsigned long long)sep->rar_bus);
223 return 0;
224}
225
226MODULE_FIRMWARE("sep/cache.image.bin");
227MODULE_FIRMWARE("sep/resident.image.bin");
228
229/**
230 * sep_map_and_alloc_shared_area - allocate shared block
231 * @sep: security processor
232 * @size: size of shared area
233 *
234 * Allocate a shared buffer in host memory that can be used by both the
235 * kernel and also the hardware interface via DMA.
236 */
237
238static int sep_map_and_alloc_shared_area(struct sep_device *sep,
239 unsigned long size)
240{
241 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
242 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
243 &sep->shared_bus, GFP_KERNEL);
244
245 if (!sep->shared_addr) {
246 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
247 return -ENOMEM;
248 }
249 /* set the bus address of the shared area */
250 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
251 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
252 return 0;
253}
254
255/**
256 * sep_unmap_and_free_shared_area - free shared block
257 * @sep: security processor
258 *
259 * Free the shared area allocated to the security processor. The
260 * processor must have finished with this and any final posted
261 * writes cleared before we do so.
262 */
263static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
264{
265 dma_free_coherent(&sep->pdev->dev, size,
266 sep->shared_addr, sep->shared_bus);
267}
268
269/**
270 * sep_shared_virt_to_bus - convert bus/virt addresses
271 *
272 * Returns the bus address inside the shared area according
273 * to the virtual address.
274 */
275
276static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
277 void *virt_address)
278{
279 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
280 edbg("sep: virt to bus b %08llx v %p\n", (unsigned long long) pa,
281 virt_address);
282 return pa;
283}
284
285/**
286 * sep_shared_bus_to_virt - convert bus/virt addresses
287 *
288 * Returns virtual address inside the shared area according
289 * to the bus address.
290 */
291
292static void *sep_shared_bus_to_virt(struct sep_device *sep,
293 dma_addr_t bus_address)
294{
295 return sep->shared_addr + (bus_address - sep->shared_bus);
296}
297
298
299/**
300 * sep_try_open - attempt to open a SEP device
301 * @sep: device to attempt to open
302 *
303 * Atomically attempt to get ownership of a SEP device.
304 * Returns 1 if the device was opened, 0 on failure.
305 */
306
307static int sep_try_open(struct sep_device *sep)
308{
309 if (!test_and_set_bit(0, &sep->in_use))
310 return 1;
311 return 0;
312}
313
314/**
315 * sep_open - device open method
316 * @inode: inode of sep device
317 * @filp: file handle to sep device
318 *
319 * Open method for the SEP device. Called when userspace opens
320 * the SEP device node. Must also release the memory data pool
321 * allocations.
322 *
323 * Returns zero on success otherwise an error code.
324 */
325
326static int sep_open(struct inode *inode, struct file *filp)
327{
328 if (sep_dev == NULL)
329 return -ENODEV;
330
331 /* check the blocking mode */
332 if (filp->f_flags & O_NDELAY) {
333 if (sep_try_open(sep_dev) == 0)
334 return -EAGAIN;
335 } else
336 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
337 return -EINTR;
338
339 /* Bind to the device, we only have one which makes it easy */
340 filp->private_data = sep_dev;
341 /* release data pool allocations */
342 sep_dev->data_pool_bytes_allocated = 0;
343 return 0;
344}
345
346
347/**
348 * sep_release - close a SEP device
349 * @inode: inode of SEP device
350 * @filp: file handle being closed
351 *
352 * Called on the final close of a SEP device. As the open protects against
353 * multiple simultaenous opens that means this method is called when the
354 * final reference to the open handle is dropped.
355 */
356
357static int sep_release(struct inode *inode, struct file *filp)
358{
359 struct sep_device *sep = filp->private_data;
360#if 0 /*!SEP_DRIVER_POLLING_MODE */
361 /* close IMR */
362 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
363 /* release IRQ line */
364 free_irq(SEP_DIRVER_IRQ_NUM, sep);
365
366#endif
367 /* Ensure any blocked open progresses */
368 clear_bit(0, &sep->in_use);
369 wake_up(&sep_event);
370 return 0;
371}
372
373/*---------------------------------------------------------------
374 map function - this functions maps the message shared area
375-----------------------------------------------------------------*/
376static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
377{
378 dma_addr_t bus_addr;
379 struct sep_device *sep = filp->private_data;
380
381 dbg("-------->SEP Driver: mmap start\n");
382
383 /* check that the size of the mapped range is as the size of the message
384 shared area */
385 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
386 edbg("SEP Driver mmap requested size is more than allowed\n");
387 printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
388 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
389 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
390 return -EAGAIN;
391 }
392
393 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
394
395 /* get bus address */
396 bus_addr = sep->shared_bus;
397
398 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
399
400 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
401 edbg("SEP Driver remap_page_range failed\n");
402 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
403 return -EAGAIN;
404 }
405
406 dbg("SEP Driver:<-------- mmap end\n");
407
408 return 0;
409}
410
411
412/*-----------------------------------------------
413 poll function
414*----------------------------------------------*/
415static unsigned int sep_poll(struct file *filp, poll_table * wait)
416{
417 unsigned long count;
418 unsigned int mask = 0;
419 unsigned long retval = 0; /* flow id */
420 struct sep_device *sep = filp->private_data;
421
422 dbg("---------->SEP Driver poll: start\n");
423
424
425#if SEP_DRIVER_POLLING_MODE
426
427 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
428 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
429
430 for (count = 0; count < 10 * 4; count += 4)
431 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
432 }
433
434 sep->reply_ct++;
435#else
436 /* add the event to the polling wait table */
437 poll_wait(filp, &sep_event, wait);
438
439#endif
440
441 edbg("sep->send_ct is %lu\n", sep->send_ct);
442 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
443
444 /* check if the data is ready */
445 if (sep->send_ct == sep->reply_ct) {
446 for (count = 0; count < 12 * 4; count += 4)
447 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
448
449 for (count = 0; count < 10 * 4; count += 4)
450 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
451
452 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
453 edbg("retval is %lu\n", retval);
454 /* check if the this is sep reply or request */
455 if (retval >> 31) {
456 edbg("SEP Driver: sep request in\n");
457 /* request */
458 mask |= POLLOUT | POLLWRNORM;
459 } else {
460 edbg("SEP Driver: sep reply in\n");
461 mask |= POLLIN | POLLRDNORM;
462 }
463 }
464 dbg("SEP Driver:<-------- poll exit\n");
465 return mask;
466}
467
468/**
469 * sep_time_address - address in SEP memory of time
470 * @sep: SEP device we want the address from
471 *
472 * Return the address of the two dwords in memory used for time
473 * setting.
474 */
475
476static u32 *sep_time_address(struct sep_device *sep)
477{
478 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
479}
480
481/**
482 * sep_set_time - set the SEP time
483 * @sep: the SEP we are setting the time for
484 *
485 * Calculates time and sets it at the predefined address.
486 * Called with the sep mutex held.
487 */
488static unsigned long sep_set_time(struct sep_device *sep)
489{
490 struct timeval time;
491 u32 *time_addr; /* address of time as seen by the kernel */
492
493
494 dbg("sep:sep_set_time start\n");
495
496 do_gettimeofday(&time);
497
498 /* set value in the SYSTEM MEMORY offset */
499 time_addr = sep_time_address(sep);
500
501 time_addr[0] = SEP_TIME_VAL_TOKEN;
502 time_addr[1] = time.tv_sec;
503
504 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
505 edbg("SEP Driver:time_addr is %p\n", time_addr);
506 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
507
508 return time.tv_sec;
509}
510
511/**
512 * sep_dump_message - dump the message that is pending
513 * @sep: sep device
514 *
515 * Dump out the message pending in the shared message area
516 */
517
518static void sep_dump_message(struct sep_device *sep)
519{
520 int count;
521 for (count = 0; count < 12 * 4; count += 4)
522 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
523}
524
525/**
526 * sep_send_command_handler - kick off a command
527 * @sep: sep being signalled
528 *
529 * This function raises interrupt to SEP that signals that is has a new
530 * command from the host
531 */
532
533static void sep_send_command_handler(struct sep_device *sep)
534{
535 dbg("sep:sep_send_command_handler start\n");
536
537 mutex_lock(&sep_mutex);
538 sep_set_time(sep);
539
540 /* FIXME: flush cache */
541 flush_cache_all();
542
543 sep_dump_message(sep);
544 /* update counter */
545 sep->send_ct++;
546 /* send interrupt to SEP */
547 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
548 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
549 mutex_unlock(&sep_mutex);
550 return;
551}
552
553/**
554 * sep_send_reply_command_handler - kick off a command reply
555 * @sep: sep being signalled
556 *
557 * This function raises interrupt to SEP that signals that is has a new
558 * command from the host
559 */
560
561static void sep_send_reply_command_handler(struct sep_device *sep)
562{
563 dbg("sep:sep_send_reply_command_handler start\n");
564
565 /* flash cache */
566 flush_cache_all();
567
568 sep_dump_message(sep);
569
570 mutex_lock(&sep_mutex);
571 sep->send_ct++; /* update counter */
572 /* send the interrupt to SEP */
573 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
574 /* update both counters */
575 sep->send_ct++;
576 sep->reply_ct++;
577 mutex_unlock(&sep_mutex);
578 dbg("sep: sep_send_reply_command_handler end\n");
579}
580
581/*
582 This function handles the allocate data pool memory request
583 This function returns calculates the bus address of the
584 allocated memory, and the offset of this area from the mapped address.
585 Therefore, the FVOs in user space can calculate the exact virtual
586 address of this allocated memory
587*/
588static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
589 unsigned long arg)
590{
591 int error;
592 struct sep_driver_alloc_t command_args;
593
594 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
595
596 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
597 if (error) {
598 error = -EFAULT;
599 goto end_function;
600 }
601
602 /* allocate memory */
603 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
604 error = -ENOMEM;
605 goto end_function;
606 }
607
608 /* set the virtual and bus address */
609 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
610 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
611
612 /* write the memory back to the user space */
613 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
614 if (error) {
615 error = -EFAULT;
616 goto end_function;
617 }
618
619 /* set the allocation */
620 sep->data_pool_bytes_allocated += command_args.num_bytes;
621
622end_function:
623 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
624 return error;
625}
626
627/*
628 This function handles write into allocated data pool command
629*/
630static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
631{
632 int error;
633 void *virt_address;
634 unsigned long va;
635 unsigned long app_in_address;
636 unsigned long num_bytes;
637 void *data_pool_area_addr;
638
639 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
640
641 /* get the application address */
642 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
643 if (error)
644 goto end_function;
645
646 /* get the virtual kernel address address */
647 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
648 if (error)
649 goto end_function;
650 virt_address = (void *)va;
651
652 /* get the number of bytes */
653 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
654 if (error)
655 goto end_function;
656
657 /* calculate the start of the data pool */
658 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
659
660
661 /* check that the range of the virtual kernel address is correct */
662 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
663 error = -EINVAL;
664 goto end_function;
665 }
666 /* copy the application data */
667 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
668 if (error)
669 error = -EFAULT;
670end_function:
671 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
672 return error;
673}
674
675/*
676 this function handles the read from data pool command
677*/
678static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
679{
680 int error;
681 /* virtual address of dest application buffer */
682 unsigned long app_out_address;
683 /* virtual address of the data pool */
684 unsigned long va;
685 void *virt_address;
686 unsigned long num_bytes;
687 void *data_pool_area_addr;
688
689 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
690
691 /* get the application address */
692 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
693 if (error)
694 goto end_function;
695
696 /* get the virtual kernel address address */
697 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
698 if (error)
699 goto end_function;
700 virt_address = (void *)va;
701
702 /* get the number of bytes */
703 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
704 if (error)
705 goto end_function;
706
707 /* calculate the start of the data pool */
708 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
709
710 /* FIXME: These are incomplete all over the driver: what about + len
711 and when doing that also overflows */
712 /* check that the range of the virtual kernel address is correct */
713 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
714 error = -EINVAL;
715 goto end_function;
716 }
717
718 /* copy the application data */
719 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
720 if (error)
721 error = -EFAULT;
722end_function:
723 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
724 return error;
725}
726
727/*
728 This function releases all the application virtual buffer physical pages,
729 that were previously locked
730*/
731static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
732{
733 unsigned long count;
734
735 if (dirtyFlag) {
736 for (count = 0; count < num_pages; count++) {
737 /* the out array was written, therefore the data was changed */
738 if (!PageReserved(page_array_ptr[count]))
739 SetPageDirty(page_array_ptr[count]);
740 page_cache_release(page_array_ptr[count]);
741 }
742 } else {
743 /* free in pages - the data was only read, therefore no update was done
744 on those pages */
745 for (count = 0; count < num_pages; count++)
746 page_cache_release(page_array_ptr[count]);
747 }
748
749 if (page_array_ptr)
750 /* free the array */
751 kfree(page_array_ptr);
752
753 return 0;
754}
755
756/*
757 This function locks all the physical pages of the kernel virtual buffer
758 and construct a basic lli array, where each entry holds the physical
759 page address and the size that application data holds in this physical pages
760*/
761static int sep_lock_kernel_pages(struct sep_device *sep,
762 unsigned long kernel_virt_addr,
763 unsigned long data_size,
764 unsigned long *num_pages_ptr,
765 struct sep_lli_entry_t **lli_array_ptr,
766 struct page ***page_array_ptr)
767{
768 int error = 0;
769 /* the the page of the end address of the user space buffer */
770 unsigned long end_page;
771 /* the page of the start address of the user space buffer */
772 unsigned long start_page;
773 /* the range in pages */
774 unsigned long num_pages;
775 struct sep_lli_entry_t *lli_array;
776 /* next kernel address to map */
777 unsigned long next_kernel_address;
778 unsigned long count;
779
780 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
781
782 /* set start and end pages and num pages */
783 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
784 start_page = kernel_virt_addr >> PAGE_SHIFT;
785 num_pages = end_page - start_page + 1;
786
787 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
788 edbg("SEP Driver: data_size is %lu\n", data_size);
789 edbg("SEP Driver: start_page is %lx\n", start_page);
790 edbg("SEP Driver: end_page is %lx\n", end_page);
791 edbg("SEP Driver: num_pages is %lu\n", num_pages);
792
793 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
794 if (!lli_array) {
795 edbg("SEP Driver: kmalloc for lli_array failed\n");
796 error = -ENOMEM;
797 goto end_function;
798 }
799
800 /* set the start address of the first page - app data may start not at
801 the beginning of the page */
802 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
803
804 /* check that not all the data is in the first page only */
805 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
806 lli_array[0].block_size = data_size;
807 else
808 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
809
810 /* debug print */
811 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
812
813 /* advance the address to the start of the next page */
814 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
815
816 /* go from the second page to the prev before last */
817 for (count = 1; count < (num_pages - 1); count++) {
818 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819 lli_array[count].block_size = PAGE_SIZE;
820
821 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
822 next_kernel_address += PAGE_SIZE;
823 }
824
825 /* if more then 1 pages locked - then update for the last page size needed */
826 if (num_pages > 1) {
827 /* update the address of the last page */
828 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
829
830 /* set the size of the last page */
831 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
832
833 if (lli_array[count].block_size == 0) {
834 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
835 dbg("data_size is %lu\n", data_size);
836 while (1);
837 }
838
839 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
840 }
841 /* set output params */
842 *lli_array_ptr = lli_array;
843 *num_pages_ptr = num_pages;
844 *page_array_ptr = 0;
845end_function:
846 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
847 return 0;
848}
849
850/*
851 This function locks all the physical pages of the application virtual buffer
852 and construct a basic lli array, where each entry holds the physical page
853 address and the size that application data holds in this physical pages
854*/
855static int sep_lock_user_pages(struct sep_device *sep,
856 unsigned long app_virt_addr,
857 unsigned long data_size,
858 unsigned long *num_pages_ptr,
859 struct sep_lli_entry_t **lli_array_ptr,
860 struct page ***page_array_ptr)
861{
862 int error = 0;
863 /* the the page of the end address of the user space buffer */
864 unsigned long end_page;
865 /* the page of the start address of the user space buffer */
866 unsigned long start_page;
867 /* the range in pages */
868 unsigned long num_pages;
869 struct page **page_array;
870 struct sep_lli_entry_t *lli_array;
871 unsigned long count;
872 int result;
873
874 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
875
876 /* set start and end pages and num pages */
877 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
878 start_page = app_virt_addr >> PAGE_SHIFT;
879 num_pages = end_page - start_page + 1;
880
881 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
882 edbg("SEP Driver: data_size is %lu\n", data_size);
883 edbg("SEP Driver: start_page is %lu\n", start_page);
884 edbg("SEP Driver: end_page is %lu\n", end_page);
885 edbg("SEP Driver: num_pages is %lu\n", num_pages);
886
887 /* allocate array of pages structure pointers */
888 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
889 if (!page_array) {
890 edbg("SEP Driver: kmalloc for page_array failed\n");
891
892 error = -ENOMEM;
893 goto end_function;
894 }
895
896 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
897 if (!lli_array) {
898 edbg("SEP Driver: kmalloc for lli_array failed\n");
899
900 error = -ENOMEM;
901 goto end_function_with_error1;
902 }
903
904 /* convert the application virtual address into a set of physical */
905 down_read(&current->mm->mmap_sem);
906 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
907 up_read(&current->mm->mmap_sem);
908
909 /* check the number of pages locked - if not all then exit with error */
910 if (result != num_pages) {
911 dbg("SEP Driver: not all pages locked by get_user_pages\n");
912
913 error = -ENOMEM;
914 goto end_function_with_error2;
915 }
916
917 /* flush the cache */
918 for (count = 0; count < num_pages; count++)
919 flush_dcache_page(page_array[count]);
920
921 /* set the start address of the first page - app data may start not at
922 the beginning of the page */
923 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
924
925 /* check that not all the data is in the first page only */
926 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
927 lli_array[0].block_size = data_size;
928 else
929 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
930
931 /* debug print */
932 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
933
934 /* go from the second page to the prev before last */
935 for (count = 1; count < (num_pages - 1); count++) {
936 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
937 lli_array[count].block_size = PAGE_SIZE;
938
939 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
940 }
941
942 /* if more then 1 pages locked - then update for the last page size needed */
943 if (num_pages > 1) {
944 /* update the address of the last page */
945 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
946
947 /* set the size of the last page */
948 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
949
950 if (lli_array[count].block_size == 0) {
951 dbg("app_virt_addr is %08lx\n", app_virt_addr);
952 dbg("data_size is %lu\n", data_size);
953 while (1);
954 }
955 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n",
956 count, lli_array[count].physical_address,
957 count, lli_array[count].block_size);
958 }
959
960 /* set output params */
961 *lli_array_ptr = lli_array;
962 *num_pages_ptr = num_pages;
963 *page_array_ptr = page_array;
964 goto end_function;
965
966end_function_with_error2:
967 /* release the cache */
968 for (count = 0; count < num_pages; count++)
969 page_cache_release(page_array[count]);
970 kfree(lli_array);
971end_function_with_error1:
972 kfree(page_array);
973end_function:
974 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
975 return 0;
976}
977
978
979/*
980 this function calculates the size of data that can be inserted into the lli
981 table from this array the condition is that either the table is full
982 (all etnries are entered), or there are no more entries in the lli array
983*/
984static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
985{
986 unsigned long table_data_size = 0;
987 unsigned long counter;
988
989 /* calculate the data in the out lli table if till we fill the whole
990 table or till the data has ended */
991 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
992 table_data_size += lli_in_array_ptr[counter].block_size;
993 return table_data_size;
994}
995
996/*
997 this functions builds ont lli table from the lli_array according to
998 the given size of data
999*/
1000static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
1001{
1002 unsigned long curr_table_data_size;
1003 /* counter of lli array entry */
1004 unsigned long array_counter;
1005
1006 dbg("SEP Driver:--------> sep_build_lli_table start\n");
1007
1008 /* init currrent table data size and lli array entry counter */
1009 curr_table_data_size = 0;
1010 array_counter = 0;
1011 *num_table_entries_ptr = 1;
1012
1013 edbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1014
1015 /* fill the table till table size reaches the needed amount */
1016 while (curr_table_data_size < table_data_size) {
1017 /* update the number of entries in table */
1018 (*num_table_entries_ptr)++;
1019
1020 lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
1021 lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
1022 curr_table_data_size += lli_table_ptr->block_size;
1023
1024 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1025 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1026 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1027
1028 /* check for overflow of the table data */
1029 if (curr_table_data_size > table_data_size) {
1030 edbg("SEP Driver:curr_table_data_size > table_data_size\n");
1031
1032 /* update the size of block in the table */
1033 lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);
1034
1035 /* update the physical address in the lli array */
1036 lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;
1037
1038 /* update the block size left in the lli array */
1039 lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
1040 } else
1041 /* advance to the next entry in the lli_array */
1042 array_counter++;
1043
1044 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1045 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1046
1047 /* move to the next entry in table */
1048 lli_table_ptr++;
1049 }
1050
1051 /* set the info entry to default */
1052 lli_table_ptr->physical_address = 0xffffffff;
1053 lli_table_ptr->block_size = 0;
1054
1055 edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
1056 edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1057 edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1058
1059 /* set the output parameter */
1060 *num_processed_entries_ptr += array_counter;
1061
1062 edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
1063 dbg("SEP Driver:<-------- sep_build_lli_table end\n");
1064 return;
1065}
1066
1067/*
1068 this function goes over the list of the print created tables and
1069 prints all the data
1070*/
1071static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
1072{
1073 unsigned long table_count;
1074 unsigned long entries_count;
1075
1076 dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");
1077
1078 table_count = 1;
1079 while ((unsigned long) lli_table_ptr != 0xffffffff) {
1080 edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
1081 edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);
1082
1083 /* print entries of the table (without info entry) */
1084 for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
1085 edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
1086 edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
1087 }
1088
1089 /* point to the info entry */
1090 lli_table_ptr--;
1091
1092 edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
1093 edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
1094
1095
1096 table_data_size = lli_table_ptr->block_size & 0xffffff;
1097 num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
1098 lli_table_ptr = (struct sep_lli_entry_t *)
1099 (lli_table_ptr->physical_address);
1100
1101 edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);
1102
1103 if ((unsigned long) lli_table_ptr != 0xffffffff)
1104 lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);
1105
1106 table_count++;
1107 }
1108 dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
1109}
1110
1111
1112/*
1113 This function prepares only input DMA table for synhronic symmetric
1114 operations (HASH)
1115*/
1116static int sep_prepare_input_dma_table(struct sep_device *sep,
1117 unsigned long app_virt_addr,
1118 unsigned long data_size,
1119 unsigned long block_size,
1120 unsigned long *lli_table_ptr,
1121 unsigned long *num_entries_ptr,
1122 unsigned long *table_data_size_ptr,
1123 bool isKernelVirtualAddress)
1124{
1125 /* pointer to the info entry of the table - the last entry */
1126 struct sep_lli_entry_t *info_entry_ptr;
1127 /* array of pointers ot page */
1128 struct sep_lli_entry_t *lli_array_ptr;
1129 /* points to the first entry to be processed in the lli_in_array */
1130 unsigned long current_entry;
1131 /* num entries in the virtual buffer */
1132 unsigned long sep_lli_entries;
1133 /* lli table pointer */
1134 struct sep_lli_entry_t *in_lli_table_ptr;
1135 /* the total data in one table */
1136 unsigned long table_data_size;
1137 /* number of entries in lli table */
1138 unsigned long num_entries_in_table;
1139 /* next table address */
1140 void *lli_table_alloc_addr;
1141 unsigned long result;
1142
1143 dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");
1144
1145 edbg("SEP Driver:data_size is %lu\n", data_size);
1146 edbg("SEP Driver:block_size is %lu\n", block_size);
1147
1148 /* initialize the pages pointers */
1149 sep->in_page_array = 0;
1150 sep->in_num_pages = 0;
1151
1152 if (data_size == 0) {
1153 /* special case - created 2 entries table with zero data */
1154 in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
1155 /* FIXME: Should the entry below not be for _bus */
1156 in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1157 in_lli_table_ptr->block_size = 0;
1158
1159 in_lli_table_ptr++;
1160 in_lli_table_ptr->physical_address = 0xFFFFFFFF;
1161 in_lli_table_ptr->block_size = 0;
1162
1163 *lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1164 *num_entries_ptr = 2;
1165 *table_data_size_ptr = 0;
1166
1167 goto end_function;
1168 }
1169
1170 /* check if the pages are in Kernel Virtual Address layout */
1171 if (isKernelVirtualAddress == true)
1172 /* lock the pages of the kernel buffer and translate them to pages */
1173 result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1174 else
1175 /* lock the pages of the user buffer and translate them to pages */
1176 result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
1177
1178 if (result)
1179 return result;
1180
1181 edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);
1182
1183 current_entry = 0;
1184 info_entry_ptr = 0;
1185 sep_lli_entries = sep->in_num_pages;
1186
1187 /* initiate to point after the message area */
1188 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1189
1190 /* loop till all the entries in in array are not processed */
1191 while (current_entry < sep_lli_entries) {
1192 /* set the new input and output tables */
1193 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1194
1195 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1196
1197 /* calculate the maximum size of data for input table */
1198 table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));
1199
1200 /* now calculate the table size so that it will be module block size */
1201 table_data_size = (table_data_size / block_size) * block_size;
1202
1203 edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);
1204
1205 /* construct input lli table */
1206 sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);
1207
1208 if (info_entry_ptr == 0) {
1209 /* set the output parameters to physical addresses */
1210 *lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1211 *num_entries_ptr = num_entries_in_table;
1212 *table_data_size_ptr = table_data_size;
1213
1214 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
1215 } else {
1216 /* update the info entry of the previous in table */
1217 info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1218 info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1219 }
1220
1221 /* save the pointer to the info entry of the current tables */
1222 info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1223 }
1224
1225 /* print input tables */
1226 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1227 sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);
1228
1229 /* the array of the pages */
1230 kfree(lli_array_ptr);
1231end_function:
1232 dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
1233 return 0;
1234
1235}
1236
1237/*
1238 This function creates the input and output dma tables for
1239 symmetric operations (AES/DES) according to the block size from LLI arays
1240*/
1241static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
1242 struct sep_lli_entry_t *lli_in_array,
1243 unsigned long sep_in_lli_entries,
1244 struct sep_lli_entry_t *lli_out_array,
1245 unsigned long sep_out_lli_entries,
1246 unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
1247{
1248 /* points to the area where next lli table can be allocated: keep void *
1249 as there is pointer scaling to fix otherwise */
1250 void *lli_table_alloc_addr;
1251 /* input lli table */
1252 struct sep_lli_entry_t *in_lli_table_ptr;
1253 /* output lli table */
1254 struct sep_lli_entry_t *out_lli_table_ptr;
1255 /* pointer to the info entry of the table - the last entry */
1256 struct sep_lli_entry_t *info_in_entry_ptr;
1257 /* pointer to the info entry of the table - the last entry */
1258 struct sep_lli_entry_t *info_out_entry_ptr;
1259 /* points to the first entry to be processed in the lli_in_array */
1260 unsigned long current_in_entry;
1261 /* points to the first entry to be processed in the lli_out_array */
1262 unsigned long current_out_entry;
1263 /* max size of the input table */
1264 unsigned long in_table_data_size;
1265 /* max size of the output table */
1266 unsigned long out_table_data_size;
1267 /* flag te signifies if this is the first tables build from the arrays */
1268 unsigned long first_table_flag;
1269 /* the data size that should be in table */
1270 unsigned long table_data_size;
1271 /* number of etnries in the input table */
1272 unsigned long num_entries_in_table;
1273 /* number of etnries in the output table */
1274 unsigned long num_entries_out_table;
1275
1276 dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");
1277
1278 /* initiate to pint after the message area */
1279 lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1280
1281 current_in_entry = 0;
1282 current_out_entry = 0;
1283 first_table_flag = 1;
1284 info_in_entry_ptr = 0;
1285 info_out_entry_ptr = 0;
1286
1287 /* loop till all the entries in in array are not processed */
1288 while (current_in_entry < sep_in_lli_entries) {
1289 /* set the new input and output tables */
1290 in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1291
1292 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1293
1294 /* set the first output tables */
1295 out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;
1296
1297 lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1298
1299 /* calculate the maximum size of data for input table */
1300 in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));
1301
1302 /* calculate the maximum size of data for output table */
1303 out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));
1304
1305 edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
1306 edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);
1307
1308 /* check where the data is smallest */
1309 table_data_size = in_table_data_size;
1310 if (table_data_size > out_table_data_size)
1311 table_data_size = out_table_data_size;
1312
1313 /* now calculate the table size so that it will be module block size */
1314 table_data_size = (table_data_size / block_size) * block_size;
1315
1316 dbg("SEP Driver:table_data_size is %lu\n", table_data_size);
1317
1318 /* construct input lli table */
1319 sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);
1320
1321 /* construct output lli table */
1322 sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);
1323
1324 /* if info entry is null - this is the first table built */
1325 if (info_in_entry_ptr == 0) {
1326 /* set the output parameters to physical addresses */
1327 *lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1328 *in_num_entries_ptr = num_entries_in_table;
1329 *lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1330 *out_num_entries_ptr = num_entries_out_table;
1331 *table_data_size_ptr = table_data_size;
1332
1333 edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
1334 edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
1335 } else {
1336 /* update the info entry of the previous in table */
1337 info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
1338 info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
1339
1340 /* update the info entry of the previous in table */
1341 info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
1342 info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
1343 }
1344
1345 /* save the pointer to the info entry of the current tables */
1346 info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
1347 info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;
1348
1349 edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
1350 edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
1351 edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
1352 }
1353
1354 /* print input tables */
1355 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1356 sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
1357 /* print output tables */
1358 sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
1359 sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
1360 dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
1361 return 0;
1362}
1363
1364
1365/*
1366 This function builds input and output DMA tables for synhronic
1367 symmetric operations (AES, DES). It also checks that each table
1368 is of the modular block size
1369*/
1370static int sep_prepare_input_output_dma_table(struct sep_device *sep,
1371 unsigned long app_virt_in_addr,
1372 unsigned long app_virt_out_addr,
1373 unsigned long data_size,
1374 unsigned long block_size,
1375 unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
1376{
1377 /* array of pointers of page */
1378 struct sep_lli_entry_t *lli_in_array;
1379 /* array of pointers of page */
1380 struct sep_lli_entry_t *lli_out_array;
1381 int result = 0;
1382
1383 dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");
1384
1385 /* initialize the pages pointers */
1386 sep->in_page_array = 0;
1387 sep->out_page_array = 0;
1388
1389 /* check if the pages are in Kernel Virtual Address layout */
1390 if (isKernelVirtualAddress == true) {
1391 /* lock the pages of the kernel buffer and translate them to pages */
1392 result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1393 if (result) {
1394 edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
1395 goto end_function;
1396 }
1397 } else {
1398 /* lock the pages of the user buffer and translate them to pages */
1399 result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
1400 if (result) {
1401 edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
1402 goto end_function;
1403 }
1404 }
1405
1406 if (isKernelVirtualAddress == true) {
1407 result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1408 if (result) {
1409 edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
1410 goto end_function_with_error1;
1411 }
1412 } else {
1413 result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
1414 if (result) {
1415 edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
1416 goto end_function_with_error1;
1417 }
1418 }
1419 edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
1420 edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
1421 edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
1422
1423
1424 /* call the function that creates the tables from the lli arrays */
1425 result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
1426 if (result) {
1427 edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
1428 goto end_function_with_error2;
1429 }
1430
1431 /* fall through - free the lli entry arrays */
1432 dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
1433 dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
1434 dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
1435end_function_with_error2:
1436 kfree(lli_out_array);
1437end_function_with_error1:
1438 kfree(lli_in_array);
1439end_function:
1440 dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
1441 return result;
1442
1443}
1444
1445/*
1446 this function handles the request for creation of the DMA tables
1447 for the synchronous symmetric operations (AES, DES)
1448*/
1449static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1450 unsigned long arg)
1451{
1452 int error;
1453 /* command arguments */
1454 struct sep_driver_build_sync_table_t command_args;
1455
1456 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1457
1458 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1459 if (error) {
1460 error = -EFAULT;
1461 goto end_function;
1462 }
1463
1464 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1465 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1466 edbg("data_size is %lu\n", command_args.data_in_size);
1467 edbg("block_size is %lu\n", command_args.block_size);
1468
1469 /* check if we need to build only input table or input/output */
1470 if (command_args.app_out_address)
1471 /* prepare input and output tables */
1472 error = sep_prepare_input_output_dma_table(sep,
1473 command_args.app_in_address,
1474 command_args.app_out_address,
1475 command_args.data_in_size,
1476 command_args.block_size,
1477 &command_args.in_table_address,
1478 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1479 else
1480 /* prepare input tables */
1481 error = sep_prepare_input_dma_table(sep,
1482 command_args.app_in_address,
1483 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1484
1485 if (error)
1486 goto end_function;
1487 /* copy to user */
1488 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1489 error = -EFAULT;
1490end_function:
1491 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1492 return error;
1493}
1494
1495/*
1496 this function handles the request to free the DMA tables used for synchronous operations
1497*/
1498static int sep_free_dma_table_data_handler(struct sep_device *sep)
1499{
1500 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1501
1502 /* free input pages array */
1503 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1504
1505 /* free output pages array if needed */
1506 if (sep->out_page_array)
1507 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1508
1509 /* reset all the values */
1510 sep->in_page_array = 0;
1511 sep->out_page_array = 0;
1512 sep->in_num_pages = 0;
1513 sep->out_num_pages = 0;
1514 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1515 return 0;
1516}
1517
1518/*
1519 this function finds space for a new flow DMA table
1520*/
1521static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1522 unsigned long **table_address_ptr)
1523{
1524 int error = 0;
1525 /* pointer to the id field of the flow dma table */
1526 unsigned long *start_table_ptr;
1527 /* Do not make start_addr unsigned long * unless fixing the offset
1528 computations ! */
1529 void *flow_dma_area_start_addr;
1530 unsigned long *flow_dma_area_end_addr;
1531 /* maximum table size in words */
1532 unsigned long table_size_in_words;
1533
1534 /* find the start address of the flow DMA table area */
1535 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1536
1537 /* set end address of the flow table area */
1538 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1539
1540 /* set table size in words */
1541 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1542
1543 /* set the pointer to the start address of DMA area */
1544 start_table_ptr = flow_dma_area_start_addr;
1545
1546 /* find the space for the next table */
1547 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1548 start_table_ptr += table_size_in_words;
1549
1550 /* check if we reached the end of the flow tables area */
1551 if (start_table_ptr >= flow_dma_area_end_addr)
1552 error = -1;
1553 else
1554 *table_address_ptr = start_table_ptr;
1555
1556 return error;
1557}
1558
1559/*
1560 This function creates one DMA table for flow and returns its data,
1561 and pointer to its info entry
1562*/
1563static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1564 unsigned long virt_buff_addr,
1565 unsigned long virt_buff_size,
1566 struct sep_lli_entry_t *table_data,
1567 struct sep_lli_entry_t **info_entry_ptr,
1568 struct sep_flow_context_t *flow_data_ptr,
1569 bool isKernelVirtualAddress)
1570{
1571 int error;
1572 /* number of entries (pages) in the lli array */
1573 unsigned long lli_array_size;
1574 struct sep_lli_entry_t *lli_array;
1575 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1576 unsigned long *start_dma_table_ptr;
1577 /* total table data counter */
1578 unsigned long dma_table_data_count;
1579 /* pointer that will keep the pointer to the pages of the virtual buffer */
1580 struct page **page_array_ptr;
1581 unsigned long entry_count;
1582
1583 /* find the space for the new table */
1584 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1585 if (error)
1586 goto end_function;
1587
1588 /* check if the pages are in Kernel Virtual Address layout */
1589 if (isKernelVirtualAddress == true)
1590 /* lock kernel buffer in the memory */
1591 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1592 else
1593 /* lock user buffer in the memory */
1594 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1595
1596 if (error)
1597 goto end_function;
1598
1599 /* store the number of pages at the beginning of the table - this table is
1600 now considered taken */
1601 *start_dma_table_ptr = lli_array_size;
1602
1603 /* point to the place of the pages pointers of the table */
1604 start_dma_table_ptr++;
1605
1606 /* set the pages pointer */
1607 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1608
1609 /* set the pointer to the first entry */
1610 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1611
1612 /* now create the entries for table */
1613 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1614 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1615
1616 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1617
1618 /* set the total data of a table */
1619 dma_table_data_count += lli_array[entry_count].block_size;
1620
1621 flow_dma_table_entry_ptr++;
1622 }
1623
1624 /* set the physical address */
1625 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1626
1627 /* set the num_entries and total data size */
1628 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1629
1630 /* set the info entry */
1631 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1632 flow_dma_table_entry_ptr->block_size = 0;
1633
1634 /* set the pointer to info entry */
1635 *info_entry_ptr = flow_dma_table_entry_ptr;
1636
1637 /* free the array of the lli entries */
1638 kfree(lli_array);
1639end_function:
1640 return error;
1641}
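/*
 * Illustrative layout sketch, not part of the original driver: as written by
 * sep_prepare_one_flow_dma_table() above, each flow DMA table slot in the
 * shared area holds
 *
 *	word 0:          number of locked pages (non-zero marks the slot taken)
 *	word 1:          pointer to the struct page ** array for those pages
 *	following words: one struct sep_lli_entry_t per page
 *	last entry:      info entry (physical_address 0xffffffff, block_size 0),
 *	                 later patched to chain to the next table in the flow
 */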
1642
1643
1644
1645/*
1646 This function creates a list of tables for flow and returns the data for
1647 the first and last tables of the list
1648*/
1649static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1650 unsigned long num_virtual_buffers,
1651 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1652{
1653 int error;
1654 unsigned long virt_buff_addr;
1655 unsigned long virt_buff_size;
1656 struct sep_lli_entry_t table_data;
1657 struct sep_lli_entry_t *info_entry_ptr;
1658 struct sep_lli_entry_t *prev_info_entry_ptr;
1659 unsigned long i;
1660
1661 /* init vars */
1662 error = 0;
1663 prev_info_entry_ptr = 0;
1664
1665 /* init the first table to default */
1666 table_data.physical_address = 0xffffffff;
1667 first_table_data_ptr->physical_address = 0xffffffff;
1668 table_data.block_size = 0;
1669
1670 for (i = 0; i < num_virtual_buffers; i++) {
1671 /* get the virtual buffer address */
1672 error = get_user(virt_buff_addr, &first_buff_addr);
1673 if (error)
1674 goto end_function;
1675
1676 /* get the virtual buffer size */
1677 first_buff_addr++;
1678 error = get_user(virt_buff_size, &first_buff_addr);
1679 if (error)
1680 goto end_function;
1681
1682 /* advance the address to point to the next pair of address|size */
1683 first_buff_addr++;
1684
1685 /* now prepare the one flow LLI table from the data */
1686 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1687 if (error)
1688 goto end_function;
1689
1690 if (i == 0) {
1691 /* if this is the first table - save it to return to the user
1692 application */
1693 *first_table_data_ptr = table_data;
1694
1695 /* set the pointer to info entry */
1696 prev_info_entry_ptr = info_entry_ptr;
1697 } else {
1698 /* not first table - the previous table info entry should
1699 be updated */
1700 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1701
1702 /* set the pointer to info entry */
1703 prev_info_entry_ptr = info_entry_ptr;
1704 }
1705 }
1706
1707 /* set the last table data */
1708 *last_table_data_ptr = table_data;
1709end_function:
1710 return error;
1711}
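/*
 * Illustrative sketch, not part of the original driver: the user buffer list
 * walked by sep_prepare_flow_dma_tables() above is expected to be an array of
 * num_virtual_buffers (address, size) word pairs, e.g.
 *
 *	unsigned long buf_list[] = { addr0, size0, addr1, size1, ... };
 *
 * whose address is passed in virt_buff_data_addr (names here are hypothetical).
 */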
1712
1713/*
1714 this function goes over all the flow tables connected to the given
1715 table and deallocates them
1716*/
1717static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1718{
1719 /* id pointer */
1720 unsigned long *table_ptr;
1721 /* number of entries in the table */
1722 unsigned long num_entries;
1723 unsigned long num_pages;
1724 struct page **pages_ptr;
1725 /* pointer to the info entry of the table */
1726 struct sep_lli_entry_t *info_entry_ptr;
1727
1728 /* set the pointer to the first table */
1729 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1730
1731 /* set the num of entries */
1732 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1733 & SEP_NUM_ENTRIES_MASK;
1734
1735 /* go over all the connected tables */
1736 while (*table_ptr != 0xffffffff) {
1737 /* get number of pages */
1738 num_pages = *(table_ptr - 2);
1739
1740 /* get the pointer to the pages */
1741 pages_ptr = (struct page **) (*(table_ptr - 1));
1742
1743 /* free the pages */
1744 sep_free_dma_pages(pages_ptr, num_pages, 1);
1745
1746 /* go to the info entry */
1747 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1748
1749 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1750 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1751 }
1752
1753 return;
1754}
1755
1756/**
1757 * sep_find_flow_context - find a flow
1758 * @sep: the SEP we are working with
1759 * @flow_id: flow identifier
1760 *
1761 * Returns a pointer the matching flow, or NULL if the flow does not
1762 * exist.
1763 */
1764
1765static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1766 unsigned long flow_id)
1767{
1768 int count;
1769 /*
1770 * always search for flow with id default first - in case we
1771 * already started working on the flow there can be no situation
1772 * when 2 flows are with default flag
1773 */
1774 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1775 if (sep->flows[count].flow_id == flow_id)
1776 return &sep->flows[count];
1777 }
1778 return NULL;
1779}
1780
1781
1782/*
1783 this function handles the request to create the DMA tables for flow
1784*/
1785static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1786 unsigned long arg)
1787{
1788 int error = -ENOENT;
1789 struct sep_driver_build_flow_table_t command_args;
1790 /* first table - output */
1791 struct sep_lli_entry_t first_table_data;
1792 /* dma table data */
1793 struct sep_lli_entry_t last_table_data;
1794 /* pointer to the info entry of the previous DMA table */
1795 struct sep_lli_entry_t *prev_info_entry_ptr;
1796 /* pointer to the flow data structure */
1797 struct sep_flow_context_t *flow_context_ptr;
1798
1799 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1800
1801 /* init variables */
1802 prev_info_entry_ptr = 0;
1803 first_table_data.physical_address = 0xffffffff;
1804
1805 /* find the free structure for flow data */
1806 error = -EINVAL;
1807 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1808 if (flow_context_ptr == NULL)
1809 goto end_function;
1810
1811 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1812 if (error) {
1813 error = -EFAULT;
1814 goto end_function;
1815 }
1816
1817 /* create flow tables */
1818 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1819 if (error)
1820 goto end_function_with_error;
1821
1822 /* check if flow is static */
1823 if (!command_args.flow_type)
1824 /* point the info entry of the last to the info entry of the first */
1825 last_table_data = first_table_data;
1826
1827 /* set output params */
1828 command_args.first_table_addr = first_table_data.physical_address;
1829 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1830 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1831
1832 /* send the parameters to user application */
1833 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1834 if (error) {
1835 error = -EFAULT;
1836 goto end_function_with_error;
1837 }
1838
1839 /* the whole flow has been created - update the flow entry with the temp id */
1840 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1841
1842 /* set the processing tables data in the context */
1843 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1844 flow_context_ptr->input_tables_in_process = first_table_data;
1845 else
1846 flow_context_ptr->output_tables_in_process = first_table_data;
1847
1848 goto end_function;
1849
1850end_function_with_error:
1851 /* free the allocated tables */
1852 sep_deallocated_flow_tables(&first_table_data);
1853end_function:
1854 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1855 return error;
1856}
1857
1858/*
1859 this function handles adding tables to a flow
1860*/
1861static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1862{
1863 int error;
1864 unsigned long num_entries;
1865 struct sep_driver_add_flow_table_t command_args;
1866 struct sep_flow_context_t *flow_context_ptr;
1867 /* first dma table data */
1868 struct sep_lli_entry_t first_table_data;
1869 /* last dma table data */
1870 struct sep_lli_entry_t last_table_data;
1871 /* pointer to the info entry of the current DMA table */
1872 struct sep_lli_entry_t *info_entry_ptr;
1873
1874 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1875
1876 /* get input parameters */
1877 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1878 if (error) {
1879 error = -EFAULT;
1880 goto end_function;
1881 }
1882
1883 /* find the flow structure for the flow id */
1884 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1885 if (flow_context_ptr == NULL)
1886 goto end_function;
1887
1888 /* prepare the flow dma tables */
1889 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1890 if (error)
1891 goto end_function_with_error;
1892
1893 /* now check if there is already an existing add table for this flow */
1894 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1895 /* this buffer was for input buffers */
1896 if (flow_context_ptr->input_tables_flag) {
1897 /* add table already exists - add the new tables to the end
1898 of the previous */
1899 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1900
1901 info_entry_ptr = (struct sep_lli_entry_t *)
1902 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1903
1904 /* connect to list of tables */
1905 *info_entry_ptr = first_table_data;
1906
1907 /* set the first table data */
1908 first_table_data = flow_context_ptr->first_input_table;
1909 } else {
1910 /* set the input flag */
1911 flow_context_ptr->input_tables_flag = 1;
1912
1913 /* set the first table data */
1914 flow_context_ptr->first_input_table = first_table_data;
1915 }
1916 /* set the last table data */
1917 flow_context_ptr->last_input_table = last_table_data;
1918 } else { /* this is output tables */
1919
1920 /* this buffer was for output buffers */
1921 if (flow_context_ptr->output_tables_flag) {
1922 /* add table already exists - add the new tables to
1923 the end of the previous */
1924 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1925
1926 info_entry_ptr = (struct sep_lli_entry_t *)
1927 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1928
1929 /* connect to list of tables */
1930 *info_entry_ptr = first_table_data;
1931
1932 /* set the first table data */
1933 first_table_data = flow_context_ptr->first_output_table;
1934 } else {
1935 /* set the output flag */
1936 flow_context_ptr->output_tables_flag = 1;
1937
1938 /* set the first table data */
1939 flow_context_ptr->first_output_table = first_table_data;
1940 }
1941 /* set the last table data */
1942 flow_context_ptr->last_output_table = last_table_data;
1943 }
1944
1945 /* set output params */
1946 command_args.first_table_addr = first_table_data.physical_address;
1947 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1948 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1949
1950 /* send the parameters to user application */
1951 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1952 if (error)
1953 error = -EFAULT;
1954end_function_with_error:
1955 /* free the allocated tables */
1956 sep_deallocated_flow_tables(&first_table_data);
1957end_function:
1958 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1959 return error;
1960}
1961
1962/*
1963 this function adds the flow add message to the specific flow
1964*/
1965static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1966{
1967 int error;
1968 struct sep_driver_add_message_t command_args;
1969 struct sep_flow_context_t *flow_context_ptr;
1970
1971 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1972
1973 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1974 if (error) {
1975 error = -EFAULT;
1976 goto end_function;
1977 }
1978
1979 /* check input */
1980 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1981 error = -ENOMEM;
1982 goto end_function;
1983 }
1984
1985 /* find the flow context */
1986 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1987 if (flow_context_ptr == NULL)
1988 goto end_function;
1989
1990 /* copy the message into context */
1991 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1992 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1993 if (error)
1994 error = -EFAULT;
1995end_function:
1996 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1997 return error;
1998}
1999
2000
2001/*
2002 this function returns the bus and virtual addresses of the static pool
2003*/
2004static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
2005{
2006 int error;
2007 struct sep_driver_static_pool_addr_t command_args;
2008
2009 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
2010
2011 /*prepare the output parameters in the struct */
2012 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2013 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2014
2015 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
2016
2017 /* send the parameters to user application */
2018 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2019 if (error)
2020 error = -EFAULT;
2021 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2022 return error;
2023}
2024
2025/*
2026 this function gets the offset of the physical address from the start
2027 of the mapped area
2028*/
2029static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2030{
2031 int error;
2032 struct sep_driver_get_mapped_offset_t command_args;
2033
2034 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2035
2036 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2037 if (error) {
2038 error = -EFAULT;
2039 goto end_function;
2040 }
2041
2042 if (command_args.physical_address < sep->shared_bus) {
2043 error = -EINVAL;
2044 goto end_function;
2045 }
2046
2047 /*prepare the output parameters in the struct */
2048 command_args.offset = command_args.physical_address - sep->shared_bus;
2049
2050 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2051
2052 /* send the parameters to user application */
2053 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2054 if (error)
2055 error = -EFAULT;
2056end_function:
2057 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2058 return error;
2059}
2060
2061
2062/*
2063 handles the SEP start command: poll for a status message from SEP and, on a fatal error indication, return the error status read from GPR0
2064*/
2065static int sep_start_handler(struct sep_device *sep)
2066{
2067 unsigned long reg_val;
2068 unsigned long error = 0;
2069
2070 dbg("SEP Driver:--------> sep_start_handler start\n");
2071
2072 /* wait in polling for message from SEP */
2073 do
2074 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2075 while (!reg_val);
2076
2077 /* check the value */
2078 if (reg_val == 0x1)
2079 /* fatal error - read error status from GPR0 */
2080 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2081 dbg("SEP Driver:<-------- sep_start_handler end\n");
2082 return error;
2083}
2084
2085/*
2086 this function handles the request for SEP initialization
2087*/
2088static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2089{
2090 unsigned long message_word;
2091 unsigned long *message_ptr;
2092 struct sep_driver_init_t command_args;
2093 unsigned long counter;
2094 unsigned long error;
2095 unsigned long reg_val;
2096
2097 dbg("SEP Driver:--------> sep_init_handler start\n");
2098 error = 0;
2099
2100 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2101 if (error) {
2102 error = -EFAULT;
2103 goto end_function;
2104 }
2105 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user\n");
2106
2107 /* PATCH - configure the DMA to single-burst instead of multi-burst */
2108 /*sep_configure_dma_burst(); */
2109
2110 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2111
2112 message_ptr = (unsigned long *) command_args.message_addr;
2113
2114 /* set the base address of the SRAM */
2115 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2116
2117 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2118 get_user(message_word, message_ptr);
2119 /* write data to SRAM */
2120 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2121 edbg("SEP Driver:message_word is %lu\n", message_word);
2122 /* wait for write complete */
2123 sep_wait_sram_write(sep);
2124 }
2125 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2126 /* signal SEP */
2127 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2128
2129 do
2130 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2131 while (!(reg_val & 0xFFFFFFFD));
2132
2133 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2134
2135 /* check the value */
2136 if (reg_val == 0x1) {
2137 edbg("SEP Driver:init failed\n");
2138
2139 error = sep_read_reg(sep, 0x8060);
2140 edbg("SEP Driver:sw monitor is %lu\n", error);
2141
2142 /* fatal error - read error status from GPR0 */
2143 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2144 edbg("SEP Driver:error is %lu\n", error);
2145 }
2146end_function:
2147 dbg("SEP Driver:<-------- sep_init_handler end\n");
2148 return error;
2149
2150}
2151
2152/*
2153 this function handles the cache and resident reallocation request
2154*/
2155static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2156 unsigned long arg)
2157{
2158 struct sep_driver_realloc_cache_resident_t command_args;
2159 int error;
2160
2161 /* copy cache and resident to their intended locations */
2162 error = sep_load_firmware(sep);
2163 if (error)
2164 return error;
2165
2166 command_args.new_base_addr = sep->shared_bus;
2167
2168 /* find the new base address according to the lowest address between
2169 cache, resident and shared area */
2170 if (sep->resident_bus < command_args.new_base_addr)
2171 command_args.new_base_addr = sep->resident_bus;
2172 if (sep->rar_bus < command_args.new_base_addr)
2173 command_args.new_base_addr = sep->rar_bus;
2174
2175 /* set the return parameters */
2176 command_args.new_cache_addr = sep->rar_bus;
2177 command_args.new_resident_addr = sep->resident_bus;
2178
2179 /* set the new shared area */
2180 command_args.new_shared_area_addr = sep->shared_bus;
2181
2182 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2183 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2184 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2185 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2186
2187 /* return to user */
2188 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2189 return -EFAULT;
2190 return 0;
2191}
2192
2193/**
2194 * sep_get_time_handler - time request from user space
2195 * @sep: sep we are to set the time for
2196 * @arg: pointer to user space arg buffer
2197 *
2198 * This function reports back the time and the address in the SEP
2199 * shared buffer at which it has been placed. (Do we really need this!!!)
2200 */
2201
2202static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2203{
2204 struct sep_driver_get_time_t command_args;
2205
2206 mutex_lock(&sep_mutex);
2207 command_args.time_value = sep_set_time(sep);
2208 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2209 mutex_unlock(&sep_mutex);
2210 if (copy_to_user((void __user *)arg,
2211 &command_args, sizeof(struct sep_driver_get_time_t)))
2212 return -EFAULT;
2213 return 0;
2214
2215}
2216
2217/*
2218 This API handles the end transaction request
2219*/
2220static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2221{
2222 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2223
2224#if 0 /*!SEP_DRIVER_POLLING_MODE */
2225 /* close IMR */
2226 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2227
2228 /* release IRQ line */
2229 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2230
2231 /* unlock the sep mutex */
2232 mutex_unlock(&sep_mutex);
2233#endif
2234
2235 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2236
2237 return 0;
2238}
2239
2240
2241/**
2242 * sep_set_flow_id_handler - handle flow setting
2243 * @sep: the SEP we are configuring
2244 * @flow_id: the flow we are setting
2245 *
2246 * This function handles the set flow id command
2247 */
2248static int sep_set_flow_id_handler(struct sep_device *sep,
2249 unsigned long flow_id)
2250{
2251 int error = 0;
2252 struct sep_flow_context_t *flow_data_ptr;
2253
2254 /* find the flow data structure that was just used for creating new flow
2255 - its id should be default */
2256
2257 mutex_lock(&sep_mutex);
2258 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2259 if (flow_data_ptr)
2260 flow_data_ptr->flow_id = flow_id; /* set flow id */
2261 else
2262 error = -EINVAL;
2263 mutex_unlock(&sep_mutex);
2264 return error;
2265}
2266
2267static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2268{
2269 int error = 0;
2270 struct sep_device *sep = filp->private_data;
2271
2272 dbg("------------>SEP Driver: ioctl start\n");
2273
2274 edbg("SEP Driver: cmd is %x\n", cmd);
2275
2276 switch (cmd) {
2277 case SEP_IOCSENDSEPCOMMAND:
2278 /* send command to SEP */
2279 sep_send_command_handler(sep);
2280 edbg("SEP Driver: after sep_send_command_handler\n");
2281 break;
2282 case SEP_IOCSENDSEPRPLYCOMMAND:
2283 /* send reply command to SEP */
2284 sep_send_reply_command_handler(sep);
2285 break;
2286 case SEP_IOCALLOCDATAPOLL:
2287 /* allocate data pool */
2288 error = sep_allocate_data_pool_memory_handler(sep, arg);
2289 break;
2290 case SEP_IOCWRITEDATAPOLL:
2291 /* write data into memory pool */
2292 error = sep_write_into_data_pool_handler(sep, arg);
2293 break;
2294 case SEP_IOCREADDATAPOLL:
2295 /* read data from data pool into application memory */
2296 error = sep_read_from_data_pool_handler(sep, arg);
2297 break;
2298 case SEP_IOCCREATESYMDMATABLE:
2299 /* create dma table for synchronous operation */
2300 error = sep_create_sync_dma_tables_handler(sep, arg);
2301 break;
2302 case SEP_IOCCREATEFLOWDMATABLE:
2303 /* create flow dma tables */
2304 error = sep_create_flow_dma_tables_handler(sep, arg);
2305 break;
2306 case SEP_IOCFREEDMATABLEDATA:
2307 /* free the pages */
2308 error = sep_free_dma_table_data_handler(sep);
2309 break;
2310 case SEP_IOCSETFLOWID:
2311 /* set flow id */
2312 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2313 break;
2314 case SEP_IOCADDFLOWTABLE:
2315 /* add tables to the dynamic flow */
2316 error = sep_add_flow_tables_handler(sep, arg);
2317 break;
2318 case SEP_IOCADDFLOWMESSAGE:
2319 /* add message of add tables to flow */
2320 error = sep_add_flow_tables_message_handler(sep, arg);
2321 break;
2322 case SEP_IOCSEPSTART:
2323 /* start command to sep */
2324 error = sep_start_handler(sep);
2325 break;
2326 case SEP_IOCSEPINIT:
2327 /* init command to sep */
2328 error = sep_init_handler(sep, arg);
2329 break;
2330 case SEP_IOCGETSTATICPOOLADDR:
2331 /* get the physical and virtual addresses of the static pool */
2332 error = sep_get_static_pool_addr_handler(sep, arg);
2333 break;
2334 case SEP_IOCENDTRANSACTION:
2335 error = sep_end_transaction_handler(sep, arg);
2336 break;
2337 case SEP_IOCREALLOCCACHERES:
2338 error = sep_realloc_cache_resident_handler(sep, arg);
2339 break;
2340 case SEP_IOCGETMAPPEDADDROFFSET:
2341 error = sep_get_physical_mapped_offset_handler(sep, arg);
2342 break;
2343 case SEP_IOCGETIME:
2344 error = sep_get_time_handler(sep, arg);
2345 break;
2346 default:
2347 error = -ENOTTY;
2348 break;
2349 }
2350 dbg("SEP Driver:<-------- ioctl end\n");
2351 return error;
2352}
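/*
 * Illustrative user-space sketch, not part of the original driver: how a
 * caller might drive the synchronous DMA table ioctl.  The device node name
 * is an assumption; the struct and command come from sep_driver_api.h.
 */
#if 0	/* example only, never built with the driver */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "sep_driver_api.h"

static int example_build_sync_table(void *in_buf, void *out_buf, unsigned long len)
{
	struct sep_driver_build_sync_table_t tbl = {
		.app_in_address = (unsigned long) in_buf,	/* hypothetical buffers */
		.app_out_address = (unsigned long) out_buf,
		.data_in_size = len,
		.block_size = 16,				/* e.g. AES block size */
		.isKernelVirtualAddress = false,
	};
	int fd = open("/dev/sep_sec_driver", O_RDWR);		/* node name assumed */

	if (fd < 0 || ioctl(fd, SEP_IOCCREATESYMDMATABLE, &tbl) < 0)
		return -1;
	/* the driver fills in the table address/entry count on return */
	printf("first input table at %08lx, %lu entries\n",
	       tbl.in_table_address, tbl.in_table_num_entries);
	return 0;
}
#endif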
2353
2354
2355
2356#if !SEP_DRIVER_POLLING_MODE
2357
2358/* handler for flow done interrupt */
2359
2360static void sep_flow_done_handler(struct work_struct *work)
2361{
2362 struct sep_flow_context_t *flow_data_ptr;
2363
2364 /* obtain the mutex */
2365 mutex_lock(&sep_mutex);
2366
2367 /* get the pointer to context */
2368 flow_data_ptr = (struct sep_flow_context_t *) work;
2369
2370 /* free all the current input tables in sep */
2371 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2372
2373 /* free all the current output tables in SEP (if needed) */
2374 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2375 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2376
2377 /* check if we have additional tables to be sent to SEP; only the input
2378 flag needs to be checked */
2379 if (flow_data_ptr->input_tables_flag) {
2380 /* copy the message to the shared RAM and signal SEP */
2381 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
2382
2383 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2384 }
2385 mutex_unlock(&sep_mutex);
2386}
2387/*
2388 interrupt handler function
2389*/
2390static irqreturn_t sep_inthandler(int irq, void *dev_id)
2391{
2392 irqreturn_t int_error;
2393 unsigned long reg_val;
2394 unsigned long flow_id;
2395 struct sep_flow_context_t *flow_context_ptr;
2396 struct sep_device *sep = dev_id;
2397
2398 int_error = IRQ_HANDLED;
2399
2400 /* read the IRR register to check if this is SEP interrupt */
2401 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2402 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2403
2404 /* check if this is the flow interrupt */
2405 if (0 /*reg_val & (0x1 << 11) */ ) {
2406 /* read GPR0 to find out which flow is done */
2407 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2408
2409 /* find the context of the flow */
2410 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2411 if (flow_context_ptr == NULL)
2412 goto end_function_with_error;
2413
2414 /* queue the work */
2415 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2416 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2417
2418 } else {
2419 /* check if this is reply interrupt from SEP */
2420 if (reg_val & (0x1 << 13)) {
2421 /* update the counter of reply messages */
2422 sep->reply_ct++;
2423 /* wake up the waiting process */
2424 wake_up(&sep_event);
2425 } else {
2426 int_error = IRQ_NONE;
2427 goto end_function;
2428 }
2429 }
2430end_function_with_error:
2431 /* clear the interrupt */
2432 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2433end_function:
2434 return int_error;
2435}
2436
2437#endif
2438
2439
2440
2441#if 0
2442
2443static void sep_wait_busy(struct sep_device *sep)
2444{
2445 u32 reg;
2446
2447 do {
2448 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2449 } while (reg);
2450}
2451
2452/*
2453 PATCH for configuring the DMA to single burst instead of multi-burst
2454*/
2455static void sep_configure_dma_burst(struct sep_device *sep)
2456{
2457#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2458
2459 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2460
2461 /* request access to registers from SEP */
2462 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2463
2464 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2465
2466 sep_wait_busy(sep);
2467
2468 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2469
2470 /* set the DMA burst register to single burst */
2471 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2472
2473 /* release the sep busy */
2474 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2475 sep_wait_busy(sep);
2476
2477 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2478
2479}
2480
2481#endif
2482
2483/*
2484 Function that is activated on the successful probe of the SEP device
2485*/
2486static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2487{
2488 int error = 0;
2489 struct sep_device *sep;
2490 int counter;
2491 int size; /* size of memory for allocation */
2492
2493 edbg("Sep pci probe starting\n");
2494 if (sep_dev != NULL) {
2495 dev_warn(&pdev->dev, "only one SEP supported.\n");
2496 return -EBUSY;
2497 }
2498
2499 /* enable the device */
2500 error = pci_enable_device(pdev);
2501 if (error) {
2502 edbg("error enabling pci device\n");
2503 goto end_function;
2504 }
2505
2506 /* set the pci dev pointer */
2507 sep_dev = &sep_instance;
2508 sep = &sep_instance;
2509
2510 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2511 /* transaction counter that coordinates the transactions between SEP
2512 and HOST */
2513 sep->send_ct = 0;
2514 /* counter for the messages from sep */
2515 sep->reply_ct = 0;
2516 /* counter for the number of bytes allocated in the pool
2517 for the current transaction */
2518 sep->data_pool_bytes_allocated = 0;
2519
2520 /* calculate the total size for allocation */
2521 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2522 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2523
2524 /* allocate the shared area */
2525 if (sep_map_and_alloc_shared_area(sep, size)) {
2526 error = -ENOMEM;
2527 /* allocation failed */
2528 goto end_function_error;
2529 }
2530 /* now set the memory regions */
2531#if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2532 /* Note: this test section will need moving before it could ever
2533 work as the registers are not yet mapped ! */
2534 /* send the new SHARED MESSAGE AREA to the SEP */
2535 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2536
2537 /* poll for SEP response */
2538 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2539 while (retval != 0xffffffff && retval != sep->shared_bus)
2540 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2541
2542 /* check the return value (register) */
2543 if (retval != sep->shared_bus) {
2544 error = -ENOMEM;
2545 goto end_function_deallocate_sep_shared_area;
2546 }
2547#endif
2548 /* init the flow contexts */
2549 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2550 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2551
2552 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2553 if (sep->flow_wq == NULL) {
2554 error = -ENOMEM;
2555 edbg("sep_driver:flow queue creation failed\n");
2556 goto end_function_deallocate_sep_shared_area;
2557 }
2558 edbg("SEP Driver: create flow workqueue \n");
2559 sep->pdev = pci_dev_get(pdev);
2560
2561 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2562 if (!sep->reg_addr) {
2563 edbg("sep: ioremap of registers failed.\n");
2564 goto end_function_deallocate_sep_shared_area;
2565 }
2566 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2567
2568 /* load the rom code */
2569 sep_load_rom_code(sep);
2570
2571 /* set up system base address and shared memory location */
2572 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2573 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2574 &sep->rar_bus, GFP_KERNEL);
2575
2576 if (!sep->rar_addr) {
2577 edbg("SEP Driver:can't allocate rar\n");
2578 goto end_function_uniomap;
2579 }
2580
2581
2582 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2583 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2584
2585#if !SEP_DRIVER_POLLING_MODE
2586
2587 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2588
2589 /* clear ICR register */
2590 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2591
2592 /* set the IMR register - open only GPR 2 */
2593 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2594
2595 edbg("SEP Driver: about to call request_irq\n");
2596 /* get the interrupt line */
2597 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2598 if (error)
2599 goto end_function_free_res;
2600 return 0;
2601 edbg("SEP Driver: about to write IMR REG_ADDR");
2602
2603 /* set the IMR register - open only GPR 2 */
2604 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2605
2606end_function_free_res:
2607 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2608 sep->rar_addr, sep->rar_bus);
2609#endif /* SEP_DRIVER_POLLING_MODE */
2610end_function_uniomap:
2611 iounmap(sep->reg_addr);
2612end_function_deallocate_sep_shared_area:
2613 /* de-allocate shared area */
2614 sep_unmap_and_free_shared_area(sep, size);
2615end_function_error:
2616 sep_dev = NULL;
2617end_function:
2618 return error;
2619}
2620
2621static const struct pci_device_id sep_pci_id_tbl[] = {
2622 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2623 {0}
2624};
2625
2626MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2627
2628/* field for registering driver to PCI device */
2629static struct pci_driver sep_pci_driver = {
2630 .name = "sep_sec_driver",
2631 .id_table = sep_pci_id_tbl,
2632 .probe = sep_probe
2633 /* FIXME: remove handler */
2634};
2635
2636/* major and minor device numbers */
2637static dev_t sep_devno;
2638
2639/* the files operations structure of the driver */
2640static struct file_operations sep_file_operations = {
2641 .owner = THIS_MODULE,
2642 .unlocked_ioctl = sep_ioctl,
2643 .poll = sep_poll,
2644 .open = sep_open,
2645 .release = sep_release,
2646 .mmap = sep_mmap,
2647};
2648
2649
2650/* cdev struct of the driver */
2651static struct cdev sep_cdev;
2652
2653/*
2654 this function registers the driver to the file system
2655*/
2656static int sep_register_driver_to_fs(void)
2657{
2658 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2659 if (ret_val) {
2660 edbg("sep: major number allocation failed, retval is %d\n",
2661 ret_val);
2662 return ret_val;
2663 }
2664 /* init cdev */
2665 cdev_init(&sep_cdev, &sep_file_operations);
2666 sep_cdev.owner = THIS_MODULE;
2667
2668 /* register the driver with the kernel */
2669 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2670 if (ret_val) {
2671 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2672 /* unregister dev numbers */
2673 unregister_chrdev_region(sep_devno, 1);
2674 }
2675 return ret_val;
2676}
2677
2678
2679/*--------------------------------------------------------------
2680 init function
2681----------------------------------------------------------------*/
2682static int __init sep_init(void)
2683{
2684 int ret_val = 0;
2685 dbg("SEP Driver:-------->Init start\n");
2686 /* FIXME: Probe can occur before we are ready to survive a probe */
2687 ret_val = pci_register_driver(&sep_pci_driver);
2688 if (ret_val) {
2689 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2690 goto end_function_unregister_from_fs;
2691 }
2692 /* register driver to fs */
2693 ret_val = sep_register_driver_to_fs();
2694 if (ret_val)
2695 goto end_function_unregister_pci;
2696 goto end_function;
2697end_function_unregister_pci:
2698 pci_unregister_driver(&sep_pci_driver);
2699end_function_unregister_from_fs:
2700 /* unregister from fs */
2701 cdev_del(&sep_cdev);
2702 /* unregister dev numbers */
2703 unregister_chrdev_region(sep_devno, 1);
2704end_function:
2705 dbg("SEP Driver:<-------- Init end\n");
2706 return ret_val;
2707}
2708
2709
2710/*-------------------------------------------------------------
2711 exit function
2712--------------------------------------------------------------*/
2713static void __exit sep_exit(void)
2714{
2715 int size;
2716
2717 dbg("SEP Driver:--------> Exit start\n");
2718
2719 /* unregister from fs */
2720 cdev_del(&sep_cdev);
2721 /* unregister dev numbers */
2722 unregister_chrdev_region(sep_devno, 1);
2723 /* calculate the total size for de-allocation */
2724 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2725 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2726 /* FIXME: We need to do this in the unload for the device */
2727 /* free shared area */
2728 if (sep_dev) {
2729 sep_unmap_and_free_shared_area(sep_dev, size);
2730 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2731 iounmap((void *) sep_dev->reg_addr);
2732 edbg("SEP Driver: iounmap \n");
2733 }
2734 edbg("SEP Driver: release_mem_region \n");
2735 dbg("SEP Driver:<-------- Exit end\n");
2736}
2737
2738
2739module_init(sep_init);
2740module_exit(sep_exit);
2741
2742MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/sep_driver_api.h b/drivers/staging/sep/sep_driver_api.h
deleted file mode 100644
index 7ef16da7c4ef..000000000000
--- a/drivers/staging/sep/sep_driver_api.h
+++ /dev/null
@@ -1,425 +0,0 @@
1/*
2 *
3 * sep_driver_api.h - Security Processor Driver api definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_API_H__
33#define __SEP_DRIVER_API_H__
34
35
36
37/*----------------------------------------------------------------
38 IOCTL command defines
39 -----------------------------------------------------------------*/
40
41/* magic number 1 of the sep IOCTL command */
42#define SEP_IOC_MAGIC_NUMBER 's'
43
44/* sends interrupt to sep that message is ready */
45#define SEP_IOCSENDSEPCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 0)
46
47/* sends interrupt to sep that message is ready */
48#define SEP_IOCSENDSEPRPLYCOMMAND _IO(SEP_IOC_MAGIC_NUMBER , 1)
49
50/* allocate memory in data pool */
51#define SEP_IOCALLOCDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 2)
52
53/* write to pre-allocated memory in data pool */
54#define SEP_IOCWRITEDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 3)
55
56/* read from pre-allocated memory in data pool */
57#define SEP_IOCREADDATAPOLL _IO(SEP_IOC_MAGIC_NUMBER , 4)
58
59/* create sym dma lli tables */
60#define SEP_IOCCREATESYMDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 5)
61
62/* create flow dma lli tables */
63#define SEP_IOCCREATEFLOWDMATABLE _IO(SEP_IOC_MAGIC_NUMBER , 6)
64
65/* free dynamic data allocated during table creation */
66#define SEP_IOCFREEDMATABLEDATA _IO(SEP_IOC_MAGIC_NUMBER , 7)
67
68/* get the static pool area addresses (physical and virtual) */
69#define SEP_IOCGETSTATICPOOLADDR _IO(SEP_IOC_MAGIC_NUMBER , 8)
70
71/* set flow id command */
72#define SEP_IOCSETFLOWID _IO(SEP_IOC_MAGIC_NUMBER , 9)
73
74/* add tables to the dynamic flow */
75#define SEP_IOCADDFLOWTABLE _IO(SEP_IOC_MAGIC_NUMBER , 10)
76
77/* add flow add tables message */
78#define SEP_IOCADDFLOWMESSAGE _IO(SEP_IOC_MAGIC_NUMBER , 11)
79
80/* start sep command */
81#define SEP_IOCSEPSTART _IO(SEP_IOC_MAGIC_NUMBER , 12)
82
83/* init sep command */
84#define SEP_IOCSEPINIT _IO(SEP_IOC_MAGIC_NUMBER , 13)
85
86/* end transaction command */
87#define SEP_IOCENDTRANSACTION _IO(SEP_IOC_MAGIC_NUMBER , 15)
88
89/* reallocate cache and resident */
90#define SEP_IOCREALLOCCACHERES _IO(SEP_IOC_MAGIC_NUMBER , 16)
91
92/* get the offset of the address starting from the beginning of the map area */
93#define SEP_IOCGETMAPPEDADDROFFSET _IO(SEP_IOC_MAGIC_NUMBER , 17)
94
95/* get time address and value */
96#define SEP_IOCGETIME _IO(SEP_IOC_MAGIC_NUMBER , 19)
97
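/*
 * Note, not part of the original header: although these commands are encoded
 * with _IO() rather than _IOWR(), the handlers in sep_driver.c copy the
 * structs defined below to and from the ioctl argument, so callers pass a
 * pointer to the matching struct, e.g.
 *	ioctl(fd, SEP_IOCCREATESYMDMATABLE, &build_sync_table_args);
 */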
98/*-------------------------------------------
99 TYPEDEFS
100----------------------------------------------*/
101
102/*
103 init command struct
104*/
105struct sep_driver_init_t {
106 /* address of the init message in host memory */
107 unsigned long message_addr;
108
109 /* size of the init message in words */
110 unsigned long message_size_in_words;
111
112};
113
114
115/*
116 realloc cache resident command
117*/
118struct sep_driver_realloc_cache_resident_t {
119 /* new cache address */
120 u64 new_cache_addr;
121 /* new resident address */
122 u64 new_resident_addr;
123 /* new shared area address */
124 u64 new_shared_area_addr;
125 /* new base address */
126 u64 new_base_addr;
127};
128
129struct sep_driver_alloc_t {
130 /* virtual address of allocated space */
131 unsigned long offset;
132
133 /* physical address of allocated space */
134 unsigned long phys_address;
135
136 /* number of bytes to allocate */
137 unsigned long num_bytes;
138};
139
140/*
141 */
142struct sep_driver_write_t {
143 /* application space address */
144 unsigned long app_address;
145
146 /* address of the data pool */
147 unsigned long datapool_address;
148
149 /* number of bytes to write */
150 unsigned long num_bytes;
151};
152
153/*
154 */
155struct sep_driver_read_t {
156 /* application space address */
157 unsigned long app_address;
158
159 /* address of the data pool */
160 unsigned long datapool_address;
161
162 /* number of bytes to read */
163 unsigned long num_bytes;
164};
165
166/*
167*/
168struct sep_driver_build_sync_table_t {
169 /* address value of the data in */
170 unsigned long app_in_address;
171
172 /* size of data in */
173 unsigned long data_in_size;
174
175 /* address of the data out */
176 unsigned long app_out_address;
177
178 /* the block size of the operation - if needed,
179 every table's data size will be a multiple of this parameter */
180 unsigned long block_size;
181
182 /* the physical address of the first input DMA table */
183 unsigned long in_table_address;
184
185 /* number of entries in the first input DMA table */
186 unsigned long in_table_num_entries;
187
188 /* the physical address of the first output DMA table */
189 unsigned long out_table_address;
190
191 /* number of entries in the first output DMA table */
192 unsigned long out_table_num_entries;
193
194 /* data in the first input table */
195 unsigned long table_data_size;
196
197 /* distinct user/kernel layout */
198 bool isKernelVirtualAddress;
199
200};
201
202/*
203*/
204struct sep_driver_build_flow_table_t {
205 /* flow type */
206 unsigned long flow_type;
207
208 /* flag for input output */
209 unsigned long input_output_flag;
210
211 /* address value of the data in */
212 unsigned long virt_buff_data_addr;
213
214 /* size of data in */
215 unsigned long num_virtual_buffers;
216
217 /* the physical address of the first input DMA table */
218 unsigned long first_table_addr;
219
220 /* number of entries in the first input DMA table */
221 unsigned long first_table_num_entries;
222
223 /* data in the first input table */
224 unsigned long first_table_data_size;
225
226 /* distinct user/kernel layout */
227 bool isKernelVirtualAddress;
228};
229
230
231struct sep_driver_add_flow_table_t {
232 /* flow id */
233 unsigned long flow_id;
234
235 /* flag for input output */
236 unsigned long inputOutputFlag;
237
238 /* address value of the data in */
239 unsigned long virt_buff_data_addr;
240
241 /* number of virtual buffers */
242 unsigned long num_virtual_buffers;
243
244 /* address of the first table */
245 unsigned long first_table_addr;
246
247 /* number of entries in the first table */
248 unsigned long first_table_num_entries;
249
250 /* data size of the first table */
251 unsigned long first_table_data_size;
252
253 /* distinct user/kernel layout */
254 bool isKernelVirtualAddress;
255
256};
257
258/*
259 command struct for set flow id
260*/
261struct sep_driver_set_flow_id_t {
262 /* flow id to set */
263 unsigned long flow_id;
264};
265
266
267/* command struct for add tables message */
268struct sep_driver_add_message_t {
269 /* flow id to set */
270 unsigned long flow_id;
271
272 /* message size in bytes */
273 unsigned long message_size_in_bytes;
274
275 /* address of the message */
276 unsigned long message_address;
277};
278
279/* command struct for static pool addresses */
280struct sep_driver_static_pool_addr_t {
281 /* physical address of the static pool */
282 unsigned long physical_static_address;
283
284 /* virtual address of the static pool */
285 unsigned long virtual_static_address;
286};
287
288/* command struct for getiing offset of the physical address from
289 the start of the mapped area */
290struct sep_driver_get_mapped_offset_t {
291 /* physical address to translate */
292 unsigned long physical_address;
293
294 /* offset of the physical address from the start of the mapped area */
295 unsigned long offset;
296};
297
298/* command struct for getting time value and address */
299struct sep_driver_get_time_t {
300 /* physical address of stored time */
301 unsigned long time_physical_address;
302
303 /* value of the stored time */
304 unsigned long time_value;
305};
306
307
308/*
309 structure that represent one entry in the DMA LLI table
310*/
311struct sep_lli_entry_t {
312 /* physical address */
313 unsigned long physical_address;
314
315 /* block size */
316 unsigned long block_size;
317};
318
319/*
320 structure that represents data needed for lli table construction
321*/
322struct sep_lli_prepare_table_data_t {
323	/* pointer to the memory where the first lli entry is to be built */
324 struct sep_lli_entry_t *lli_entry_ptr;
325
326 /* pointer to the array of lli entries from which the table is to be built */
327 struct sep_lli_entry_t *lli_array_ptr;
328
329 /* number of elements in lli array */
330 int lli_array_size;
331
332 /* number of entries in the created table */
333 int num_table_entries;
334
335 /* number of array entries processed during table creation */
336 int num_array_entries_processed;
337
338	/* the total data size in the created table */
339 int lli_table_total_data_size;
340};
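A minimal illustration (not the driver's actual routine) of how these fields might be consumed when one LLI table is built: copy at most SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP entries (the limit defined in sep_driver_config.h) from the source array into the table memory and report what was consumed. The function name is hypothetical.

static void sep_build_one_lli_table(struct sep_lli_prepare_table_data_t *prep)
{
	int i;
	int limit = prep->lli_array_size;

	if (limit > SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)
		limit = SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	prep->num_table_entries = 0;
	prep->num_array_entries_processed = 0;
	prep->lli_table_total_data_size = 0;

	for (i = 0; i < limit; i++) {
		/* copy one scatter/gather element into the table being built */
		prep->lli_entry_ptr[i] = prep->lli_array_ptr[i];

		/* account for the data this entry covers */
		prep->lli_table_total_data_size +=
			(int)prep->lli_array_ptr[i].block_size;
		prep->num_table_entries++;
		prep->num_array_entries_processed++;
	}
}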
341
342/*
343 structure that represents one table - it is not used in code, just
344 to show what a table looks like
345*/
346struct sep_lli_table_t {
347	/* number of pages mapped by this table. If 0, the table
348	is not defined (used as a validity flag) */
349 unsigned long num_pages;
350 /*
351 pointer to array of page pointers that represent the mapping of the
352 virtual buffer defined by the table to the physical memory. If this
353 pointer is NULL, it means that the table is not defined
354 (used as a valid flag)
355 */
356 struct page **table_page_array_ptr;
357
358 /* maximum flow entries in table */
359 struct sep_lli_entry_t lli_entries[SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE];
360};
361
362
363/*
364 structure for keeping the mapping of the virtual buffer into physical pages
365*/
366struct sep_flow_buffer_data {
367	/* pointer to the array of page struct pointers for the pages of the
368 virtual buffer */
369 struct page **page_array_ptr;
370
371 /* number of pages taken by the virtual buffer */
372 unsigned long num_pages;
373
374	/* this flag signals whether this page_array is the last of several that were
375	sent to SEP in one batch */
376 unsigned long last_page_array_flag;
377};
378
379/*
380 struct that keeps all the data for one flow
381*/
382struct sep_flow_context_t {
383 /*
384	work struct for handling the flow-done interrupt in the workqueue.
385	this member must be placed first, since it is used
386	for casting to the containing flow context
387 */
388 struct work_struct flow_wq;
389
390 /* flow id */
391 unsigned long flow_id;
392
393	/* additional input tables exist */
394 unsigned long input_tables_flag;
395
396	/* additional output tables exist */
397 unsigned long output_tables_flag;
398
399	/* data of the first input table */
400 struct sep_lli_entry_t first_input_table;
401
402 /* data of the first output table */
403 struct sep_lli_entry_t first_output_table;
404
405 /* last input table data */
406 struct sep_lli_entry_t last_input_table;
407
408 /* last output table data */
409 struct sep_lli_entry_t last_output_table;
410
411	/* input table in process (in sep) */
412 struct sep_lli_entry_t input_tables_in_process;
413
414 /* output table in process (in sep) */
415 struct sep_lli_entry_t output_tables_in_process;
416
417 /* size of messages in bytes */
418 unsigned long message_size_in_bytes;
419
420 /* message */
421 unsigned char message[SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES];
422};
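The note above about flow_wq being the first member exists so that the workqueue callback can get back to the owning flow context from the struct work_struct pointer; a minimal sketch of such a handler, using the kernel's container_of() (the handler name is hypothetical, not taken from the driver):

static void sep_flow_done_handler(struct work_struct *work)
{
	struct sep_flow_context_t *flow_ctx =
		container_of(work, struct sep_flow_context_t, flow_wq);

	/* ... handle completion for flow_ctx->flow_id here ... */
}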
423
424
425#endif
diff --git a/drivers/staging/sep/sep_driver_config.h b/drivers/staging/sep/sep_driver_config.h
deleted file mode 100644
index 6008fe5eca09..000000000000
--- a/drivers/staging/sep/sep_driver_config.h
+++ /dev/null
@@ -1,225 +0,0 @@
1/*
2 *
3 * sep_driver_config.h - Security Processor Driver configuration
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef __SEP_DRIVER_CONFIG_H__
33#define __SEP_DRIVER_CONFIG_H__
34
35
36/*--------------------------------------
37 DRIVER CONFIGURATION FLAGS
38 -------------------------------------*/
39
40/* if this flag is set, the driver runs in polling mode rather
41 than in interrupt mode */
42#define SEP_DRIVER_POLLING_MODE 1
43
44/* flag which defines whether the shared area address should be
45 reconfigured (sent to SEP anew) during init of the driver */
46#define SEP_DRIVER_RECONFIG_MESSAGE_AREA 0
47
48/* the mode for running on the ARM1172 Evaluation platform (flag is 1) */
49#define SEP_DRIVER_ARM_DEBUG_MODE 0
50
51/*-------------------------------------------
52 INTERNAL DATA CONFIGURATION
53 -------------------------------------------*/
54
55/* flag for the input array */
56#define SEP_DRIVER_IN_FLAG 0
57
58/* flag for output array */
59#define SEP_DRIVER_OUT_FLAG 1
60
61/* maximum number of entries in one LLI table */
62#define SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP 8
63
64
65/*--------------------------------------------------------
66 SHARED AREA memory total size is 36K
67 it is divided as follows:
68
69 SHARED_MESSAGE_AREA 8K }
70 }
71 STATIC_POOL_AREA 4K } MAPPED AREA ( 24 K)
72 }
73 DATA_POOL_AREA 12K }
74
75 SYNCHRONIC_DMA_TABLES_AREA 5K
76
77 FLOW_DMA_TABLES_AREA 4K
78
79 SYSTEM_MEMORY_AREA 3k
80
81 SYSTEM_MEMORY total size is 3k
82 it is divided as follows:
83
84 TIME_MEMORY_AREA 8B
85-----------------------------------------------------------*/
86
87
88
89/*
90 the maximum length of the message - the rest of the message shared
91 area will be dedicated to the dma lli tables
92*/
93#define SEP_DRIVER_MAX_MESSAGE_SIZE_IN_BYTES (8 * 1024)
94
95/* the size of the message shared area in bytes */
96#define SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES (8 * 1024)
97
98/* the size of the static pool area in bytes */
99#define SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES (4 * 1024)
100
101/* the size of the data pool shared area in bytes */
102#define SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES (12 * 1024)
103
104/* the size of the synchronic dma tables area in bytes */
105#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 5)
106
107
108/* the size of the flow dma tables area in bytes */
109#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES (1024 * 4)
110
111/* system data (time, caller id, etc.) pool */
112#define SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES 100
113
114
115/* area size that is mapped - we map the MESSAGE AREA, STATIC POOL and
116 DATA POOL areas. the area size must be a multiple of 4k */
117#define SEP_DRIVER_MMMAP_AREA_SIZE (1024 * 24)
118
119
120/*-----------------------------------------------
121 offsets of the areas starting from the shared area start address
122*/
123
124/* message area offset */
125#define SEP_DRIVER_MESSAGE_AREA_OFFSET_IN_BYTES 0
126
127/* static pool area offset */
128#define SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES \
129 (SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES)
130
131/* data pool area offset */
132#define SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES \
133 (SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES + \
134 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES)
135
136/* synchronic dma tables area offset */
137#define SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES \
138 (SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + \
139 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)
140
141/* sep driver flow dma tables area offset */
142#define SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES \
143 (SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
144 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES)
145
146/* system memory offset in bytes */
147#define SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES \
148 (SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES + \
149 SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES)
150
151/* offset of the time area */
152#define SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES \
153 (SEP_DRIVER_SYSTEM_DATA_MEMORY_OFFSET_IN_BYTES)
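For reference, substituting the area sizes defined above gives the following concrete offsets (a quick arithmetic check, not part of the header):

	message area                  0x0000  (0K)
	static pool                   0x2000  (8K)
	data pool                     0x3000  (12K)
	synchronic dma tables         0x6000  (24K)
	flow dma tables               0x7400  (29K)
	system data / time            0x8400  (33K)

The first 24K (message + static pool + data pool) is exactly the SEP_DRIVER_MMMAP_AREA_SIZE region that gets mapped, and adding the 5K and 4K table areas plus the 3K system area reaches the 36K total described in the layout comment.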
154
155
156
157/* start physical address of the SEP registers memory in HOST */
158#define SEP_IO_MEM_REGION_START_ADDRESS 0x80000000
159
160/* size of the SEP registers memory region in HOST (for now 100 registers) */
161#define SEP_IO_MEM_REGION_SIZE (2 * 0x100000)
162
163/* define the number of IRQ for SEP interrupts */
164#define SEP_DIRVER_IRQ_NUM 1
165
166/* maximum number of add buffers */
167#define SEP_MAX_NUM_ADD_BUFFERS 100
168
169/* number of flows */
170#define SEP_DRIVER_NUM_FLOWS 4
171
172/* maximum number of entries in flow table */
173#define SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE 25
174
175/* offset of the num entries in the block length entry of the LLI */
176#define SEP_NUM_ENTRIES_OFFSET_IN_BITS 24
177
178/* offset of the interrupt flag in the block length entry of the LLI */
179#define SEP_INT_FLAG_OFFSET_IN_BITS 31
180
181/* mask for extracting data size from LLI */
182#define SEP_TABLE_DATA_SIZE_MASK 0xFFFFFF
183
184/* mask for entries after being shifted left */
185#define SEP_NUM_ENTRIES_MASK 0x7F
186
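Taken together, these definitions imply the layout of the block_size word in a sep_lli_entry_t: bits 0-23 carry the data size, bits 24-30 the number of entries, and bit 31 the interrupt flag. A sketch of helpers built only from the macros above (the helper names are illustrative, not from the driver):

static inline unsigned long sep_pack_lli_block_word(unsigned long data_size,
						    unsigned long num_entries,
						    unsigned long int_flag)
{
	return (data_size & SEP_TABLE_DATA_SIZE_MASK) |
	       ((num_entries & SEP_NUM_ENTRIES_MASK) <<
				SEP_NUM_ENTRIES_OFFSET_IN_BITS) |
	       ((int_flag & 1UL) << SEP_INT_FLAG_OFFSET_IN_BITS);
}

static inline unsigned long sep_lli_block_word_data_size(unsigned long word)
{
	return word & SEP_TABLE_DATA_SIZE_MASK;
}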
187/* default flow id */
188#define SEP_FREE_FLOW_ID 0xFFFFFFFF
189
190/* temp flow id used during creation of a new flow until receiving
191 real flow id from sep */
192#define SEP_TEMP_FLOW_ID (SEP_DRIVER_NUM_FLOWS + 1)
193
194/* maximum add buffers message length in bytes */
195#define SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES (7 * 4)
196
197/* maximum number of concurrent virtual buffers */
198#define SEP_MAX_VIRT_BUFFERS_CONCURRENT 100
199
200/* the token that defines the start of time address */
201#define SEP_TIME_VAL_TOKEN 0x12345678
202
203/* DEBUG LEVEL MASKS */
204#define SEP_DEBUG_LEVEL_BASIC 0x1
205
206#define SEP_DEBUG_LEVEL_EXTENDED 0x4
207
208
209/* Debug helpers */
210
211#define dbg(fmt, args...) \
212do {\
213 if (debug & SEP_DEBUG_LEVEL_BASIC) \
214 printk(KERN_DEBUG fmt, ##args); \
215} while (0)
216
217#define edbg(fmt, args...) \
218do { \
219 if (debug & SEP_DEBUG_LEVEL_EXTENDED) \
220 printk(KERN_DEBUG fmt, ##args); \
221} while (0)
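Both macros test a driver-wide `debug` bitmask that is presumably defined in the driver source rather than in this header; a hypothetical call site, assuming that variable exists, would be:

	edbg("SEP Driver: table_data_size is %lu\n", table_data_size);

The message is emitted only when the SEP_DEBUG_LEVEL_EXTENDED bit is set in `debug`.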
222
223
224
225#endif
diff --git a/drivers/staging/sep/sep_driver_hw_defs.h b/drivers/staging/sep/sep_driver_hw_defs.h
deleted file mode 100644
index ea6abd8a14b4..000000000000
--- a/drivers/staging/sep/sep_driver_hw_defs.h
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 *
3 * sep_driver_hw_defs.h - Security Processor Driver hardware definitions
4 *
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
21 *
22 * CONTACTS:
23 *
24 * Mark Allyn mark.a.allyn@intel.com
25 *
26 * CHANGES:
27 *
28 * 2009.06.26 Initial publish
29 *
30 */
31
32#ifndef SEP_DRIVER_HW_DEFS__H
33#define SEP_DRIVER_HW_DEFS__H
34
35/*--------------------------------------------------------------------------*/
36/* Abstract: HW Registers Defines. */
37/* */
38/* Note: This file was automatically created !!! */
39/* DO NOT EDIT THIS FILE !!! */
40/*--------------------------------------------------------------------------*/
41
42
43/* cf registers */
44#define HW_R0B_ADDR_0_REG_ADDR 0x0000UL
45#define HW_R0B_ADDR_1_REG_ADDR 0x0004UL
46#define HW_R0B_ADDR_2_REG_ADDR 0x0008UL
47#define HW_R0B_ADDR_3_REG_ADDR 0x000cUL
48#define HW_R0B_ADDR_4_REG_ADDR 0x0010UL
49#define HW_R0B_ADDR_5_REG_ADDR 0x0014UL
50#define HW_R0B_ADDR_6_REG_ADDR 0x0018UL
51#define HW_R0B_ADDR_7_REG_ADDR 0x001cUL
52#define HW_R0B_ADDR_8_REG_ADDR 0x0020UL
53#define HW_R2B_ADDR_0_REG_ADDR 0x0080UL
54#define HW_R2B_ADDR_1_REG_ADDR 0x0084UL
55#define HW_R2B_ADDR_2_REG_ADDR 0x0088UL
56#define HW_R2B_ADDR_3_REG_ADDR 0x008cUL
57#define HW_R2B_ADDR_4_REG_ADDR 0x0090UL
58#define HW_R2B_ADDR_5_REG_ADDR 0x0094UL
59#define HW_R2B_ADDR_6_REG_ADDR 0x0098UL
60#define HW_R2B_ADDR_7_REG_ADDR 0x009cUL
61#define HW_R2B_ADDR_8_REG_ADDR 0x00a0UL
62#define HW_R3B_REG_ADDR 0x00C0UL
63#define HW_R4B_REG_ADDR 0x0100UL
64#define HW_CSA_ADDR_0_REG_ADDR 0x0140UL
65#define HW_CSA_ADDR_1_REG_ADDR 0x0144UL
66#define HW_CSA_ADDR_2_REG_ADDR 0x0148UL
67#define HW_CSA_ADDR_3_REG_ADDR 0x014cUL
68#define HW_CSA_ADDR_4_REG_ADDR 0x0150UL
69#define HW_CSA_ADDR_5_REG_ADDR 0x0154UL
70#define HW_CSA_ADDR_6_REG_ADDR 0x0158UL
71#define HW_CSA_ADDR_7_REG_ADDR 0x015cUL
72#define HW_CSA_ADDR_8_REG_ADDR 0x0160UL
73#define HW_CSA_REG_ADDR 0x0140UL
74#define HW_SINB_REG_ADDR 0x0180UL
75#define HW_SOUTB_REG_ADDR 0x0184UL
76#define HW_PKI_CONTROL_REG_ADDR 0x01C0UL
77#define HW_PKI_STATUS_REG_ADDR 0x01C4UL
78#define HW_PKI_BUSY_REG_ADDR 0x01C8UL
79#define HW_PKI_A_1025_REG_ADDR 0x01CCUL
80#define HW_PKI_SDMA_CTL_REG_ADDR 0x01D0UL
81#define HW_PKI_SDMA_OFFSET_REG_ADDR 0x01D4UL
82#define HW_PKI_SDMA_POINTERS_REG_ADDR 0x01D8UL
83#define HW_PKI_SDMA_DLENG_REG_ADDR 0x01DCUL
84#define HW_PKI_SDMA_EXP_POINTERS_REG_ADDR 0x01E0UL
85#define HW_PKI_SDMA_RES_POINTERS_REG_ADDR 0x01E4UL
86#define HW_PKI_CLR_REG_ADDR 0x01E8UL
87#define HW_PKI_SDMA_BUSY_REG_ADDR 0x01E8UL
88#define HW_PKI_SDMA_FIRST_EXP_N_REG_ADDR 0x01ECUL
89#define HW_PKI_SDMA_MUL_BY1_REG_ADDR 0x01F0UL
90#define HW_PKI_SDMA_RMUL_SEL_REG_ADDR 0x01F4UL
91#define HW_DES_KEY_0_REG_ADDR 0x0208UL
92#define HW_DES_KEY_1_REG_ADDR 0x020CUL
93#define HW_DES_KEY_2_REG_ADDR 0x0210UL
94#define HW_DES_KEY_3_REG_ADDR 0x0214UL
95#define HW_DES_KEY_4_REG_ADDR 0x0218UL
96#define HW_DES_KEY_5_REG_ADDR 0x021CUL
97#define HW_DES_CONTROL_0_REG_ADDR 0x0220UL
98#define HW_DES_CONTROL_1_REG_ADDR 0x0224UL
99#define HW_DES_IV_0_REG_ADDR 0x0228UL
100#define HW_DES_IV_1_REG_ADDR 0x022CUL
101#define HW_AES_KEY_0_ADDR_0_REG_ADDR 0x0400UL
102#define HW_AES_KEY_0_ADDR_1_REG_ADDR 0x0404UL
103#define HW_AES_KEY_0_ADDR_2_REG_ADDR 0x0408UL
104#define HW_AES_KEY_0_ADDR_3_REG_ADDR 0x040cUL
105#define HW_AES_KEY_0_ADDR_4_REG_ADDR 0x0410UL
106#define HW_AES_KEY_0_ADDR_5_REG_ADDR 0x0414UL
107#define HW_AES_KEY_0_ADDR_6_REG_ADDR 0x0418UL
108#define HW_AES_KEY_0_ADDR_7_REG_ADDR 0x041cUL
109#define HW_AES_KEY_0_REG_ADDR 0x0400UL
110#define HW_AES_IV_0_ADDR_0_REG_ADDR 0x0440UL
111#define HW_AES_IV_0_ADDR_1_REG_ADDR 0x0444UL
112#define HW_AES_IV_0_ADDR_2_REG_ADDR 0x0448UL
113#define HW_AES_IV_0_ADDR_3_REG_ADDR 0x044cUL
114#define HW_AES_IV_0_REG_ADDR 0x0440UL
115#define HW_AES_CTR1_ADDR_0_REG_ADDR 0x0460UL
116#define HW_AES_CTR1_ADDR_1_REG_ADDR 0x0464UL
117#define HW_AES_CTR1_ADDR_2_REG_ADDR 0x0468UL
118#define HW_AES_CTR1_ADDR_3_REG_ADDR 0x046cUL
119#define HW_AES_CTR1_REG_ADDR 0x0460UL
120#define HW_AES_SK_REG_ADDR 0x0478UL
121#define HW_AES_MAC_OK_REG_ADDR 0x0480UL
122#define HW_AES_PREV_IV_0_ADDR_0_REG_ADDR 0x0490UL
123#define HW_AES_PREV_IV_0_ADDR_1_REG_ADDR 0x0494UL
124#define HW_AES_PREV_IV_0_ADDR_2_REG_ADDR 0x0498UL
125#define HW_AES_PREV_IV_0_ADDR_3_REG_ADDR 0x049cUL
126#define HW_AES_PREV_IV_0_REG_ADDR 0x0490UL
127#define HW_AES_CONTROL_REG_ADDR 0x04C0UL
128#define HW_HASH_H0_REG_ADDR 0x0640UL
129#define HW_HASH_H1_REG_ADDR 0x0644UL
130#define HW_HASH_H2_REG_ADDR 0x0648UL
131#define HW_HASH_H3_REG_ADDR 0x064CUL
132#define HW_HASH_H4_REG_ADDR 0x0650UL
133#define HW_HASH_H5_REG_ADDR 0x0654UL
134#define HW_HASH_H6_REG_ADDR 0x0658UL
135#define HW_HASH_H7_REG_ADDR 0x065CUL
136#define HW_HASH_H8_REG_ADDR 0x0660UL
137#define HW_HASH_H9_REG_ADDR 0x0664UL
138#define HW_HASH_H10_REG_ADDR 0x0668UL
139#define HW_HASH_H11_REG_ADDR 0x066CUL
140#define HW_HASH_H12_REG_ADDR 0x0670UL
141#define HW_HASH_H13_REG_ADDR 0x0674UL
142#define HW_HASH_H14_REG_ADDR 0x0678UL
143#define HW_HASH_H15_REG_ADDR 0x067CUL
144#define HW_HASH_CONTROL_REG_ADDR 0x07C0UL
145#define HW_HASH_PAD_EN_REG_ADDR 0x07C4UL
146#define HW_HASH_PAD_CFG_REG_ADDR 0x07C8UL
147#define HW_HASH_CUR_LEN_0_REG_ADDR 0x07CCUL
148#define HW_HASH_CUR_LEN_1_REG_ADDR 0x07D0UL
149#define HW_HASH_CUR_LEN_2_REG_ADDR 0x07D4UL
150#define HW_HASH_CUR_LEN_3_REG_ADDR 0x07D8UL
151#define HW_HASH_PARAM_REG_ADDR 0x07DCUL
152#define HW_HASH_INT_BUSY_REG_ADDR 0x07E0UL
153#define HW_HASH_SW_RESET_REG_ADDR 0x07E4UL
154#define HW_HASH_ENDIANESS_REG_ADDR 0x07E8UL
155#define HW_HASH_DATA_REG_ADDR 0x07ECUL
156#define HW_DRNG_CONTROL_REG_ADDR 0x0800UL
157#define HW_DRNG_VALID_REG_ADDR 0x0804UL
158#define HW_DRNG_DATA_REG_ADDR 0x0808UL
159#define HW_RND_SRC_EN_REG_ADDR 0x080CUL
160#define HW_AES_CLK_ENABLE_REG_ADDR 0x0810UL
161#define HW_DES_CLK_ENABLE_REG_ADDR 0x0814UL
162#define HW_HASH_CLK_ENABLE_REG_ADDR 0x0818UL
163#define HW_PKI_CLK_ENABLE_REG_ADDR 0x081CUL
164#define HW_CLK_STATUS_REG_ADDR 0x0824UL
165#define HW_CLK_ENABLE_REG_ADDR 0x0828UL
166#define HW_DRNG_SAMPLE_REG_ADDR 0x0850UL
167#define HW_RND_SRC_CTL_REG_ADDR 0x0858UL
168#define HW_CRYPTO_CTL_REG_ADDR 0x0900UL
169#define HW_CRYPTO_STATUS_REG_ADDR 0x090CUL
170#define HW_CRYPTO_BUSY_REG_ADDR 0x0910UL
171#define HW_AES_BUSY_REG_ADDR 0x0914UL
172#define HW_DES_BUSY_REG_ADDR 0x0918UL
173#define HW_HASH_BUSY_REG_ADDR 0x091CUL
174#define HW_CONTENT_REG_ADDR 0x0924UL
175#define HW_VERSION_REG_ADDR 0x0928UL
176#define HW_CONTEXT_ID_REG_ADDR 0x0930UL
177#define HW_DIN_BUFFER_REG_ADDR 0x0C00UL
178#define HW_DIN_MEM_DMA_BUSY_REG_ADDR 0x0c20UL
179#define HW_SRC_LLI_MEM_ADDR_REG_ADDR 0x0c24UL
180#define HW_SRC_LLI_WORD0_REG_ADDR 0x0C28UL
181#define HW_SRC_LLI_WORD1_REG_ADDR 0x0C2CUL
182#define HW_SRAM_SRC_ADDR_REG_ADDR 0x0c30UL
183#define HW_DIN_SRAM_BYTES_LEN_REG_ADDR 0x0c34UL
184#define HW_DIN_SRAM_DMA_BUSY_REG_ADDR 0x0C38UL
185#define HW_WRITE_ALIGN_REG_ADDR 0x0C3CUL
186#define HW_OLD_DATA_REG_ADDR 0x0C48UL
187#define HW_WRITE_ALIGN_LAST_REG_ADDR 0x0C4CUL
188#define HW_DOUT_BUFFER_REG_ADDR 0x0C00UL
189#define HW_DST_LLI_WORD0_REG_ADDR 0x0D28UL
190#define HW_DST_LLI_WORD1_REG_ADDR 0x0D2CUL
191#define HW_DST_LLI_MEM_ADDR_REG_ADDR 0x0D24UL
192#define HW_DOUT_MEM_DMA_BUSY_REG_ADDR 0x0D20UL
193#define HW_SRAM_DEST_ADDR_REG_ADDR 0x0D30UL
194#define HW_DOUT_SRAM_BYTES_LEN_REG_ADDR 0x0D34UL
195#define HW_DOUT_SRAM_DMA_BUSY_REG_ADDR 0x0D38UL
196#define HW_READ_ALIGN_REG_ADDR 0x0D3CUL
197#define HW_READ_LAST_DATA_REG_ADDR 0x0D44UL
198#define HW_RC4_THRU_CPU_REG_ADDR 0x0D4CUL
199#define HW_AHB_SINGLE_REG_ADDR 0x0E00UL
200#define HW_SRAM_DATA_REG_ADDR 0x0F00UL
201#define HW_SRAM_ADDR_REG_ADDR 0x0F04UL
202#define HW_SRAM_DATA_READY_REG_ADDR 0x0F08UL
203#define HW_HOST_IRR_REG_ADDR 0x0A00UL
204#define HW_HOST_IMR_REG_ADDR 0x0A04UL
205#define HW_HOST_ICR_REG_ADDR 0x0A08UL
206#define HW_HOST_SEP_SRAM_THRESHOLD_REG_ADDR 0x0A10UL
207#define HW_HOST_SEP_BUSY_REG_ADDR 0x0A14UL
208#define HW_HOST_SEP_LCS_REG_ADDR 0x0A18UL
209#define HW_HOST_CC_SW_RST_REG_ADDR 0x0A40UL
210#define HW_HOST_SEP_SW_RST_REG_ADDR 0x0A44UL
211#define HW_HOST_FLOW_DMA_SW_INT0_REG_ADDR 0x0A80UL
212#define HW_HOST_FLOW_DMA_SW_INT1_REG_ADDR 0x0A84UL
213#define HW_HOST_FLOW_DMA_SW_INT2_REG_ADDR 0x0A88UL
214#define HW_HOST_FLOW_DMA_SW_INT3_REG_ADDR 0x0A8cUL
215#define HW_HOST_FLOW_DMA_SW_INT4_REG_ADDR 0x0A90UL
216#define HW_HOST_FLOW_DMA_SW_INT5_REG_ADDR 0x0A94UL
217#define HW_HOST_FLOW_DMA_SW_INT6_REG_ADDR 0x0A98UL
218#define HW_HOST_FLOW_DMA_SW_INT7_REG_ADDR 0x0A9cUL
219#define HW_HOST_SEP_HOST_GPR0_REG_ADDR 0x0B00UL
220#define HW_HOST_SEP_HOST_GPR1_REG_ADDR 0x0B04UL
221#define HW_HOST_SEP_HOST_GPR2_REG_ADDR 0x0B08UL
222#define HW_HOST_SEP_HOST_GPR3_REG_ADDR 0x0B0CUL
223#define HW_HOST_HOST_SEP_GPR0_REG_ADDR 0x0B80UL
224#define HW_HOST_HOST_SEP_GPR1_REG_ADDR 0x0B84UL
225#define HW_HOST_HOST_SEP_GPR2_REG_ADDR 0x0B88UL
226#define HW_HOST_HOST_SEP_GPR3_REG_ADDR 0x0B8CUL
227#define HW_HOST_HOST_ENDIAN_REG_ADDR 0x0B90UL
228#define HW_HOST_HOST_COMM_CLK_EN_REG_ADDR 0x0B94UL
229#define HW_CLR_SRAM_BUSY_REG_REG_ADDR 0x0F0CUL
230#define HW_CC_SRAM_BASE_ADDRESS 0x5800UL
231
232#endif /* SEP_DRIVER_HW_DEFS__H */
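The offsets above are relative to the SEP register window that the host side would ioremap() at SEP_IO_MEM_REGION_START_ADDRESS (from sep_driver_config.h) and then access with readl()/writel(). A hypothetical sketch, assuming that writing the pending HW_HOST_IRR_REG_ADDR bits back to HW_HOST_ICR_REG_ADDR acknowledges them (an assumption, not something this header states):

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

static int sep_ack_pending_irqs_example(void)
{
	void __iomem *regs;
	u32 irr;

	regs = ioremap(SEP_IO_MEM_REGION_START_ADDRESS,
		       SEP_IO_MEM_REGION_SIZE);
	if (!regs)
		return -ENOMEM;

	irr = readl(regs + HW_HOST_IRR_REG_ADDR);	/* pending interrupt bits */
	writel(irr, regs + HW_HOST_ICR_REG_ADDR);	/* assumed acknowledge path */

	iounmap(regs);
	return 0;
}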