aboutsummaryrefslogtreecommitdiffstats
path: root/security/tf_driver/tf_comm_tz.c
diff options
context:
space:
mode:
Diffstat (limited to 'security/tf_driver/tf_comm_tz.c')
-rw-r--r--security/tf_driver/tf_comm_tz.c885
1 file changed, 885 insertions(+), 0 deletions(-)
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644
index 00000000000..4c89de84acc
--- /dev/null
+++ b/security/tf_driver/tf_comm_tz.c
@@ -0,0 +1,885 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <asm/div64.h>
21#include <asm/system.h>
22#include <linux/version.h>
23#include <asm/cputype.h>
24#include <linux/interrupt.h>
25#include <linux/page-flags.h>
26#include <linux/pagemap.h>
27#include <linux/vmalloc.h>
28#include <linux/jiffies.h>
29
30#include "tf_defs.h"
31#include "tf_comm.h"
32#include "tf_protocol.h"
33#include "tf_util.h"
34#include "tf_conn.h"
35
36/*
37 * Structure common to all SMC operations
38 */
struct tf_generic_smc {
	u32 reg0;	/* in: SMC function identifier; out: status/result (r0) */
	u32 reg1;	/* in: first parameter; out: first result value (r1) */
	u32 reg2;	/* in: second parameter (r2) */
	u32 reg3;	/* in: third parameter (r3) */
	u32 reg4;	/* in: fourth parameter (r4) */
};
46
47/*----------------------------------------------------------------------------
48 * SMC operations
49 *----------------------------------------------------------------------------*/
50
/*
 * Executes one SMC exchange with the Secure World: loads reg0..reg4 into
 * ARM registers r0..r4, issues the SMC instruction, then stores the
 * returned r0/r1 back into reg0/reg1 (reg2..reg4 are input-only).
 */
static inline void tf_smc_generic_call(
	struct tf_generic_smc *generic_smc)
{
#ifdef CONFIG_SMP
	long ret;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/*
	 * Temporarily pin the current thread to CPU #0 for the duration of
	 * the SMC; the previous affinity is restored afterwards.
	 */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret = sched_setaffinity(0, &local_cpu_mask);
	if (ret != 0)
		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
#endif

	/*
	 * 0xe1600070 is the raw ARM encoding of "SMC #0", emitted with
	 * .word for assemblers that do not know the mnemonic.
	 * NOTE(review): the clobber list names only r0-r4 and declares no
	 * "memory"/"cc" clobber; assumed intentional for this monitor
	 * interface -- TODO confirm.
	 */
	__asm__ volatile(
		"mov r0, %2\n"
		"mov r1, %3\n"
		"mov r2, %4\n"
		"mov r3, %5\n"
		"mov r4, %6\n"
		".word 0xe1600070 @ SMC 0\n"
		"mov %0, r0\n"
		"mov %1, r1\n"
		: "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
		: "r" (generic_smc->reg0), "r" (generic_smc->reg1),
		"r" (generic_smc->reg2), "r" (generic_smc->reg3),
		"r" (generic_smc->reg4)
		: "r0", "r1", "r2", "r3", "r4");

#ifdef CONFIG_SMP
	/* Restore the caller's original CPU affinity. */
	ret = sched_setaffinity(0, &saved_cpu_mask);
	if (ret != 0)
		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
#endif
}
87
88/*
89 * Calls the get protocol version SMC.
90 * Fills the parameter pProtocolVersion with the version number returned by the
91 * SMC
92 */
93static inline void tf_smc_get_protocol_version(u32 *protocol_version)
94{
95 struct tf_generic_smc generic_smc;
96
97 generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
98 generic_smc.reg1 = 0;
99 generic_smc.reg2 = 0;
100 generic_smc.reg3 = 0;
101 generic_smc.reg4 = 0;
102
103 tf_smc_generic_call(&generic_smc);
104 *protocol_version = generic_smc.reg1;
105}
106
107
108/*
109 * Calls the init SMC with the specified parameters.
110 * Returns zero upon successful completion, or an appropriate error code upon
111 * failure.
112 */
113static inline int tf_smc_init(u32 shared_page_descriptor)
114{
115 struct tf_generic_smc generic_smc;
116
117 generic_smc.reg0 = TF_SMC_INIT;
118 /* Descriptor for the layer 1 shared buffer */
119 generic_smc.reg1 = shared_page_descriptor;
120 generic_smc.reg2 = 0;
121 generic_smc.reg3 = 0;
122 generic_smc.reg4 = 0;
123
124 tf_smc_generic_call(&generic_smc);
125 if (generic_smc.reg0 != S_SUCCESS)
126 printk(KERN_ERR "tf_smc_init:"
127 " r0=0x%08X upon return (expected 0x%08X)!\n",
128 generic_smc.reg0,
129 S_SUCCESS);
130
131 return generic_smc.reg0;
132}
133
134
135/*
136 * Calls the reset irq SMC.
137 */
138static inline void tf_smc_reset_irq(void)
139{
140 struct tf_generic_smc generic_smc;
141
142 generic_smc.reg0 = TF_SMC_RESET_IRQ;
143 generic_smc.reg1 = 0;
144 generic_smc.reg2 = 0;
145 generic_smc.reg3 = 0;
146 generic_smc.reg4 = 0;
147
148 tf_smc_generic_call(&generic_smc);
149}
150
151
152/*
153 * Calls the WAKE_UP SMC.
154 * Returns zero upon successful completion, or an appropriate error code upon
155 * failure.
156 */
157static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
158 u32 shared_mem_start_offset,
159 u32 shared_mem_size)
160{
161 struct tf_generic_smc generic_smc;
162
163 generic_smc.reg0 = TF_SMC_WAKE_UP;
164 generic_smc.reg1 = shared_mem_start_offset;
165 /* long form command */
166 generic_smc.reg2 = shared_mem_size | 0x80000000;
167 generic_smc.reg3 = l1_shared_buffer_descriptor;
168 generic_smc.reg4 = 0;
169
170 tf_smc_generic_call(&generic_smc);
171
172 if (generic_smc.reg0 != S_SUCCESS)
173 printk(KERN_ERR "tf_smc_wake_up:"
174 " r0=0x%08X upon return (expected 0x%08X)!\n",
175 generic_smc.reg0,
176 S_SUCCESS);
177
178 return generic_smc.reg0;
179}
180
181/*
182 * Calls the N-Yield SMC.
183 */
184static inline void tf_smc_nyield(void)
185{
186 struct tf_generic_smc generic_smc;
187
188 generic_smc.reg0 = TF_SMC_N_YIELD;
189 generic_smc.reg1 = 0;
190 generic_smc.reg2 = 0;
191 generic_smc.reg3 = 0;
192 generic_smc.reg4 = 0;
193
194 tf_smc_generic_call(&generic_smc);
195}
196
/*
 * Yields the CPU to the Secure World after refreshing the shared time
 * value. Always returns 0.
 */
int tf_schedule_secure_world(struct tf_comm *comm)
{
	/* Publish the current time before entering the Secure World. */
	tf_set_current_time(comm);

	/* Hand over control via the N-Yield SMC. */
	tf_smc_nyield();

	return 0;
}
207
208/*
209 * Returns the L2 descriptor for the specified user page.
210 */
211
212#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
213#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
214
215static u32 tf_get_l2init_descriptor(u32 vaddr)
216{
217 struct page *page;
218 u32 paddr;
219 u32 descriptor;
220
221 descriptor = L2_INIT_DESCRIPTOR_BASE;
222
223 /* get physical address and add to descriptor */
224 page = virt_to_page(vaddr);
225 paddr = page_to_phys(page);
226 descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
227
228 /* Add virtual address v[13:12] bits to descriptor */
229 descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
230 << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
231
232 descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
233
234
235 return descriptor;
236}
237
238
239/*----------------------------------------------------------------------------
240 * Power management
241 *----------------------------------------------------------------------------*/
242
/*
 * Frees the W3B (hibernation) buffer attached to @comm: tears down the
 * shared-memory mapping, releases the coarse page-table allocator, frees
 * the vmalloc'd buffer, and clears the W3B-allocated flag.
 * NOTE(review): also reached from tf_allocate_w3b()'s error path with a
 * partially-built W3B; assumed the helpers tolerate that -- TODO confirm.
 */
static inline void tf_free_w3b(struct tf_comm *comm)
{
	/* Unmap the shared memory first... */
	tf_cleanup_shared_memory(
		&(comm->w3b_cpt_alloc_context),
		&(comm->w3b_shmem_desc),
		0);

	/* ...then release the page tables that backed it... */
	tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));

	/* ...and finally the buffer itself. */
	internal_vfree((void *)comm->w3b);
	comm->w3b = 0;
	comm->w3b_shmem_size = 0;
	clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
}
261
262
/*
 * Allocates (or re-allocates, if the Secure World now requires a bigger
 * one) the W3B hibernation buffer for @comm and registers it in the L1
 * shared buffer's W3B descriptor table.
 * Returns zero upon successful completion, or an appropriate error code
 * upon failure.
 */
static inline int tf_allocate_w3b(struct tf_comm *comm)
{
	int error;
	u32 flags;
	u32 config_flag_s;
	u32 *w3b_descriptors;
	u32 w3b_descriptor_count;
	u32 w3b_current_size;

	config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);

retry:
	if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
		/*
		 * Initialize the shared memory for the W3B
		 */
		tf_init_coarse_page_table_allocator(
			&comm->w3b_cpt_alloc_context);
	} else {
		/*
		 * The W3B is allocated but do we have to reallocate a bigger
		 * one?
		 */
		/* Check H bit (config_flag_s bit 4) */
		if ((config_flag_s & (1<<4)) != 0) {
			/* The size of the W3B may change after SMC_INIT */
			/* Read the current value */
			w3b_current_size = tf_read_reg32(
				&comm->l1_buffer->w3b_size_current_s);
			/* Existing buffer already big enough: keep it. */
			if (comm->w3b_shmem_size > w3b_current_size)
				return 0;

			/* Too small: free and fall back to allocation. */
			tf_free_w3b(comm);
			goto retry;
		} else {
			return 0;
		}
	}

	/* check H bit: pick the current or maximum W3B size */
	if ((config_flag_s & (1<<4)) != 0)
		/* The size of the W3B may change after SMC_INIT */
		/* Read the current value */
		comm->w3b_shmem_size = tf_read_reg32(
			&comm->l1_buffer->w3b_size_current_s);
	else
		comm->w3b_shmem_size = tf_read_reg32(
			&comm->l1_buffer->w3b_size_max_s);

	comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
	if (comm->w3b == 0) {
		printk(KERN_ERR "tf_allocate_w3b():"
			" Out of memory for W3B buffer (%u bytes)!\n",
			(unsigned int)(comm->w3b_shmem_size));
		error = -ENOMEM;
		goto error;
	}

	/* initialize the w3b_shmem_desc structure */
	comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
	INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));

	flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);

	/* directly point to the L1 shared buffer W3B descriptors */
	w3b_descriptors = comm->l1_buffer->w3b_descriptors;

	/*
	 * tf_fill_descriptor_table uses the following parameter as an
	 * IN/OUT
	 */

	error = tf_fill_descriptor_table(
		&(comm->w3b_cpt_alloc_context),
		&(comm->w3b_shmem_desc),
		comm->w3b,
		NULL,
		w3b_descriptors,
		comm->w3b_shmem_size,
		&(comm->w3b_shmem_offset),
		false,
		flags,
		&w3b_descriptor_count);
	if (error != 0) {
		printk(KERN_ERR "tf_allocate_w3b():"
			" tf_fill_descriptor_table failed with "
			"error code 0x%08x!\n",
			error);
		goto error;
	}

	set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));

	/* successful completion */
	return 0;

error:
	/* Tear down whatever was set up above. */
	tf_free_w3b(comm);

	return error;
}
369
/*
 * Performs a Secure World shutdown operation by sending a
 * MANAGEMENT/SHUTDOWN message through tf_send_receive().
 * Returns 0 on success, a negative error code if the exchange itself
 * fails, or the Secure World's error_code otherwise.
 */
int tf_pm_shutdown(struct tf_comm *comm)
{
#ifdef CONFIG_TFN
	/* this function is useless for the TEGRA product */
	return 0;
#else
	int error;
	union tf_command command;
	union tf_answer answer;

	dprintk(KERN_INFO "tf_pm_shutdown()\n");

	memset(&command, 0, sizeof(command));

	command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
	/* message_size is in 32-bit words, header excluded */
	command.header.message_size =
		(sizeof(struct tf_command_management) -
			sizeof(struct tf_command_header))/sizeof(u32);

	command.management.command = TF_MANAGEMENT_SHUTDOWN;

	/* Synchronous exchange with the Secure World. */
	error = tf_send_receive(
		comm,
		&command,
		&answer,
		NULL,
		false);

	if (error != 0) {
		dprintk(KERN_ERR "tf_pm_shutdown(): "
			"tf_send_receive failed (error %d)!\n",
			error);
		return error;
	}

#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
	if (answer.header.error_code != 0)
		dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
	else
		dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
#endif

	return answer.header.error_code;
#endif
}
421
422
423/*
424 * Perform a Secure World hibernate operation.
425 * The routine does not return if the operation succeeds.
426 * the routine returns an appropriate error code if
427 * the operation fails.
428 */
429int tf_pm_hibernate(struct tf_comm *comm)
430{
431#ifdef CONFIG_TFN
432 /* this function is useless for the TEGRA product */
433 return 0;
434#else
435 int error;
436 union tf_command command;
437 union tf_answer answer;
438 u32 first_command;
439 u32 first_free_command;
440
441 dprintk(KERN_INFO "tf_pm_hibernate()\n");
442
443 error = tf_allocate_w3b(comm);
444 if (error != 0) {
445 dprintk(KERN_ERR "tf_pm_hibernate(): "
446 "tf_allocate_w3b failed (error %d)!\n",
447 error);
448 return error;
449 }
450
451 /*
452 * As the polling thread is already hibernating, we
453 * should send the message and receive the answer ourself
454 */
455
456 /* build the "prepare to hibernate" message */
457 command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
458 command.management.command = TF_MANAGEMENT_HIBERNATE;
459 /* Long Form Command */
460 command.management.shared_mem_descriptors[0] = 0;
461 command.management.shared_mem_descriptors[1] = 0;
462 command.management.w3b_size =
463 comm->w3b_shmem_size | 0x80000000;
464 command.management.w3b_start_offset =
465 comm->w3b_shmem_offset;
466 command.header.operation_id = (u32) &answer;
467
468 tf_dump_command(&command);
469
470 /* find a slot to send the message in */
471
472 /* AFY: why not use the function tf_send_receive?? We are
473 * duplicating a lot of subtle code here. And it's not going to be
474 * tested because power management is currently not supported by the
475 * secure world. */
476 for (;;) {
477 int queue_words_count, command_size;
478
479 spin_lock(&(comm->lock));
480
481 first_command = tf_read_reg32(
482 &comm->l1_buffer->first_command);
483 first_free_command = tf_read_reg32(
484 &comm->l1_buffer->first_free_command);
485
486 queue_words_count = first_free_command - first_command;
487 command_size = command.header.message_size
488 + sizeof(struct tf_command_header);
489 if ((queue_words_count + command_size) <
490 TF_N_MESSAGE_QUEUE_CAPACITY) {
491 /* Command queue is not full */
492 memcpy(&comm->l1_buffer->command_queue[
493 first_free_command %
494 TF_N_MESSAGE_QUEUE_CAPACITY],
495 &command,
496 command_size * sizeof(u32));
497
498 tf_write_reg32(&comm->l1_buffer->first_free_command,
499 first_free_command + command_size);
500
501 spin_unlock(&(comm->lock));
502 break;
503 }
504
505 spin_unlock(&(comm->lock));
506 (void)tf_schedule_secure_world(comm);
507 }
508
509 /* now wait for the answer, dispatching other answers */
510 while (1) {
511 u32 first_answer;
512 u32 first_free_answer;
513
514 /* check all the answers */
515 first_free_answer = tf_read_reg32(
516 &comm->l1_buffer->first_free_answer);
517 first_answer = tf_read_reg32(
518 &comm->l1_buffer->first_answer);
519
520 if (first_answer != first_free_answer) {
521 int bFoundAnswer = 0;
522
523 do {
524 /* answer queue not empty */
525 union tf_answer tmp_answer;
526 struct tf_answer_header header;
527 /* size of the command in words of 32bit */
528 int command_size;
529
530 /* get the message_size */
531 memcpy(&header,
532 &comm->l1_buffer->answer_queue[
533 first_answer %
534 TF_S_ANSWER_QUEUE_CAPACITY],
535 sizeof(struct tf_answer_header));
536 command_size = header.message_size +
537 sizeof(struct tf_answer_header);
538
539 /*
540 * NOTE: message_size is the number of words
541 * following the first word
542 */
543 memcpy(&tmp_answer,
544 &comm->l1_buffer->answer_queue[
545 first_answer %
546 TF_S_ANSWER_QUEUE_CAPACITY],
547 command_size * sizeof(u32));
548
549 tf_dump_answer(&tmp_answer);
550
551 if (tmp_answer.header.operation_id ==
552 (u32) &answer) {
553 /*
554 * this is the answer to the "prepare to
555 * hibernate" message
556 */
557 memcpy(&answer,
558 &tmp_answer,
559 command_size * sizeof(u32));
560
561 bFoundAnswer = 1;
562 tf_write_reg32(
563 &comm->l1_buffer->first_answer,
564 first_answer + command_size);
565 break;
566 } else {
567 /*
568 * this is a standard message answer,
569 * dispatch it
570 */
571 struct tf_answer_struct
572 *answerStructure;
573
574 answerStructure =
575 (struct tf_answer_struct *)
576 tmp_answer.header.operation_id;
577
578 memcpy(answerStructure->answer,
579 &tmp_answer,
580 command_size * sizeof(u32));
581
582 answerStructure->answer_copied = true;
583 }
584
585 tf_write_reg32(
586 &comm->l1_buffer->first_answer,
587 first_answer + command_size);
588 } while (first_answer != first_free_answer);
589
590 if (bFoundAnswer)
591 break;
592 }
593
594 /*
595 * since the Secure World is at least running the "prepare to
596 * hibernate" message, its timeout must be immediate So there is
597 * no need to check its timeout and schedule() the current
598 * thread
599 */
600 (void)tf_schedule_secure_world(comm);
601 } /* while (1) */
602
603 printk(KERN_INFO "tf_driver: hibernate.\n");
604 return 0;
605#endif
606}
607
608
/*
 * Performs a Secure World resume operation: wakes the Secure World up
 * with the L1 buffer descriptor and the W3B window, then polls the
 * shared status register until the power state becomes ACTIVE (success)
 * or PANIC/unexpected (error).
 * Returns 0 on success or a negative error code.
 */
int tf_pm_resume(struct tf_comm *comm)
{
#ifdef CONFIG_TFN
	/* this function is useless for the TEGRA product */
	return 0;
#else
	int error;
	u32 status;

	dprintk(KERN_INFO "tf_pm_resume()\n");

	error = tf_smc_wake_up(
		tf_get_l2init_descriptor((u32)comm->l1_buffer),
		comm->w3b_shmem_offset,
		comm->w3b_shmem_size);

	if (error != 0) {
		dprintk(KERN_ERR "tf_pm_resume(): "
			"tf_smc_wake_up failed (error %d)!\n",
			error);
		return error;
	}

	/* Extract the power-state field from the shared status word. */
	status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
		& TF_STATUS_POWER_STATE_MASK)
		>> TF_STATUS_POWER_STATE_SHIFT);

	while ((status != TF_POWER_MODE_ACTIVE)
			&& (status != TF_POWER_MODE_PANIC)) {
		/* Keep yielding to the Secure World until it settles. */
		tf_smc_nyield();

		status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
			& TF_STATUS_POWER_STATE_MASK)
			>> TF_STATUS_POWER_STATE_SHIFT);

		/*
		 * As this may last quite a while, call the kernel scheduler to
		 * hand over CPU for other operations
		 */
		schedule();
	}

	switch (status) {
	case TF_POWER_MODE_ACTIVE:
		break;

	case TF_POWER_MODE_PANIC:
		dprintk(KERN_ERR "tf_pm_resume(): "
			"Secure World POWER_MODE_PANIC!\n");
		return -EINVAL;

	default:
		/* NOTE(review): status is u32 printed with %d; harmless
		 * since the field is masked and shifted. */
		dprintk(KERN_ERR "tf_pm_resume(): "
			"unexpected Secure World POWER_MODE (%d)!\n", status);
		return -EINVAL;
	}

	dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
	return 0;
#endif
}
675
676/*----------------------------------------------------------------------------
677 * Communication initialization and termination
678 *----------------------------------------------------------------------------*/
679
/*
 * Handles the software interrupts issued by the Secure World.
 * Returns IRQ_NONE when the interrupt is not ours (communication not yet
 * initialized, or the P bit of status_s is clear), IRQ_HANDLED otherwise.
 */
static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
{
	struct tf_comm *comm = (struct tf_comm *) dev_id;

	/* Communication with the Secure World not yet established. */
	if (comm->l1_buffer == NULL)
		return IRQ_NONE;

	if ((tf_read_reg32(&comm->l1_buffer->status_s) &
			TF_STATUS_P_MASK) == 0)
		/* interrupt not issued by the Trusted Foundations Software */
		return IRQ_NONE;

	/* Acknowledge the interrupt via the RESET_IRQ SMC. */
	tf_smc_reset_irq();

	/* signal N_SM_EVENT */
	wake_up(&comm->wait_queue);

	return IRQ_HANDLED;
}
702
/*
 * Initializes the communication with the Secure World:
 * checks the protocol major version, optionally registers the software
 * interrupt handler, allocates the L1 shared buffer, issues the INIT SMC,
 * reconciles the IRQ registration with the Secure World configuration,
 * and yields to the Secure World for the first time.
 *
 * Returns S_SUCCESS (0) upon success or an appropriate error code upon
 * failure; on failure tf_terminate() is called to undo partial setup.
 */
int tf_init(struct tf_comm *comm)
{
	int error;
	struct page *buffer_page;
	u32 protocol_version;

	dprintk(KERN_INFO "tf_init()\n");

	spin_lock_init(&(comm->lock));
	comm->flags = 0;
	comm->l1_buffer = NULL;
	init_waitqueue_head(&(comm->wait_queue));

	/*
	 * Check the Secure World protocol version is the expected one.
	 */
	tf_smc_get_protocol_version(&protocol_version);

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		printk(KERN_ERR "tf_init():"
			" Unsupported Secure World Major Version "
			"(0x%02X, expected 0x%02X)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		error = -EIO;
		goto error;
	}

	/*
	 * Register the software interrupt handler if required to.
	 * (soft_int_irq == -1 means "no soft interrupt configured".)
	 */
	if (comm->soft_int_irq != -1) {
		dprintk(KERN_INFO "tf_init(): "
			"Registering software interrupt handler (IRQ %d)\n",
			comm->soft_int_irq);

		error = request_irq(comm->soft_int_irq,
			tf_soft_int_handler,
			IRQF_SHARED,
			TF_DEVICE_BASE_NAME,
			comm);
		if (error != 0) {
			dprintk(KERN_ERR "tf_init(): "
				"request_irq failed for irq %d (error %d)\n",
				comm->soft_int_irq, error);
			goto error;
		}
		set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
	}

	/*
	 * Allocate and initialize the L1 shared buffer (one zeroed page).
	 */
	comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
	if (comm->l1_buffer == NULL) {
		printk(KERN_ERR "tf_init():"
			" get_zeroed_page failed for L1 shared buffer!\n");
		error = -ENOMEM;
		goto error;
	}

	/*
	 * Ensure the page storing the L1 shared buffer is mapped.
	 * NOTE(review): trylock_page()'s return value is ignored -- assumed
	 * a freshly allocated page is never contended here; TODO confirm.
	 */
	buffer_page = virt_to_page(comm->l1_buffer);
	trylock_page(buffer_page);

	dprintk(KERN_INFO "tf_init(): "
		"L1 shared buffer allocated at virtual:%p, "
		"physical:%p (page:%p)\n",
		comm->l1_buffer,
		(void *)virt_to_phys(comm->l1_buffer),
		buffer_page);

	set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));

	/*
	 * Init SMC
	 * NOTE(review): on failure the Secure World status code (not a
	 * negative errno) is returned to the caller, unlike other paths.
	 */
	error = tf_smc_init(
		tf_get_l2init_descriptor((u32)comm->l1_buffer));
	if (error != S_SUCCESS) {
		dprintk(KERN_ERR "tf_init(): "
			"tf_smc_init failed (error 0x%08X)!\n",
			error);
		goto error;
	}

	/*
	 * check whether the interrupts are actually enabled
	 * If not, remove irq handler
	 */
	if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
			TF_CONFIG_FLAG_S) == 0) {
		if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
				&(comm->flags)) != 0) {
			dprintk(KERN_INFO "tf_init(): "
				"Interrupts not used, unregistering "
				"softint (IRQ %d)\n",
				comm->soft_int_irq);

			free_irq(comm->soft_int_irq, comm);
		}
	} else {
		if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
				&(comm->flags)) == 0) {
			/*
			 * Interrupts are enabled in the Secure World, but not
			 * handled by driver
			 */
			dprintk(KERN_ERR "tf_init(): "
				"soft_interrupt argument not provided\n");
			error = -EINVAL;
			goto error;
		}
	}

	/*
	 * Successful completion.
	 */

	/* yield for the first time */
	(void)tf_schedule_secure_world(comm);

	dprintk(KERN_INFO "tf_init(): Success\n");
	return S_SUCCESS;

error:
	/*
	 * Error handling: tf_terminate() undoes whatever was set up,
	 * driven by the TF_COMM_FLAG_* bits.
	 */
	dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
		error);
	tf_terminate(comm);
	return error;
}
850
851
/*
 * Attempt to terminate the communication with the Secure World.
 * The W3B buffer, the L1 shared buffer and the soft interrupt are
 * released, each guarded by its TF_COMM_FLAG_* bit so the function is
 * safe to call from tf_init()'s error path.
 * Calling this routine terminates definitely the communication with the
 * Secure World: there is no way to inform the Secure World of a new
 * L1 shared buffer to be used once it has been initialized.
 */
void tf_terminate(struct tf_comm *comm)
{
	dprintk(KERN_INFO "tf_terminate()\n");

	set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));

	if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
			&(comm->flags))) != 0) {
		dprintk(KERN_INFO "tf_terminate(): "
			"Freeing the W3B buffer...\n");
		tf_free_w3b(comm);
	}

	if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
			&(comm->flags))) != 0) {
		/* Undo the trylock_page() done in tf_init(). */
		__clear_page_locked(virt_to_page(comm->l1_buffer));
		internal_free_page((unsigned long) comm->l1_buffer);
	}

	if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
			&(comm->flags))) != 0) {
		dprintk(KERN_INFO "tf_terminate(): "
			"Unregistering softint (IRQ %d)\n",
			comm->soft_int_irq);
		free_irq(comm->soft_int_irq, comm);
	}
}
885}