Diffstat (limited to 'security/tf_driver')
-rw-r--r--  security/tf_driver/Kconfig        |    8
-rw-r--r--  security/tf_driver/Makefile       |   36
-rw-r--r--  security/tf_driver/s_version.h    |   92
-rw-r--r--  security/tf_driver/tf_comm.c      | 1745
-rw-r--r--  security/tf_driver/tf_comm.h      |  202
-rw-r--r--  security/tf_driver/tf_comm_tz.c   |  885
-rw-r--r--  security/tf_driver/tf_conn.c      | 1574
-rw-r--r--  security/tf_driver/tf_conn.h      |  106
-rw-r--r--  security/tf_driver/tf_defs.h      |  538
-rw-r--r--  security/tf_driver/tf_device.c    |  796
-rw-r--r--  security/tf_driver/tf_protocol.h  |  690
-rw-r--r--  security/tf_driver/tf_util.c      | 1143
-rw-r--r--  security/tf_driver/tf_util.h      |  122
13 files changed, 7937 insertions(+), 0 deletions(-)
diff --git a/security/tf_driver/Kconfig b/security/tf_driver/Kconfig
new file mode 100644
index 00000000000..2a980c5ade4
--- /dev/null
+++ b/security/tf_driver/Kconfig
@@ -0,0 +1,8 @@
1config TRUSTED_FOUNDATIONS
2 bool "Enable TF Driver"
3 default n
4 select CRYPTO_SHA1
5 help
6 This option adds kernel support for communication with the Trusted Foundations.
7 If you are unsure how to answer this question, answer N.
8
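For reference, enabling the driver amounts to one line in a board defconfig (illustrative fragment, not part of this commit); the select above then pulls in CRYPTO_SHA1 automatically:

	CONFIG_TRUSTED_FOUNDATIONS=y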
diff --git a/security/tf_driver/Makefile b/security/tf_driver/Makefile
new file mode 100644
index 00000000000..dfadb7d9740
--- /dev/null
+++ b/security/tf_driver/Makefile
@@ -0,0 +1,36 @@
1#
2# Copyright (c) 2006-2010 Trusted Logic S.A.
3# All Rights Reserved.
4#
5# This program is free software; you can redistribute it and/or
6# modify it under the terms of the GNU General Public License as
7# published by the Free Software Foundation; either version 2 of
8# the License, or (at your option) any later version.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
18# MA 02111-1307 USA
19#
20
21# debug options
22#EXTRA_CFLAGS += -O0 -DDEBUG -D_DEBUG -DCONFIG_TF_DRIVER_DEBUG_SUPPORT
23EXTRA_CFLAGS += -DNDEBUG
24EXTRA_CFLAGS += -DLINUX -DCONFIG_TF_TRUSTZONE -DCONFIG_TFN
25
26ifdef S_VERSION_BUILD
27EXTRA_CFLAGS += -DS_VERSION_BUILD=$(S_VERSION_BUILD)
28endif
29
30tf_driver-objs += tf_util.o
31tf_driver-objs += tf_conn.o
32tf_driver-objs += tf_device.o
33tf_driver-objs += tf_comm.o
34tf_driver-objs += tf_comm_tz.o
35
36obj-$(CONFIG_TRUSTED_FOUNDATIONS) += tf_driver.o
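Because the Makefile only adds -DS_VERSION_BUILD when the make variable is set, the build number is normally injected from the command line. A hypothetical invocation (ARCH and CROSS_COMPILE values are illustrative):

	make ARCH=arm CROSS_COMPILE=arm-eabi- S_VERSION_BUILD=1234 modules

The value then reaches every compilation unit as -DS_VERSION_BUILD=1234 and, via s_version.h below, ends up in the reported version string.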
diff --git a/security/tf_driver/s_version.h b/security/tf_driver/s_version.h
new file mode 100644
index 00000000000..6244d3fe7e8
--- /dev/null
+++ b/security/tf_driver/s_version.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __S_VERSION_H__
21#define __S_VERSION_H__
22
23/*
24 * Usage: define S_VERSION_BUILD on the compiler's command line.
25 *
26 * Then set:
27 * - S_VERSION_OS
28 * - S_VERSION_PLATFORM
29 * - S_VERSION_MAIN
30 * - S_VERSION_ENG is optional
31 * - S_VERSION_PATCH is optional
32 * - S_VERSION_BUILD = 0 if S_VERSION_BUILD not defined or empty
33 */
34
35#define S_VERSION_OS "A" /* "A" for all Android */
36#define S_VERSION_PLATFORM "B" /* "B" for Tegra3 */
37
38/*
39 * This version number must be updated for each new release
40 */
41#define S_VERSION_MAIN "01.03"
42
43/*
44 * If this is a patch or engineering version use the following
45 * defines to set the version number. Else set these values to 0.
46 */
47#define S_VERSION_ENG 0
48#define S_VERSION_PATCH 0
49
50#ifdef S_VERSION_BUILD
51/* TRICK: detect if S_VERSION_BUILD is defined but empty */
52#if 0 == S_VERSION_BUILD-0
53#undef S_VERSION_BUILD
54#define S_VERSION_BUILD 0
55#endif
56#else
57/* S_VERSION_BUILD is not defined */
58#define S_VERSION_BUILD 0
59#endif
60
61#define __STRINGIFY(X) #X
62#define __STRINGIFY2(X) __STRINGIFY(X)
63
64#if S_VERSION_ENG != 0
65#define _S_VERSION_ENG "e" __STRINGIFY2(S_VERSION_ENG)
66#else
67#define _S_VERSION_ENG ""
68#endif
69
70#if S_VERSION_PATCH != 0
71#define _S_VERSION_PATCH "p" __STRINGIFY2(S_VERSION_PATCH)
72#else
73#define _S_VERSION_PATCH ""
74#endif
75
76#if !defined(NDEBUG) || defined(_DEBUG)
77#define S_VERSION_VARIANT "D "
78#else
79#define S_VERSION_VARIANT " "
80#endif
81
82#define S_VERSION_STRING \
83 "TFN" \
84 S_VERSION_OS \
85 S_VERSION_PLATFORM \
86 S_VERSION_MAIN \
87 _S_VERSION_ENG \
88 _S_VERSION_PATCH \
89 "." __STRINGIFY2(S_VERSION_BUILD) " " \
90 S_VERSION_VARIANT
91
92#endif /* __S_VERSION_H__ */
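Worked expansion, using only values defined in this header: with S_VERSION_BUILD=1234 on the compiler command line and NDEBUG defined, S_VERSION_STRING concatenates as

	"TFN" "A" "B" "01.03" "" "" "." "1234" " " " "  ==  "TFNAB01.03.1234  "

while a debug build (NDEBUG absent) ends in the "D " variant instead: "TFNAB01.03.1234 D ". If S_VERSION_BUILD is not passed at all, the fallback above sets the build number to 0, giving "TFNAB01.03.0 D " (debug) or "TFNAB01.03.0  " (release).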
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644
index 00000000000..16915beb406
--- /dev/null
+++ b/security/tf_driver/tf_comm.c
@@ -0,0 +1,1745 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <asm/div64.h>
21#include <asm/system.h>
22#include <linux/version.h>
23#include <asm/cputype.h>
24#include <linux/interrupt.h>
25#include <linux/page-flags.h>
26#include <linux/pagemap.h>
27#include <linux/vmalloc.h>
28#include <linux/jiffies.h>
29#include <linux/freezer.h>
30
31#include "tf_defs.h"
32#include "tf_comm.h"
33#include "tf_protocol.h"
34#include "tf_util.h"
35#include "tf_conn.h"
36
37#ifdef CONFIG_TF_ZEBRA
38#include "tf_zebra.h"
39#endif
40
41/*---------------------------------------------------------------------------
42 * Internal Constants
43 *---------------------------------------------------------------------------*/
44
45/*
46 * shared memories descriptor constants
47 */
48#define DESCRIPTOR_B_MASK (1 << 2)
49#define DESCRIPTOR_C_MASK (1 << 3)
50#define DESCRIPTOR_S_MASK (1 << 10)
51
52#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
53#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
54#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
55
56#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
57#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
58#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
59
60#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
61#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
62
63/*
64 * Reject an attempt to share a strongly-Ordered or Device memory
65 * Strongly-Ordered: TEX=0b000, C=0, B=0
66 * Shared Device: TEX=0b000, C=0, B=1
67 * Non-Shared Device: TEX=0b010, C=0, B=0
68 */
69#define L2_TEX_C_B_MASK \
70 ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
71#define L2_TEX_C_B_STRONGLY_ORDERED \
72 ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
73#define L2_TEX_C_B_SHARED_DEVICE \
74 ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
75#define L2_TEX_C_B_NON_SHARED_DEVICE \
76 ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
77
78#define CACHE_S(x) ((x) & (1 << 24))
79#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
80
81#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
82#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
83
84/*---------------------------------------------------------------------------
85 * atomic operation definitions
86 *---------------------------------------------------------------------------*/
87
88/*
89 * Atomically updates the sync_serial_n and time_n register
90 * sync_serial_n and time_n modifications are thread safe
91 */
92void tf_set_current_time(struct tf_comm *comm)
93{
94 u32 new_sync_serial;
95 struct timeval now;
96 u64 time64;
97
98 /*
99 * lock the structure while updating the L1 shared memory fields
100 */
101 spin_lock(&comm->lock);
102
103 /* read sync_serial_n and change the TimeSlot bit field */
104 new_sync_serial =
105 tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
106
107 do_gettimeofday(&now);
108 time64 = now.tv_sec;
109 time64 = (time64 * 1000) + (now.tv_usec / 1000);
110
111 /* Write the new time64 and nSyncSerial into shared memory */
112 tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
113 TF_SYNC_SERIAL_TIMESLOT_N], time64);
114 tf_write_reg32(&comm->l1_buffer->sync_serial_n,
115 new_sync_serial);
116
117 spin_unlock(&comm->lock);
118}
119
120/*
121 * Performs the specific read timeout operation
122 * The difficulty here is to read atomically 2 u32
123 * values from the L1 shared buffer.
124 * Atomicity is guaranteed by reading the timeslot serial published
125 * by the Secure World before and after the 64-bit read
126 */
127static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
128{
129 u32 sync_serial_s_initial = 0;
130 u32 sync_serial_s_final = 1;
131 u64 time64;
132
133 spin_lock(&comm->lock);
134
135 while (sync_serial_s_initial != sync_serial_s_final) {
136 sync_serial_s_initial = tf_read_reg32(
137 &comm->l1_buffer->sync_serial_s);
138 time64 = tf_read_reg64(
139 &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
140
141 sync_serial_s_final = tf_read_reg32(
142 &comm->l1_buffer->sync_serial_s);
143 }
144
145 spin_unlock(&comm->lock);
146
147 *time = time64;
148}
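The retry loop above is a seqlock-style consistent read: the 64-bit timeout is accepted only if the Secure World's serial is identical before and after the two 32-bit reads, and the serial also selects which of the two timeout_s slots is current. A minimal self-contained sketch of the same pattern (generic names, not part of the driver API):

	#include <linux/types.h>

	/*
	 * Sketch: the producer bumps *serial and publishes the new value
	 * in slot[*serial & 1]; the consumer retries until the serial is
	 * stable across its two reads.
	 */
	static u64 read_consistent_u64(const volatile u32 *serial,
				       const volatile u64 slot[2])
	{
		u32 before, after;
		u64 value;

		do {
			before = *serial;          /* serial before the read */
			value = slot[before & 1];  /* slot it designates */
			after = *serial;           /* serial after the read */
		} while (before != after);         /* writer raced us: retry */

		return value;
	}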
149
150/*----------------------------------------------------------------------------
151 * SIGKILL signal handling
152 *----------------------------------------------------------------------------*/
153
154static bool sigkill_pending(void)
155{
156 if (signal_pending(current)) {
157 dprintk(KERN_INFO "A signal is pending\n");
158 if (sigismember(&current->pending.signal, SIGKILL)) {
159 dprintk(KERN_INFO "A SIGKILL is pending\n");
160 return true;
161 } else if (sigismember(
162 &current->signal->shared_pending.signal, SIGKILL)) {
163 dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
164 return true;
165 }
166 }
167 return false;
168}
169
170/*----------------------------------------------------------------------------
171 * Shared memory related operations
172 *----------------------------------------------------------------------------*/
173
174struct tf_coarse_page_table *tf_alloc_coarse_page_table(
175 struct tf_coarse_page_table_allocation_context *alloc_context,
176 u32 type)
177{
178 struct tf_coarse_page_table *coarse_pg_table = NULL;
179
180 spin_lock(&(alloc_context->lock));
181
182 if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
183 /*
184 * The free list can provide us a coarse page table
185 * descriptor
186 */
187 coarse_pg_table = list_first_entry(
188 &alloc_context->free_coarse_page_tables,
189 struct tf_coarse_page_table, list);
190 list_del(&(coarse_pg_table->list));
191
192 coarse_pg_table->parent->ref_count++;
193 } else {
194 /* no array of coarse page tables, create a new one */
195 struct tf_coarse_page_table_array *array;
196 void *page;
197 int i;
198
199 spin_unlock(&(alloc_context->lock));
200
201 /* first allocate a new page descriptor */
202 array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
203 if (array == NULL) {
204 dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
205 " failed to allocate a table array\n",
206 alloc_context);
207 return NULL;
208 }
209
210 array->type = type;
211 INIT_LIST_HEAD(&(array->list));
212
213 /* now allocate the actual page the page descriptor describes */
214 page = (void *) internal_get_zeroed_page(GFP_KERNEL);
215 if (page == NULL) {
216 dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
217 " failed allocate a page\n",
218 alloc_context);
219 internal_kfree(array);
220 return NULL;
221 }
222
223 spin_lock(&(alloc_context->lock));
224
225 /* initialize the coarse page table descriptors */
226 for (i = 0; i < 4; i++) {
227 INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
228 array->coarse_page_tables[i].descriptors =
229 page + (i * SIZE_1KB);
230 array->coarse_page_tables[i].parent = array;
231
232 if (i == 0) {
233 /*
234 * the first element is kept for the current
235 * coarse page table allocation
236 */
237 coarse_pg_table =
238 &(array->coarse_page_tables[i]);
239 array->ref_count = 1;
240 } else {
241 /*
242 * The other elements are added to the free list
243 */
244 list_add(&(array->coarse_page_tables[i].list),
245 &(alloc_context->
246 free_coarse_page_tables));
247 }
248 }
249
250 list_add(&(array->list),
251 &(alloc_context->coarse_page_table_arrays));
252 }
253 spin_unlock(&(alloc_context->lock));
254
255 return coarse_pg_table;
256}
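Sizing note for the allocator above: an ARM coarse (second-level) page table is 1KB, i.e. 256 descriptors x 4 bytes, so the single zeroed 4KB page is carved into exactly four tables (hence the i < 4 loop and the i * SIZE_1KB offsets), and each table can map 256 x 4KB = 1MB of client memory.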
257
258
259void tf_free_coarse_page_table(
260 struct tf_coarse_page_table_allocation_context *alloc_context,
261 struct tf_coarse_page_table *coarse_pg_table,
262 int force)
263{
264 struct tf_coarse_page_table_array *array;
265
266 spin_lock(&(alloc_context->lock));
267
268 array = coarse_pg_table->parent;
269
270 (array->ref_count)--;
271
272 if (array->ref_count == 0) {
273 /*
274 * no coarse page table descriptor is used
275 * check if we should free the whole page
276 */
277
278 if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
279 && (force == 0))
280 /*
281 * This is a preallocated page,
282 * add the page back to the free list
283 */
284 list_add(&(coarse_pg_table->list),
285 &(alloc_context->free_coarse_page_tables));
286 else {
287 /*
288 * None of the page's coarse page table descriptors
289 * are in use, free the whole page
290 */
291 int i;
292 u32 *descriptors;
293
294 /*
295 * remove the page's associated coarse page table
296 * descriptors from the free list
297 */
298 for (i = 0; i < 4; i++)
299 if (&(array->coarse_page_tables[i]) !=
300 coarse_pg_table)
301 list_del(&(array->
302 coarse_page_tables[i].list));
303
304 descriptors =
305 array->coarse_page_tables[0].descriptors;
306 array->coarse_page_tables[0].descriptors = NULL;
307
308 /* remove the coarse page table from the array */
309 list_del(&(array->list));
310
311 spin_unlock(&(alloc_context->lock));
312 /*
313 * Free the page.
314 * The address of the page is contained in the first
315 * element
316 */
317 internal_free_page((unsigned long) descriptors);
318 /* finally free the array */
319 internal_kfree(array);
320
321 spin_lock(&(alloc_context->lock));
322 }
323 } else {
324 /*
325 * Some coarse page table descriptors are in use.
326 * Add the descriptor to the free list
327 */
328 list_add(&(coarse_pg_table->list),
329 &(alloc_context->free_coarse_page_tables));
330 }
331
332 spin_unlock(&(alloc_context->lock));
333}
334
335
336void tf_init_coarse_page_table_allocator(
337 struct tf_coarse_page_table_allocation_context *alloc_context)
338{
339 spin_lock_init(&(alloc_context->lock));
340 INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
341 INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
342}
343
344void tf_release_coarse_page_table_allocator(
345 struct tf_coarse_page_table_allocation_context *alloc_context)
346{
347 spin_lock(&(alloc_context->lock));
348
349 /* now clean up the list of page descriptors */
350 while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
351 struct tf_coarse_page_table_array *page_desc;
352 u32 *descriptors;
353
354 page_desc = list_first_entry(
355 &alloc_context->coarse_page_table_arrays,
356 struct tf_coarse_page_table_array, list);
357
358 descriptors = page_desc->coarse_page_tables[0].descriptors;
359 list_del(&(page_desc->list));
360
361 spin_unlock(&(alloc_context->lock));
362
363 if (descriptors != NULL)
364 internal_free_page((unsigned long)descriptors);
365
366 internal_kfree(page_desc);
367
368 spin_lock(&(alloc_context->lock));
369 }
370
371 spin_unlock(&(alloc_context->lock));
372}
373
374/*
375 * Returns the L1 coarse page descriptor for
376 * a coarse page table located at address coarse_pg_table_descriptors
377 */
378u32 tf_get_l1_coarse_descriptor(
379 u32 coarse_pg_table_descriptors[256])
380{
381 u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
382 unsigned int info = read_cpuid(CPUID_CACHETYPE);
383
384 descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
385 & L1_COARSE_DESCRIPTOR_ADDR_MASK);
386
387 if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
388 dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
389 "V31-12 added to descriptor\n");
390 /* the 16k alignment restriction applies */
391 descriptor |= (DESCRIPTOR_V13_12_GET(
392 (u32)coarse_pg_table_descriptors) <<
393 L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
394 }
395
396 return descriptor;
397}
398
399
400#define dprintk_desc(...)
401/*
402 * Returns the L2 descriptor for the specified user page.
403 */
404u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
405{
406 pgd_t *pgd;
407 pud_t *pud;
408 pmd_t *pmd;
409 pte_t *ptep;
410 u32 *hwpte;
411 u32 tex = 0;
412 u32 descriptor = 0;
413
414 dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
415 pgd = pgd_offset(mm, vaddr);
416 dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
417 (unsigned int) *pgd);
418 if (pgd_none(*pgd))
419 goto error;
420 pud = pud_offset(pgd, vaddr);
421 dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
422 (unsigned int) *pud);
423 if (pud_none(*pud))
424 goto error;
425 pmd = pmd_offset(pud, vaddr);
426 dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
427 (unsigned int) *pmd);
428 if (pmd_none(*pmd))
429 goto error;
430
431 if (PMD_TYPE_SECT&(*pmd)) {
432 /* We have a section */
433 dprintk_desc(KERN_INFO "Section descr=%x\n",
434 (unsigned int)*pmd);
435 if ((*pmd) & PMD_SECT_BUFFERABLE)
436 descriptor |= DESCRIPTOR_B_MASK;
437 if ((*pmd) & PMD_SECT_CACHEABLE)
438 descriptor |= DESCRIPTOR_C_MASK;
439 if ((*pmd) & PMD_SECT_S)
440 descriptor |= DESCRIPTOR_S_MASK;
441 tex = ((*pmd) >> 12) & 7;
442 } else {
443 /* We have a table */
444 ptep = pte_offset_map(pmd, vaddr);
445 if (pte_present(*ptep)) {
446 dprintk_desc(KERN_INFO "L2 descr=%x\n",
447 (unsigned int) *ptep);
448 if ((*ptep) & L_PTE_MT_BUFFERABLE)
449 descriptor |= DESCRIPTOR_B_MASK;
450 if ((*ptep) & L_PTE_MT_WRITETHROUGH)
451 descriptor |= DESCRIPTOR_C_MASK;
452 if ((*ptep) & L_PTE_MT_DEV_SHARED)
453 descriptor |= DESCRIPTOR_S_MASK;
454
455 /*
456 * Linux's pte doesn't keep track of TEX value.
457 * Have to jump to hwpte see include/asm/pgtable.h
458 * (-2k before 2.6.38, then +2k)
459 */
460#ifdef PTE_HWTABLE_SIZE
461 hwpte = (u32 *) (ptep+PTE_HWTABLE_PTRS);
462#else
463 hwpte = (u32 *) (ptep-PTRS_PER_PTE);
464#endif
465 if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
466 ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
467 goto error;
468 dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
469 tex = ((*hwpte) >> 6) & 7;
470 pte_unmap(ptep);
471 } else {
472 pte_unmap(ptep);
473 goto error;
474 }
475 }
476
477 descriptor |= (tex << 6);
478
479 return descriptor;
480
481error:
482 dprintk(KERN_ERR "Error occurred in %s\n", __func__);
483 return 0;
484}
485
486
487/*
488 * Changes an L2 page descriptor back to a pointer to a physical page
489 */
490inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
491{
492 return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
493}
494
495
496/*
497 * Returns the L1 descriptor for the 1KB-aligned coarse page table. The address
498 * must be in the kernel address space.
499 */
500static void tf_get_l2_page_descriptor(
501 u32 *l2_page_descriptor,
502 u32 flags, struct mm_struct *mm)
503{
504 unsigned long page_vaddr;
505 u32 descriptor;
506 struct page *page;
507 bool unmap_page = false;
508
509#if 0
510 dprintk(KERN_INFO
511 "tf_get_l2_page_descriptor():"
512 "*l2_page_descriptor=%x\n",
513 *l2_page_descriptor);
514#endif
515
516 if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
517 return;
518
519 page = (struct page *) (*l2_page_descriptor);
520
521 page_vaddr = (unsigned long) page_address(page);
522 if (page_vaddr == 0) {
523 dprintk(KERN_INFO "page_address returned 0\n");
524 /* Should we use kmap_atomic(page, KM_USER0) instead ? */
525 page_vaddr = (unsigned long) kmap(page);
526 if (page_vaddr == 0) {
527 *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
528 dprintk(KERN_ERR "kmap returned 0\n");
529 return;
530 }
531 unmap_page = true;
532 }
533
534 descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
535 if (descriptor == 0) {
536 *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
537 return;
538 }
539 descriptor |= L2_PAGE_DESCRIPTOR_BASE;
540
541 descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
542
543 if (!(flags & TF_SHMEM_TYPE_WRITE))
544 /* only read access */
545 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
546 else
547 /* read and write access */
548 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
549
550 if (unmap_page)
551 kunmap(page);
552
553 *l2_page_descriptor = descriptor;
554}
555
556
557/*
558 * Unlocks the physical memory pages
559 * and frees the coarse page tables that need to be freed.
560 */
561void tf_cleanup_shared_memory(
562 struct tf_coarse_page_table_allocation_context *alloc_context,
563 struct tf_shmem_desc *shmem_desc,
564 u32 full_cleanup)
565{
566 u32 coarse_page_index;
567
568 dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
569 shmem_desc);
570
571#ifdef DEBUG_COARSE_TABLES
572 printk(KERN_DEBUG "tf_cleanup_shared_memory "
573 "- number of coarse page tables=%d\n",
574 shmem_desc->coarse_pg_table_count);
575
576 for (coarse_page_index = 0;
577 coarse_page_index < shmem_desc->coarse_pg_table_count;
578 coarse_page_index++) {
579 u32 j;
580
581 printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
582 shmem_desc->coarse_pg_table[coarse_page_index],
583 shmem_desc->coarse_pg_table[coarse_page_index]->
584 descriptors,
585 coarse_page_index);
586 if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
587 for (j = 0;
588 j < TF_DESCRIPTOR_TABLE_CAPACITY;
589 j += 8) {
590 int k;
591 printk(KERN_DEBUG " ");
592 for (k = j; k < j + 8; k++)
593 printk(KERN_DEBUG "0x%08X ",
594 shmem_desc->coarse_pg_table[
595 coarse_page_index]->
596 descriptors[k]);
597 printk(KERN_DEBUG "\n");
598 }
599 }
600 }
601 printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
602#endif
603
604 /* Parse the coarse page descriptors */
605 for (coarse_page_index = 0;
606 coarse_page_index < shmem_desc->coarse_pg_table_count;
607 coarse_page_index++) {
608 u32 j;
609 u32 found = 0;
610
611 /* parse the page descriptors of the coarse page */
612 for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
613 u32 l2_page_descriptor = (u32) (shmem_desc->
614 coarse_pg_table[coarse_page_index]->
615 descriptors[j]);
616
617 if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
618 struct page *page =
619 tf_l2_page_descriptor_to_page(
620 l2_page_descriptor);
621
622 if (!PageReserved(page))
623 SetPageDirty(page);
624 internal_page_cache_release(page);
625
626 found = 1;
627 } else if (found == 1) {
628 break;
629 }
630 }
631
632 /*
633 * Only free the coarse pages of descriptors not preallocated
634 */
635 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
636 (full_cleanup != 0))
637 tf_free_coarse_page_table(alloc_context,
638 shmem_desc->coarse_pg_table[coarse_page_index],
639 0);
640 }
641
642 shmem_desc->coarse_pg_table_count = 0;
643 dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
644 shmem_desc);
645}
646
647/*
648 * Make sure the coarse pages are allocated. If not allocated, do it.
649 * Locks down the physical memory pages.
650 * Verifies the memory attributes depending on flags.
651 */
652int tf_fill_descriptor_table(
653 struct tf_coarse_page_table_allocation_context *alloc_context,
654 struct tf_shmem_desc *shmem_desc,
655 u32 buffer,
656 struct vm_area_struct **vmas,
657 u32 descriptors[TF_MAX_COARSE_PAGES],
658 u32 buffer_size,
659 u32 *buffer_start_offset,
660 bool in_user_space,
661 u32 flags,
662 u32 *descriptor_count)
663{
664 u32 coarse_page_index;
665 u32 coarse_page_count;
666 u32 page_count;
667 u32 page_shift = 0;
668 int ret = 0;
669 unsigned int info = read_cpuid(CPUID_CACHETYPE);
670
671 dprintk(KERN_INFO "tf_fill_descriptor_table"
672 "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
673 "flags = 0x%08x)\n",
674 shmem_desc,
675 buffer,
676 buffer_size,
677 in_user_space,
678 flags);
679
680 /*
681 * Compute the number of pages
682 * Compute the number of coarse pages
683 * Compute the page offset
684 */
685 page_count = ((buffer & ~PAGE_MASK) +
686 buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
687
688 /* check whether the 16k alignment restriction applies */
689 if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
690 /*
691 * The 16k alignment restriction applies.
692 * Shift data to get them 16k aligned
693 */
694 page_shift = DESCRIPTOR_V13_12_GET(buffer);
695 page_count += page_shift;
696
697
698 /*
699 * Check the number of pages fit in the coarse pages
700 */
701 if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
702 TF_MAX_COARSE_PAGES)) {
703 dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
704 "%u pages required to map shared memory!\n",
705 shmem_desc, page_count);
706 ret = -ENOMEM;
707 goto error;
708 }
709
710 /* each coarse page table describes 256 pages */
711 coarse_page_count = ((page_count +
712 TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
713 TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
714
715 /*
716 * Compute the buffer offset
717 */
718 *buffer_start_offset = (buffer & ~PAGE_MASK) |
719 (page_shift << PAGE_SHIFT);
720
721 /* map each coarse page */
722 for (coarse_page_index = 0;
723 coarse_page_index < coarse_page_count;
724 coarse_page_index++) {
725 u32 j;
726 struct tf_coarse_page_table *coarse_pg_table;
727
728 /* compute a virtual address with appropriate offset */
729 u32 buffer_offset_vaddr = buffer +
730 (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
731 u32 pages_to_get;
732
733 /*
734 * Compute the number of pages left for this coarse page.
735 * Decrement page_count each time
736 */
737 pages_to_get = (page_count >>
738 TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
739 TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
740 page_count -= pages_to_get;
741
742 /*
743 * Check if the coarse page has already been allocated
744 * If not, do it now
745 */
746 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
747 || (shmem_desc->type ==
748 TF_SHMEM_TYPE_PM_HIBERNATE)) {
749 coarse_pg_table = tf_alloc_coarse_page_table(
750 alloc_context,
751 TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
752
753 if (coarse_pg_table == NULL) {
754 dprintk(KERN_ERR
755 "tf_fill_descriptor_table(%p): "
756 "tf_alloc_coarse_page_table "
757 "failed for coarse page %d\n",
758 shmem_desc, coarse_page_index);
759 ret = -ENOMEM;
760 goto error;
761 }
762
763 shmem_desc->coarse_pg_table[coarse_page_index] =
764 coarse_pg_table;
765 } else {
766 coarse_pg_table =
767 shmem_desc->coarse_pg_table[coarse_page_index];
768 }
769
770 /*
771 * The page is not necessarily filled with zeroes.
772 * Set the fault descriptors (each descriptor is 4 bytes long)
773 */
774 memset(coarse_pg_table->descriptors, 0x00,
775 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
776
777 if (in_user_space) {
778 int pages;
779
780 /*
781 * TRICK: use pCoarsePageDescriptor->descriptors to
782 * hold the (struct page*) items before getting their
783 * physical address
784 */
785 down_read(&(current->mm->mmap_sem));
786 pages = internal_get_user_pages(
787 current,
788 current->mm,
789 buffer_offset_vaddr,
790 /*
791 * page_shift is cleared after retrieving first
792 * coarse page
793 */
794 (pages_to_get - page_shift),
795 (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
796 0,
797 (struct page **) (coarse_pg_table->descriptors
798 + page_shift),
799 vmas);
800 up_read(&(current->mm->mmap_sem));
801
802 if ((pages <= 0) ||
803 (pages != (pages_to_get - page_shift))) {
804 dprintk(KERN_ERR "tf_fill_descriptor_table:"
805 " get_user_pages got %d pages while "
806 "trying to get %d pages!\n",
807 pages, pages_to_get - page_shift);
808 ret = -EFAULT;
809 goto error;
810 }
811
812 for (j = page_shift;
813 j < page_shift + pages;
814 j++) {
815 /* Get the actual L2 descriptors */
816 tf_get_l2_page_descriptor(
817 &coarse_pg_table->descriptors[j],
818 flags,
819 current->mm);
820 /*
821 * Reject Strongly-Ordered or Device Memory
822 */
823#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
824 ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
825 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
826 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
827
828 if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
829 coarse_pg_table->
830 descriptors[j])) {
831 dprintk(KERN_ERR
832 "tf_fill_descriptor_table:"
833 " descriptor 0x%08X use "
834 "strongly-ordered or device "
835 "memory. Rejecting!\n",
836 coarse_pg_table->
837 descriptors[j]);
838 ret = -EFAULT;
839 goto error;
840 }
841 }
842 } else if (is_vmalloc_addr((void *)buffer_offset_vaddr)) {
843 /* Kernel-space memory obtained through vmalloc */
844 dprintk(KERN_INFO
845 "tf_fill_descriptor_table: "
846 "vmalloc'ed buffer starting at %p\n",
847 (void *)buffer_offset_vaddr);
848 for (j = page_shift; j < pages_to_get; j++) {
849 struct page *page;
850 void *addr =
851 (void *)(buffer_offset_vaddr +
852 (j - page_shift) * PAGE_SIZE);
853 page = vmalloc_to_page(addr);
854 if (page == NULL) {
855 dprintk(KERN_ERR
856 "tf_fill_descriptor_table: "
857 "cannot map %p (vmalloc) "
858 "to page\n",
859 addr);
860 ret = -EFAULT;
861 goto error;
862 }
863 coarse_pg_table->descriptors[j] = (u32)page;
864 get_page(page);
865
866 /* change coarse page "page address" */
867 tf_get_l2_page_descriptor(
868 &coarse_pg_table->descriptors[j],
869 flags,
870 &init_mm);
871 }
872 } else {
873 /* Kernel-space memory given by a virtual address */
874 dprintk(KERN_INFO
875 "tf_fill_descriptor_table: "
876 "buffer starting at virtual address %p\n",
877 (void *)buffer_offset_vaddr);
878 for (j = page_shift; j < pages_to_get; j++) {
879 struct page *page;
880 void *addr =
881 (void *)(buffer_offset_vaddr +
882 (j - page_shift) * PAGE_SIZE);
883 page = virt_to_page(addr);
884 if (page == NULL) {
885 dprintk(KERN_ERR
886 "tf_fill_descriptor_table: "
887 "cannot map %p (virtual) "
888 "to page\n",
889 addr);
890 ret = -EFAULT;
891 goto error;
892 }
893 coarse_pg_table->descriptors[j] = (u32)page;
894 get_page(page);
895
896 /* change coarse page "page address" */
897 tf_get_l2_page_descriptor(
898 &coarse_pg_table->descriptors[j],
899 flags,
900 &init_mm);
901 }
902 }
903
904 dmac_flush_range((void *)coarse_pg_table->descriptors,
905 (void *)(((u32)(coarse_pg_table->descriptors)) +
906 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
907
908 outer_clean_range(
909 __pa(coarse_pg_table->descriptors),
910 __pa(coarse_pg_table->descriptors) +
911 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
912 wmb();
913
914 /* Update the coarse page table address */
915 descriptors[coarse_page_index] =
916 tf_get_l1_coarse_descriptor(
917 coarse_pg_table->descriptors);
918
919 /*
920 * The next coarse page has no page shift, reset the
921 * page_shift
922 */
923 page_shift = 0;
924 }
925
926 *descriptor_count = coarse_page_count;
927 shmem_desc->coarse_pg_table_count = coarse_page_count;
928
929#ifdef DEBUG_COARSE_TABLES
930 printk(KERN_DEBUG "\ntf_fill_descriptor_table - size=0x%08X "
931 "numberOfCoarsePages=%d\n", buffer_size,
932 shmem_desc->coarse_pg_table_count);
933 for (coarse_page_index = 0;
934 coarse_page_index < shmem_desc->coarse_pg_table_count;
935 coarse_page_index++) {
936 u32 j;
937 struct tf_coarse_page_table *coarse_page_table =
938 shmem_desc->coarse_pg_table[coarse_page_index];
939
940 printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
941 coarse_page_table,
942 coarse_page_table->descriptors,
943 coarse_page_index);
944 for (j = 0;
945 j < TF_DESCRIPTOR_TABLE_CAPACITY;
946 j += 8) {
947 int k;
948 printk(KERN_DEBUG " ");
949 for (k = j; k < j + 8; k++)
950 printk(KERN_DEBUG "0x%08X ",
951 coarse_page_table->descriptors[k]);
952 printk(KERN_DEBUG "\n");
953 }
954 }
955 printk(KERN_DEBUG "\ntf_fill_descriptor_table() - done\n\n");
956#endif
957
958 return 0;
959
960error:
961 tf_cleanup_shared_memory(
962 alloc_context,
963 shmem_desc,
964 0);
965
966 return ret;
967}
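Worked example for the page arithmetic at the top of this function, assuming 4KB pages (PAGE_SHIFT = 12, hence ~PAGE_MASK = 0xFFF): for buffer = 0x40001200 and buffer_size = 0x2000,

	page_count = (0x200 + 0x2000 + 0xFFF) >> 12 = 0x31FF >> 12 = 3

which matches the three pages 0x40001000..0x40003FFF that the buffer actually touches. With 3 pages (at most 256 fit per coarse table) coarse_page_count is 1, and *buffer_start_offset comes out as 0x200 when no 16KB-alignment page_shift applies.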
968
969
970/*----------------------------------------------------------------------------
971 * Standard communication operations
972 *----------------------------------------------------------------------------*/
973
974u8 *tf_get_description(struct tf_comm *comm)
975{
976 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
977 return comm->l1_buffer->version_description;
978
979 return NULL;
980}
981
982/*
983 * Returns a non-zero value if the specified S-timeout has expired, zero
984 * otherwise.
985 *
986 * The placeholder pointed to by relative_timeout_jiffies gives the relative
987 * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
988 * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
989 */
990static int tf_test_s_timeout(
991 u64 timeout,
992 signed long *relative_timeout_jiffies)
993{
994 struct timeval now;
995 u64 time64;
996
997 *relative_timeout_jiffies = 0;
998
999 /* immediate timeout */
1000 if (timeout == TIME_IMMEDIATE)
1001 return 1;
1002
1003 /* infinite timeout */
1004 if (timeout == TIME_INFINITE) {
1005 dprintk(KERN_DEBUG "tf_test_s_timeout: "
1006 "timeout is infinite\n");
1007 *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
1008 return 0;
1009 }
1010
1011 do_gettimeofday(&now);
1012 time64 = now.tv_sec;
1013 /* will not overflow as operations are done on 64bit values */
1014 time64 = (time64 * 1000) + (now.tv_usec / 1000);
1015
1016 /* timeout expired */
1017 if (time64 >= timeout) {
1018 dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
1019 return 1;
1020 }
1021
1022 /*
1023 * finite timeout, compute relative_timeout_jiffies
1024 */
1025 /* will not overflow as time64 < timeout */
1026 timeout -= time64;
1027
1028 /* guarantee *relative_timeout_jiffies is a valid timeout */
1029 if ((timeout >> 32) != 0)
1030 *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
1031 else
1032 *relative_timeout_jiffies =
1033 msecs_to_jiffies((unsigned int) timeout);
1034
1035 dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
1036 *relative_timeout_jiffies);
1037 return 0;
1038}
1039
1040static void tf_copy_answers(struct tf_comm *comm)
1041{
1042 u32 first_answer;
1043 u32 first_free_answer;
1044 struct tf_answer_struct *answerStructureTemp;
1045
1046 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1047 spin_lock(&comm->lock);
1048 first_free_answer = tf_read_reg32(
1049 &comm->l1_buffer->first_free_answer);
1050 first_answer = tf_read_reg32(
1051 &comm->l1_buffer->first_answer);
1052
1053 while (first_answer != first_free_answer) {
1054 /* answer queue not empty */
1055 union tf_answer sComAnswer;
1056 struct tf_answer_header header;
1057
1058 /*
1059 * the size of the command in 32-bit words, not
1060 * in bytes
1061 */
1062 u32 command_size;
1063 u32 i;
1064 u32 *temp = (uint32_t *) &header;
1065
1066 dprintk(KERN_INFO
1067 "[pid=%d] tf_copy_answers(%p): "
1068 "Read answers from L1\n",
1069 current->pid, comm);
1070
1071 /* Read the answer header */
1072 for (i = 0;
1073 i < sizeof(struct tf_answer_header)/sizeof(u32);
1074 i++)
1075 temp[i] = comm->l1_buffer->answer_queue[
1076 (first_answer + i) %
1077 TF_S_ANSWER_QUEUE_CAPACITY];
1078
1079 /* Read the answer from the L1_Buffer*/
1080 command_size = header.message_size +
1081 sizeof(struct tf_answer_header)/sizeof(u32);
1082 temp = (uint32_t *) &sComAnswer;
1083 for (i = 0; i < command_size; i++)
1084 temp[i] = comm->l1_buffer->answer_queue[
1085 (first_answer + i) %
1086 TF_S_ANSWER_QUEUE_CAPACITY];
1087
1088 answerStructureTemp = (struct tf_answer_struct *)
1089 sComAnswer.header.operation_id;
1090
1091 tf_dump_answer(&sComAnswer);
1092
1093 memcpy(answerStructureTemp->answer, &sComAnswer,
1094 command_size * sizeof(u32));
1095 answerStructureTemp->answer_copied = true;
1096
1097 first_answer += command_size;
1098 tf_write_reg32(&comm->l1_buffer->first_answer,
1099 first_answer);
1100 }
1101 spin_unlock(&(comm->lock));
1102 }
1103}
1104
1105static void tf_copy_command(
1106 struct tf_comm *comm,
1107 union tf_command *command,
1108 struct tf_connection *connection,
1109 enum TF_COMMAND_STATE *command_status)
1110{
1111 if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
1112 && (command != NULL)) {
1113 /*
1114 * Write the message in the message queue.
1115 */
1116
1117 if (*command_status == TF_COMMAND_STATE_PENDING) {
1118 u32 command_size;
1119 u32 queue_words_count;
1120 u32 i;
1121 u32 first_free_command;
1122 u32 first_command;
1123
1124 spin_lock(&comm->lock);
1125
1126 first_command = tf_read_reg32(
1127 &comm->l1_buffer->first_command);
1128 first_free_command = tf_read_reg32(
1129 &comm->l1_buffer->first_free_command);
1130
1131 queue_words_count = first_free_command - first_command;
1132 command_size = command->header.message_size +
1133 sizeof(struct tf_command_header)/sizeof(u32);
1134 if ((queue_words_count + command_size) <
1135 TF_N_MESSAGE_QUEUE_CAPACITY) {
1136 /*
1137 * Command queue is not full.
1138 * If the Command queue is full,
1139 * the command will be copied at
1140 * another iteration
1141 * of the current function.
1142 */
1143
1144 /*
1145 * Change the conn state
1146 */
1147 if (connection == NULL)
1148 goto copy;
1149
1150 spin_lock(&(connection->state_lock));
1151
1152 if ((connection->state ==
1153 TF_CONN_STATE_NO_DEVICE_CONTEXT)
1154 &&
1155 (command->header.message_type ==
1156 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1157
1158 dprintk(KERN_INFO
1159 "tf_copy_command(%p):"
1160 "Conn state is DEVICE_CONTEXT_SENT\n",
1161 connection);
1162 connection->state =
1163 TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
1164 } else if ((connection->state !=
1165 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1166 &&
1167 (command->header.message_type !=
1168 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1169 /* The connection
1170 * is no longer valid.
1171 * We may not send any command on it,
1172 * not even another
1173 * DESTROY_DEVICE_CONTEXT.
1174 */
1175 dprintk(KERN_INFO
1176 "[pid=%d] tf_copy_command(%p): "
1177 "Connection no longer valid."
1178 "ABORT\n",
1179 current->pid, connection);
1180 *command_status =
1181 TF_COMMAND_STATE_ABORTED;
1182 spin_unlock(
1183 &(connection->state_lock));
1184 spin_unlock(
1185 &comm->lock);
1186 return;
1187 } else if (
1188 (command->header.message_type ==
1189 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
1190 (connection->state ==
1191 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1192 ) {
1193 dprintk(KERN_INFO
1194 "[pid=%d] tf_copy_command(%p): "
1195 "Conn state is "
1196 "DESTROY_DEVICE_CONTEXT_SENT\n",
1197 current->pid, connection);
1198 connection->state =
1199 TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
1200 }
1201 spin_unlock(&(connection->state_lock));
1202copy:
1203 /*
1204 * Copy the command to L1 Buffer
1205 */
1206 dprintk(KERN_INFO
1207 "[pid=%d] tf_copy_command(%p): "
1208 "Write Message in the queue\n",
1209 current->pid, command);
1210 tf_dump_command(command);
1211
1212 for (i = 0; i < command_size; i++)
1213 comm->l1_buffer->command_queue[
1214 (first_free_command + i) %
1215 TF_N_MESSAGE_QUEUE_CAPACITY] =
1216 ((uint32_t *) command)[i];
1217
1218 *command_status =
1219 TF_COMMAND_STATE_SENT;
1220 first_free_command += command_size;
1221
1222 tf_write_reg32(
1223 &comm->
1224 l1_buffer->first_free_command,
1225 first_free_command);
1226 }
1227 spin_unlock(&comm->lock);
1228 }
1229 }
1230}
1231
1232/*
1233 * Sends the specified message through the specified communication channel.
1234 *
1235 * This function sends the command and waits for the answer
1236 *
1237 * Returns zero upon successful completion, or an appropriate error code upon
1238 * failure.
1239 */
1240static int tf_send_recv(struct tf_comm *comm,
1241 union tf_command *command,
1242 struct tf_answer_struct *answerStruct,
1243 struct tf_connection *connection,
1244 int bKillable
1245 )
1246{
1247 int result;
1248 u64 timeout;
1249 signed long nRelativeTimeoutJiffies;
1250 bool wait_prepared = false;
1251 enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
1252 DEFINE_WAIT(wait);
1253#ifdef CONFIG_FREEZER
1254 unsigned long saved_flags;
1255#endif
1256 dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
1257 current->pid, command);
1258
1259#ifdef CONFIG_TF_ZEBRA
1260 tf_clock_timer_start();
1261#endif
1262
1263#ifdef CONFIG_FREEZER
1264 saved_flags = current->flags;
1265 current->flags |= PF_FREEZER_NOSIG;
1266#endif
1267
1268 /*
1269 * Read all answers from the answer queue
1270 */
1271copy_answers:
1272 tf_copy_answers(comm);
1273
1274 tf_copy_command(comm, command, connection, &command_status);
1275
1276 /*
1277 * Notify all waiting threads
1278 */
1279 wake_up(&(comm->wait_queue));
1280
1281#ifdef CONFIG_FREEZER
1282 if (unlikely(freezing(current))) {
1283
1284 dprintk(KERN_INFO
1285 "Entering refrigerator.\n");
1286 refrigerator();
1287 dprintk(KERN_INFO
1288 "Left refrigerator.\n");
1289 goto copy_answers;
1290 }
1291#endif
1292
1293#ifndef CONFIG_PREEMPT
1294 if (need_resched())
1295 schedule();
1296#endif
1297
1298#ifdef CONFIG_TF_ZEBRA
1299 /*
1300 * Handle RPC (if any)
1301 */
1302 if (tf_rpc_execute(comm) == RPC_NON_YIELD)
1303 goto schedule_secure_world;
1304#endif
1305
1306 /*
1307 * Join wait queue
1308 */
1309 /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
1310 current->pid, command);*/
1311 prepare_to_wait(&comm->wait_queue, &wait,
1312 bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1313 wait_prepared = true;
1314
1315 /*
1316 * Check if our answer is available
1317 */
1318 if (command_status == TF_COMMAND_STATE_ABORTED) {
1319 /* Not waiting for an answer, return error code */
1320 result = -EINTR;
1321 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1322 "Command status is ABORTED."
1323 "Exit with 0x%x\n",
1324 current->pid, result);
1325 goto exit;
1326 }
1327 if (answerStruct->answer_copied) {
1328 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1329 "Received answer (type 0x%02X)\n",
1330 current->pid,
1331 answerStruct->answer->header.message_type);
1332 result = 0;
1333 goto exit;
1334 }
1335
1336 /*
1337 * Check if a signal is pending
1338 */
1339 if (bKillable && (sigkill_pending())) {
1340 if (command_status == TF_COMMAND_STATE_PENDING)
1341 /*Command was not sent. */
1342 result = -EINTR;
1343 else
1344 /* Command was sent but no answer was received yet. */
1345 result = -EIO;
1346
1347 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1348 "Signal Pending. Return error %d\n",
1349 current->pid, result);
1350 goto exit;
1351 }
1352
1353 /*
1354 * Check if secure world is schedulable. It is schedulable if at
1355 * least one of the following conditions holds:
1356 * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
1357 * is not set);
1358 * + there is a command in the queue;
1359 * + the secure world timeout is zero.
1360 */
1361 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1362 u32 first_free_command;
1363 u32 first_command;
1364 spin_lock(&comm->lock);
1365 first_command = tf_read_reg32(
1366 &comm->l1_buffer->first_command);
1367 first_free_command = tf_read_reg32(
1368 &comm->l1_buffer->first_free_command);
1369 spin_unlock(&comm->lock);
1370 tf_read_timeout(comm, &timeout);
1371 if ((first_free_command == first_command) &&
1372 (tf_test_s_timeout(timeout,
1373 &nRelativeTimeoutJiffies) == 0))
1374 /*
1375 * If command queue is empty and if timeout has not
1376 * expired secure world is not schedulable
1377 */
1378 goto wait;
1379 }
1380
1381 finish_wait(&comm->wait_queue, &wait);
1382 wait_prepared = false;
1383
1384 /*
1385 * Yield to the Secure World
1386 */
1387#ifdef CONFIG_TF_ZEBRA
1388schedule_secure_world:
1389#endif
1390
1391 result = tf_schedule_secure_world(comm);
1392 if (result < 0)
1393 goto exit;
1394 goto copy_answers;
1395
1396wait:
1397 if (bKillable && (sigkill_pending())) {
1398 if (command_status == TF_COMMAND_STATE_PENDING)
1399 result = -EINTR; /* Command was not sent. */
1400 else
1401 /* Command was sent but no answer was received yet. */
1402 result = -EIO;
1403
1404 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1405 "Signal Pending while waiting. Return error %d\n",
1406 current->pid, result);
1407 goto exit;
1408 }
1409
1410 if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
1411 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1412 "prepare to sleep infinitely\n", current->pid);
1413 else
1414 dprintk(KERN_INFO "tf_send_recv: "
1415 "prepare to sleep 0x%lx jiffies\n",
1416 nRelativeTimeoutJiffies);
1417
1418 /* go to sleep */
1419 if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
1420 dprintk(KERN_INFO
1421 "tf_send_recv: timeout expired\n");
1422 else
1423 dprintk(KERN_INFO
1424 "tf_send_recv: signal delivered\n");
1425
1426 finish_wait(&comm->wait_queue, &wait);
1427 wait_prepared = false;
1428 goto copy_answers;
1429
1430exit:
1431 if (wait_prepared) {
1432 finish_wait(&comm->wait_queue, &wait);
1433 wait_prepared = false;
1434 }
1435
1436#ifdef CONFIG_FREEZER
1437 current->flags &= ~(PF_FREEZER_NOSIG);
1438 current->flags |= (saved_flags & PF_FREEZER_NOSIG);
1439#endif
1440
1441 return result;
1442}
1443
1444/*
1445 * Sends the specified message through the specified communication channel.
1446 *
1447 * This function sends the message and waits for the corresponding answer
1448 * It may return if a signal needs to be delivered.
1449 *
1450 * Returns zero upon successful completion, or an appropriate error code upon
1451 * failure.
1452 */
1453int tf_send_receive(struct tf_comm *comm,
1454 union tf_command *command,
1455 union tf_answer *answer,
1456 struct tf_connection *connection,
1457 bool bKillable)
1458{
1459 int error;
1460 struct tf_answer_struct answerStructure;
1461#ifdef CONFIG_SMP
1462 long ret_affinity;
1463 cpumask_t saved_cpu_mask;
1464 cpumask_t local_cpu_mask = CPU_MASK_NONE;
1465#endif
1466
1467 answerStructure.answer = answer;
1468 answerStructure.answer_copied = false;
1469
1470 if (command != NULL)
1471 command->header.operation_id = (u32) &answerStructure;
1472
1473 dprintk(KERN_INFO "tf_send_receive\n");
1474
1475#ifdef CONFIG_TF_ZEBRA
1476 if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1477 dprintk(KERN_ERR "tf_send_receive(%p): "
1478 "Secure world not started\n", comm);
1479
1480 return -EFAULT;
1481 }
1482#endif
1483
1484 if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
1485 dprintk(KERN_DEBUG
1486 "tf_send_receive: Flag Terminating is set\n");
1487 return 0;
1488 }
1489
1490#ifdef CONFIG_SMP
1491 cpu_set(0, local_cpu_mask);
1492 sched_getaffinity(0, &saved_cpu_mask);
1493 ret_affinity = sched_setaffinity(0, &local_cpu_mask);
1494 if (ret_affinity != 0)
1495 dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX\n", ret_affinity);
1496#endif
1497
1498
1499 /*
1500 * Send the command
1501 */
1502 error = tf_send_recv(comm,
1503 command, &answerStructure, connection, bKillable);
1504
1505 if (!bKillable && sigkill_pending()) {
1506 if ((command->header.message_type ==
1507 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
1508 (answer->create_device_context.error_code ==
1509 S_SUCCESS)) {
1510
1511 /*
1512 * CREATE_DEVICE_CONTEXT was interrupted.
1513 */
1514 dprintk(KERN_INFO "tf_send_receive: "
1515 "sending DESTROY_DEVICE_CONTEXT\n");
1516 answerStructure.answer = answer;
1517 answerStructure.answer_copied = false;
1518
1519 command->header.message_type =
1520 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1521 command->header.message_size =
1522 (sizeof(struct
1523 tf_command_destroy_device_context) -
1524 sizeof(struct tf_command_header))/sizeof(u32);
1525 command->header.operation_id =
1526 (u32) &answerStructure;
1527 command->destroy_device_context.device_context =
1528 answer->create_device_context.
1529 device_context;
1530
1531 goto destroy_context;
1532 }
1533 }
1534
1535 if (error == 0) {
1536 /*
1537 * tf_send_recv returned Success.
1538 */
1539 if (command->header.message_type ==
1540 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
1541 spin_lock(&(connection->state_lock));
1542 connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
1543 spin_unlock(&(connection->state_lock));
1544 } else if (command->header.message_type ==
1545 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1546 spin_lock(&(connection->state_lock));
1547 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1548 spin_unlock(&(connection->state_lock));
1549 }
1550 } else if (error == -EINTR) {
1551 /*
1552 * No command was sent, return failure.
1553 */
1554 dprintk(KERN_ERR
1555 "tf_send_receive: "
1556 "tf_send_recv failed (error %d) !\n",
1557 error);
1558 } else if (error == -EIO) {
1559 /*
1560 * A command was sent but its answer is still pending.
1561 */
1562
1563 /* means bKillable is true */
1564 dprintk(KERN_ERR
1565 "tf_send_receive: "
1566 "tf_send_recv interrupted (error %d)."
1567 "Send DESTROY_DEVICE_CONTEXT.\n", error);
1568
1569 /* Send the DESTROY_DEVICE_CONTEXT. */
1570 answerStructure.answer = answer;
1571 answerStructure.answer_copied = false;
1572
1573 command->header.message_type =
1574 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1575 command->header.message_size =
1576 (sizeof(struct tf_command_destroy_device_context) -
1577 sizeof(struct tf_command_header))/sizeof(u32);
1578 command->header.operation_id =
1579 (u32) &answerStructure;
1580 command->destroy_device_context.device_context =
1581 connection->device_context;
1582
1583 error = tf_send_recv(comm,
1584 command, &answerStructure, connection, false);
1585 if (error == -EINTR) {
1586 /*
1587 * Another thread already sent
1588 * DESTROY_DEVICE_CONTEXT.
1589 * We must still wait for the answer
1590 * to the original command.
1591 */
1592 command = NULL;
1593 goto destroy_context;
1594 } else {
1595 /* An answer was received.
1596 * Check if it is the answer
1597 * to the DESTROY_DEVICE_CONTEXT.
1598 */
1599 spin_lock(&comm->lock);
1600 if (answer->header.message_type !=
1601 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1602 answerStructure.answer_copied = false;
1603 }
1604 spin_unlock(&comm->lock);
1605 if (!answerStructure.answer_copied) {
1606 /* Answer to DESTROY_DEVICE_CONTEXT
1607 * was not yet received.
1608 * Wait for the answer.
1609 */
1610 dprintk(KERN_INFO
1611 "[pid=%d] tf_send_receive:"
1612 "Answer to DESTROY_DEVICE_CONTEXT"
1613 "not yet received.Retry\n",
1614 current->pid);
1615 command = NULL;
1616 goto destroy_context;
1617 }
1618 }
1619 }
1620
1621 dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
1622 goto exit;
1623
1624destroy_context:
1625 error = tf_send_recv(comm,
1626 command, &answerStructure, connection, false);
1627
1628 /*
1629 * tf_send_recv cannot return an error because
1630 * it's not killable and not within a connection
1631 */
1632 BUG_ON(error != 0);
1633
1634 /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
1635 spin_lock(&(connection->state_lock));
1636 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1637 spin_unlock(&(connection->state_lock));
1638
1639exit:
1640
1641#ifdef CONFIG_SMP
1642 ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
1643 if (ret_affinity != 0)
1644 dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX\n", ret_affinity);
1645#endif
1646 return error;
1647}
1648
1649/*----------------------------------------------------------------------------
1650 * Power management
1651 *----------------------------------------------------------------------------*/
1652
1653
1654/*
1655 * Handles all the power management calls.
1656 * The operation is the type of power management
1657 * operation to be performed.
1658 *
1659 * This routine will only return if a failure occurred or if
1660 * the required power management is of type "resume".
1661 * "Hibernate" and "Shutdown" are expected to block in the
1662 * corresponding SMC to the Secure World.
1663 */
1664int tf_power_management(struct tf_comm *comm,
1665 enum TF_POWER_OPERATION operation)
1666{
1667 u32 status;
1668 int error = 0;
1669
1670 dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
1671
1672#ifdef CONFIG_TF_ZEBRA
1673 if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1674 dprintk(KERN_INFO "tf_power_management(%p): "
1675 "succeeded (not started)\n", comm);
1676
1677 return 0;
1678 }
1679#endif
1680
1681 status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
1682 & TF_STATUS_POWER_STATE_MASK)
1683 >> TF_STATUS_POWER_STATE_SHIFT);
1684
1685 switch (operation) {
1686 case TF_POWER_OPERATION_SHUTDOWN:
1687 switch (status) {
1688 case TF_POWER_MODE_ACTIVE:
1689 error = tf_pm_shutdown(comm);
1690
1691 if (error) {
1692 dprintk(KERN_ERR "tf_power_management(): "
1693 "Failed with error code 0x%08x\n",
1694 error);
1695 goto error;
1696 }
1697 break;
1698
1699 default:
1700 goto not_allowed;
1701 }
1702 break;
1703
1704 case TF_POWER_OPERATION_HIBERNATE:
1705 switch (status) {
1706 case TF_POWER_MODE_ACTIVE:
1707 error = tf_pm_hibernate(comm);
1708
1709 if (error) {
1710 dprintk(KERN_ERR "tf_power_management(): "
1711 "Failed with error code 0x%08x\n",
1712 error);
1713 goto error;
1714 }
1715 break;
1716
1717 default:
1718 goto not_allowed;
1719 }
1720 break;
1721
1722 case TF_POWER_OPERATION_RESUME:
1723 error = tf_pm_resume(comm);
1724
1725 if (error != 0) {
1726 dprintk(KERN_ERR "tf_power_management(): "
1727 "Failed with error code 0x%08x\n",
1728 error);
1729 goto error;
1730 }
1731 break;
1732 }
1733
1734 dprintk(KERN_INFO "tf_power_management(): succeeded\n");
1735 return 0;
1736
1737not_allowed:
1738 dprintk(KERN_ERR "tf_power_management(): "
1739 "Power command not allowed in current "
1740 "Secure World state %d\n", status);
1741 error = -ENOTTY;
1742error:
1743 return error;
1744}
1745
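A hypothetical call site for the resume path, using the enum declared in tf_comm.h (error handling is illustrative only):

	int error = tf_power_management(comm, TF_POWER_OPERATION_RESUME);
	if (error != 0)
		printk(KERN_ERR "secure world resume failed (%d)\n", error);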
diff --git a/security/tf_driver/tf_comm.h b/security/tf_driver/tf_comm.h
new file mode 100644
index 00000000000..8921dc1d1be
--- /dev/null
+++ b/security/tf_driver/tf_comm.h
@@ -0,0 +1,202 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __TF_COMM_H__
21#define __TF_COMM_H__
22
23#include "tf_defs.h"
24#include "tf_protocol.h"
25
26/*----------------------------------------------------------------------------
27 * Misc
28 *----------------------------------------------------------------------------*/
29
30void tf_set_current_time(struct tf_comm *comm);
31
32/*
33 * Atomic accesses to 32-bit variables in the L1 Shared buffer
34 */
35static inline u32 tf_read_reg32(const u32 *comm_buffer)
36{
37 u32 result;
38
39 __asm__ __volatile__("@ tf_read_reg32\n"
40 "ldrex %0, [%1]\n"
41 : "=&r" (result)
42 : "r" (comm_buffer)
43 );
44
45 return result;
46}
47
48static inline void tf_write_reg32(void *comm_buffer, u32 value)
49{
50 u32 tmp;
51
52 __asm__ __volatile__("@ tf_write_reg32\n"
53 "1: ldrex %0, [%2]\n"
54 " strex %0, %1, [%2]\n"
55 " teq %0, #0\n"
56 " bne 1b"
57 : "=&r" (tmp)
58 : "r" (value), "r" (comm_buffer)
59 : "cc"
60 );
61}
62
63/*
64 * Atomic accesses to 64-bit variables in the L1 Shared buffer
65 */
66static inline u64 tf_read_reg64(void *comm_buffer)
67{
68 u64 result;
69
70 __asm__ __volatile__("@ tf_read_reg64\n"
71 "ldrexd %0, [%1]\n"
72 : "=&r" (result)
73 : "r" (comm_buffer)
74 );
75
76 return result;
77}
78
79static inline void tf_write_reg64(void *comm_buffer, u64 value)
80{
81 u64 tmp;
82
83 __asm__ __volatile__("@ tf_write_reg64\n"
84 "1: ldrexd %0, [%2]\n"
85 " strexd %0, %1, [%2]\n"
86 " teq %0, #0\n"
87 " bne 1b"
88 : "=&r" (tmp)
89 : "r" (value), "r" (comm_buffer)
90 : "cc"
91 );
92}
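These helpers are how the driver touches fields of the L1 shared buffer. For instance, tf_set_current_time() in tf_comm.c pairs them as follows (condensed excerpt; time64 holds wall-clock milliseconds):

	u32 serial;
	u64 time64 = 0;	/* e.g. milliseconds since the Epoch */

	serial = tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
	tf_write_reg64(&comm->l1_buffer->time_n[serial & TF_SYNC_SERIAL_TIMESLOT_N],
		       time64);
	tf_write_reg32(&comm->l1_buffer->sync_serial_n, serial);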
93
94/*----------------------------------------------------------------------------
95 * SMC operations
96 *----------------------------------------------------------------------------*/
97
98/* RPC return values */
99#define RPC_NO 0x00 /* No RPC to execute */
100#define RPC_YIELD 0x01 /* Yield RPC */
101#define RPC_NON_YIELD 0x02 /* non-Yield RPC */
102
103int tf_rpc_execute(struct tf_comm *comm);
104
105/*----------------------------------------------------------------------------
106 * Shared memory related operations
107 *----------------------------------------------------------------------------*/
108
109#define L1_DESCRIPTOR_FAULT (0x00000000)
110#define L2_DESCRIPTOR_FAULT (0x00000000)
111
112#define L2_DESCRIPTOR_ADDR_MASK (0xFFFFF000)
113
114#define DESCRIPTOR_V13_12_MASK (0x3 << PAGE_SHIFT)
115#define DESCRIPTOR_V13_12_GET(a) (((a) & DESCRIPTOR_V13_12_MASK) >> PAGE_SHIFT)
116
117struct tf_coarse_page_table *tf_alloc_coarse_page_table(
118 struct tf_coarse_page_table_allocation_context *alloc_context,
119 u32 type);
120
121void tf_free_coarse_page_table(
122 struct tf_coarse_page_table_allocation_context *alloc_context,
123 struct tf_coarse_page_table *coarse_pg_table,
124 int force);
125
126void tf_init_coarse_page_table_allocator(
127 struct tf_coarse_page_table_allocation_context *alloc_context);
128
129void tf_release_coarse_page_table_allocator(
130 struct tf_coarse_page_table_allocation_context *alloc_context);
131
132struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor);
133
134u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm);
135
136void tf_cleanup_shared_memory(
137 struct tf_coarse_page_table_allocation_context *alloc_context,
138 struct tf_shmem_desc *shmem_desc,
139 u32 full_cleanup);
140
141int tf_fill_descriptor_table(
142 struct tf_coarse_page_table_allocation_context *alloc_context,
143 struct tf_shmem_desc *shmem_desc,
144 u32 buffer,
145 struct vm_area_struct **vmas,
146 u32 descriptors[TF_MAX_COARSE_PAGES],
147 u32 buffer_size,
148 u32 *buffer_start_offset,
149 bool in_user_space,
150 u32 flags,
151 u32 *descriptor_count);
152
153/*----------------------------------------------------------------------------
154 * Standard communication operations
155 *----------------------------------------------------------------------------*/
156
157int tf_schedule_secure_world(struct tf_comm *comm);
158
159int tf_send_receive(
160 struct tf_comm *comm,
161 union tf_command *command,
162 union tf_answer *answer,
163 struct tf_connection *connection,
164 bool bKillable);
165
166
167/**
168 * Get a pointer to the Secure World description.
169 * This points directly into the L1 shared buffer
170 * and is valid only once the communication has
171 * been initialized.
172 **/
173u8 *tf_get_description(struct tf_comm *comm);
174
175/*----------------------------------------------------------------------------
176 * Power management
177 *----------------------------------------------------------------------------*/
178
179enum TF_POWER_OPERATION {
180 TF_POWER_OPERATION_HIBERNATE = 1,
181 TF_POWER_OPERATION_SHUTDOWN = 2,
182 TF_POWER_OPERATION_RESUME = 3,
183};
184
185int tf_pm_hibernate(struct tf_comm *comm);
186int tf_pm_resume(struct tf_comm *comm);
187int tf_pm_shutdown(struct tf_comm *comm);
188
189int tf_power_management(struct tf_comm *comm,
190 enum TF_POWER_OPERATION operation);
191
192
193/*----------------------------------------------------------------------------
194 * Communication initialization and termination
195 *----------------------------------------------------------------------------*/
196
197int tf_init(struct tf_comm *comm);
198
199void tf_terminate(struct tf_comm *comm);
200
201
202#endif /* __TF_COMM_H__ */
diff --git a/security/tf_driver/tf_comm_tz.c b/security/tf_driver/tf_comm_tz.c
new file mode 100644
index 00000000000..4c89de84acc
--- /dev/null
+++ b/security/tf_driver/tf_comm_tz.c
@@ -0,0 +1,885 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <asm/div64.h>
21#include <asm/system.h>
22#include <linux/version.h>
23#include <asm/cputype.h>
24#include <linux/interrupt.h>
25#include <linux/page-flags.h>
26#include <linux/pagemap.h>
27#include <linux/vmalloc.h>
28#include <linux/jiffies.h>
29
30#include "tf_defs.h"
31#include "tf_comm.h"
32#include "tf_protocol.h"
33#include "tf_util.h"
34#include "tf_conn.h"
35
36/*
37 * Structure common to all SMC operations
38 */
39struct tf_generic_smc {
40 u32 reg0;
41 u32 reg1;
42 u32 reg2;
43 u32 reg3;
44 u32 reg4;
45};
46
47/*----------------------------------------------------------------------------
48 * SMC operations
49 *----------------------------------------------------------------------------*/
50
51static inline void tf_smc_generic_call(
52 struct tf_generic_smc *generic_smc)
53{
54#ifdef CONFIG_SMP
55 long ret;
56 cpumask_t saved_cpu_mask;
57 cpumask_t local_cpu_mask = CPU_MASK_NONE;
58
59 cpu_set(0, local_cpu_mask);
60 sched_getaffinity(0, &saved_cpu_mask);
61 ret = sched_setaffinity(0, &local_cpu_mask);
62 if (ret != 0)
63 dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret);
64#endif
65
66 __asm__ volatile(
67 "mov r0, %2\n"
68 "mov r1, %3\n"
69 "mov r2, %4\n"
70 "mov r3, %5\n"
71 "mov r4, %6\n"
72 ".word 0xe1600070 @ SMC 0\n"
73 "mov %0, r0\n"
74 "mov %1, r1\n"
75 : "=r" (generic_smc->reg0), "=r" (generic_smc->reg1)
76 : "r" (generic_smc->reg0), "r" (generic_smc->reg1),
77 "r" (generic_smc->reg2), "r" (generic_smc->reg3),
78 "r" (generic_smc->reg4)
79 : "r0", "r1", "r2", "r3", "r4");
80
81#ifdef CONFIG_SMP
82 ret = sched_setaffinity(0, &saved_cpu_mask);
83 if (ret != 0)
84 dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret);
85#endif
86}
87
88/*
89 * Calls the get protocol version SMC.
90 * Fills the parameter protocol_version with the version number returned by
91 * the SMC.
92 */
93static inline void tf_smc_get_protocol_version(u32 *protocol_version)
94{
95 struct tf_generic_smc generic_smc;
96
97 generic_smc.reg0 = TF_SMC_GET_PROTOCOL_VERSION;
98 generic_smc.reg1 = 0;
99 generic_smc.reg2 = 0;
100 generic_smc.reg3 = 0;
101 generic_smc.reg4 = 0;
102
103 tf_smc_generic_call(&generic_smc);
104 *protocol_version = generic_smc.reg1;
105}
106
107
108/*
109 * Calls the init SMC with the specified parameters.
110 * Returns zero upon successful completion, or an appropriate error code upon
111 * failure.
112 */
113static inline int tf_smc_init(u32 shared_page_descriptor)
114{
115 struct tf_generic_smc generic_smc;
116
117 generic_smc.reg0 = TF_SMC_INIT;
118 /* Descriptor for the layer 1 shared buffer */
119 generic_smc.reg1 = shared_page_descriptor;
120 generic_smc.reg2 = 0;
121 generic_smc.reg3 = 0;
122 generic_smc.reg4 = 0;
123
124 tf_smc_generic_call(&generic_smc);
125 if (generic_smc.reg0 != S_SUCCESS)
126 printk(KERN_ERR "tf_smc_init:"
127 " r0=0x%08X upon return (expected 0x%08X)!\n",
128 generic_smc.reg0,
129 S_SUCCESS);
130
131 return generic_smc.reg0;
132}
133
134
135/*
136 * Calls the reset irq SMC.
137 */
138static inline void tf_smc_reset_irq(void)
139{
140 struct tf_generic_smc generic_smc;
141
142 generic_smc.reg0 = TF_SMC_RESET_IRQ;
143 generic_smc.reg1 = 0;
144 generic_smc.reg2 = 0;
145 generic_smc.reg3 = 0;
146 generic_smc.reg4 = 0;
147
148 tf_smc_generic_call(&generic_smc);
149}
150
151
152/*
153 * Calls the WAKE_UP SMC.
154 * Returns zero upon successful completion, or an appropriate error code upon
155 * failure.
156 */
157static inline int tf_smc_wake_up(u32 l1_shared_buffer_descriptor,
158 u32 shared_mem_start_offset,
159 u32 shared_mem_size)
160{
161 struct tf_generic_smc generic_smc;
162
163 generic_smc.reg0 = TF_SMC_WAKE_UP;
164 generic_smc.reg1 = shared_mem_start_offset;
165 /* long form command */
166 generic_smc.reg2 = shared_mem_size | 0x80000000;
167 generic_smc.reg3 = l1_shared_buffer_descriptor;
168 generic_smc.reg4 = 0;
169
170 tf_smc_generic_call(&generic_smc);
171
172 if (generic_smc.reg0 != S_SUCCESS)
173 printk(KERN_ERR "tf_smc_wake_up:"
174 " r0=0x%08X upon return (expected 0x%08X)!\n",
175 generic_smc.reg0,
176 S_SUCCESS);
177
178 return generic_smc.reg0;
179}
180
181/*
182 * Calls the N-Yield SMC.
183 */
184static inline void tf_smc_nyield(void)
185{
186 struct tf_generic_smc generic_smc;
187
188 generic_smc.reg0 = TF_SMC_N_YIELD;
189 generic_smc.reg1 = 0;
190 generic_smc.reg2 = 0;
191 generic_smc.reg3 = 0;
192 generic_smc.reg4 = 0;
193
194 tf_smc_generic_call(&generic_smc);
195}
196
197/* Yields the Secure World */
198int tf_schedule_secure_world(struct tf_comm *comm)
199{
200 tf_set_current_time(comm);
201
202 /* yield to the Secure World */
203 tf_smc_nyield();
204
205 return 0;
206}
207
208/*
209 * Returns the L2 descriptor for the specified kernel page.
210 */
211
212#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
213#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
214
215static u32 tf_get_l2init_descriptor(u32 vaddr)
216{
217 struct page *page;
218 u32 paddr;
219 u32 descriptor;
220
221 descriptor = L2_INIT_DESCRIPTOR_BASE;
222
223 /* get physical address and add to descriptor */
224 page = virt_to_page(vaddr);
225 paddr = page_to_phys(page);
226 descriptor |= (paddr & L2_DESCRIPTOR_ADDR_MASK);
227
228 /* Add virtual address v[13:12] bits to descriptor */
229 descriptor |= (DESCRIPTOR_V13_12_GET(vaddr)
230 << L2_INIT_DESCRIPTOR_V13_12_SHIFT);
231
232 descriptor |= tf_get_l2_descriptor_common(vaddr, &init_mm);
233
234
235 return descriptor;
236}
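To make the descriptor layout concrete, a hedged worked example, assuming PAGE_SHIFT = 12 and that the page at virtual address 0xC0003000 sits at physical address 0x8001F000:

	descriptor  = L2_INIT_DESCRIPTOR_BASE;                /* 0x00000003 */
	descriptor |= 0x8001F000 & L2_DESCRIPTOR_ADDR_MASK;   /* 0x8001F003 */
	/* v[13:12] of 0xC0003000 is 0x3, shifted left by 4 */
	descriptor |= 0x3 << L2_INIT_DESCRIPTOR_V13_12_SHIFT; /* 0x8001F033 */
	/* plus whatever attribute bits
	 * tf_get_l2_descriptor_common(vaddr, &init_mm) adds */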
237
238
239/*----------------------------------------------------------------------------
240 * Power management
241 *----------------------------------------------------------------------------*/
242
243/*
244 * Free the memory used by the W3B buffer for the specified comm.
245 * This function does nothing if no W3B buffer is allocated for the device.
246 */
247static inline void tf_free_w3b(struct tf_comm *comm)
248{
249 tf_cleanup_shared_memory(
250 &(comm->w3b_cpt_alloc_context),
251 &(comm->w3b_shmem_desc),
252 0);
253
254 tf_release_coarse_page_table_allocator(&(comm->w3b_cpt_alloc_context));
255
256 internal_vfree((void *)comm->w3b);
257 comm->w3b = 0;
258 comm->w3b_shmem_size = 0;
259 clear_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
260}
261
262
263/*
264 * Allocates the W3B buffer for the specified comm.
265 * Returns zero upon successful completion, or an appropriate error code upon
266 * failure.
267 */
268static inline int tf_allocate_w3b(struct tf_comm *comm)
269{
270 int error;
271 u32 flags;
272 u32 config_flag_s;
273 u32 *w3b_descriptors;
274 u32 w3b_descriptor_count;
275 u32 w3b_current_size;
276
277 config_flag_s = tf_read_reg32(&comm->l1_buffer->config_flag_s);
278
279retry:
280 if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags))) == 0) {
281 /*
282 * Initialize the shared memory for the W3B
283 */
284 tf_init_coarse_page_table_allocator(
285 &comm->w3b_cpt_alloc_context);
286 } else {
287 /*
288 * The W3B is allocated but do we have to reallocate a bigger
289 * one?
290 */
291 /* Check H bit */
292 if ((config_flag_s & (1<<4)) != 0) {
293 /* The size of the W3B may change after SMC_INIT */
294 /* Read the current value */
295 w3b_current_size = tf_read_reg32(
296 &comm->l1_buffer->w3b_size_current_s);
297 if (comm->w3b_shmem_size > w3b_current_size)
298 return 0;
299
300 tf_free_w3b(comm);
301 goto retry;
302 } else {
303 return 0;
304 }
305 }
306
307 /* check H bit */
308 if ((config_flag_s & (1<<4)) != 0)
309 /* The size of the W3B may change after SMC_INIT */
310 /* Read the current value */
311 comm->w3b_shmem_size = tf_read_reg32(
312 &comm->l1_buffer->w3b_size_current_s);
313 else
314 comm->w3b_shmem_size = tf_read_reg32(
315 &comm->l1_buffer->w3b_size_max_s);
316
317 comm->w3b = (u32) internal_vmalloc(comm->w3b_shmem_size);
318 if (comm->w3b == 0) {
319 printk(KERN_ERR "tf_allocate_w3b():"
320 " Out of memory for W3B buffer (%u bytes)!\n",
321 (unsigned int)(comm->w3b_shmem_size));
322 error = -ENOMEM;
323 goto error;
324 }
325
326 /* initialize the w3b_shmem_desc structure */
327 comm->w3b_shmem_desc.type = TF_SHMEM_TYPE_PM_HIBERNATE;
328 INIT_LIST_HEAD(&(comm->w3b_shmem_desc.list));
329
330 flags = (TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE);
331
332 /* directly point to the L1 shared buffer W3B descriptors */
333 w3b_descriptors = comm->l1_buffer->w3b_descriptors;
334
335 /*
336 * tf_fill_descriptor_table uses the following parameter as an
337 * IN/OUT
338 */
339
340 error = tf_fill_descriptor_table(
341 &(comm->w3b_cpt_alloc_context),
342 &(comm->w3b_shmem_desc),
343 comm->w3b,
344 NULL,
345 w3b_descriptors,
346 comm->w3b_shmem_size,
347 &(comm->w3b_shmem_offset),
348 false,
349 flags,
350 &w3b_descriptor_count);
351 if (error != 0) {
352 printk(KERN_ERR "tf_allocate_w3b():"
353 " tf_fill_descriptor_table failed with "
354 "error code 0x%08x!\n",
355 error);
356 goto error;
357 }
358
359 set_bit(TF_COMM_FLAG_W3B_ALLOCATED, &(comm->flags));
360
361 /* successful completion */
362 return 0;
363
364error:
365 tf_free_w3b(comm);
366
367 return error;
368}
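The H bit (bit 4 of config_flag_s) tested above selects between a fixed-size and a dynamically resizable W3B. A hedged restatement of that size selection, as a hypothetical helper:

	/* Hypothetical helper restating the H-bit size selection above */
	static u32 example_w3b_size(struct tf_comm *comm, u32 config_flag_s)
	{
		if ((config_flag_s & (1 << 4)) != 0)
			/* H bit set: the size may change after SMC_INIT */
			return tf_read_reg32(
				&comm->l1_buffer->w3b_size_current_s);

		return tf_read_reg32(&comm->l1_buffer->w3b_size_max_s);
	}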
369
370/*
371 * Perform a Secure World shutdown operation.
372 * The routine does not return if the operation succeeds;
373 * it returns an appropriate error code if
374 * the operation fails.
375 */
376int tf_pm_shutdown(struct tf_comm *comm)
377{
378#ifdef CONFIG_TFN
379 /* this function is useless for the TEGRA product */
380 return 0;
381#else
382 int error;
383 union tf_command command;
384 union tf_answer answer;
385
386 dprintk(KERN_INFO "tf_pm_shutdown()\n");
387
388 memset(&command, 0, sizeof(command));
389
390 command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
391 command.header.message_size =
392 (sizeof(struct tf_command_management) -
393 sizeof(struct tf_command_header))/sizeof(u32);
394
395 command.management.command = TF_MANAGEMENT_SHUTDOWN;
396
397 error = tf_send_receive(
398 comm,
399 &command,
400 &answer,
401 NULL,
402 false);
403
404 if (error != 0) {
405 dprintk(KERN_ERR "tf_pm_shutdown(): "
406 "tf_send_receive failed (error %d)!\n",
407 error);
408 return error;
409 }
410
411#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
412 if (answer.header.error_code != 0)
413 dprintk(KERN_ERR "tf_driver: shutdown failed.\n");
414 else
415 dprintk(KERN_INFO "tf_driver: shutdown succeeded.\n");
416#endif
417
418 return answer.header.error_code;
419#endif
420}
421
422
423/*
424 * Perform a Secure World hibernate operation.
425 * The routine does not return if the operation succeeds;
426 * it returns an appropriate error code if
427 * the operation fails.
428 */
429int tf_pm_hibernate(struct tf_comm *comm)
430{
431#ifdef CONFIG_TFN
432 /* this function is useless for the TEGRA product */
433 return 0;
434#else
435 int error;
436 union tf_command command;
437 union tf_answer answer;
438 u32 first_command;
439 u32 first_free_command;
440
441 dprintk(KERN_INFO "tf_pm_hibernate()\n");
442
443 error = tf_allocate_w3b(comm);
444 if (error != 0) {
445 dprintk(KERN_ERR "tf_pm_hibernate(): "
446 "tf_allocate_w3b failed (error %d)!\n",
447 error);
448 return error;
449 }
450
451 /*
452 * As the polling thread is already hibernating, we
453 * should send the message and receive the answer ourselves.
454 */
455
456 /* build the "prepare to hibernate" message */
457 command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
458 command.management.command = TF_MANAGEMENT_HIBERNATE;
459 /* Long Form Command */
460 command.management.shared_mem_descriptors[0] = 0;
461 command.management.shared_mem_descriptors[1] = 0;
462 command.management.w3b_size =
463 comm->w3b_shmem_size | 0x80000000;
464 command.management.w3b_start_offset =
465 comm->w3b_shmem_offset;
466 command.header.operation_id = (u32) &answer;
467
468 tf_dump_command(&command);
469
470 /* find a slot to send the message in */
471
472 /* AFY: why not use the function tf_send_receive?? We are
473 * duplicating a lot of subtle code here. And it's not going to be
474 * tested because power management is currently not supported by the
475 * secure world. */
476 for (;;) {
477 int queue_words_count, command_size;
478
479 spin_lock(&(comm->lock));
480
481 first_command = tf_read_reg32(
482 &comm->l1_buffer->first_command);
483 first_free_command = tf_read_reg32(
484 &comm->l1_buffer->first_free_command);
485
486 queue_words_count = first_free_command - first_command;
487 command_size = command.header.message_size
488 + sizeof(struct tf_command_header);
489 if ((queue_words_count + command_size) <
490 TF_N_MESSAGE_QUEUE_CAPACITY) {
491 /* Command queue is not full */
492 memcpy(&comm->l1_buffer->command_queue[
493 first_free_command %
494 TF_N_MESSAGE_QUEUE_CAPACITY],
495 &command,
496 command_size * sizeof(u32));
497
498 tf_write_reg32(&comm->l1_buffer->first_free_command,
499 first_free_command + command_size);
500
501 spin_unlock(&(comm->lock));
502 break;
503 }
504
505 spin_unlock(&(comm->lock));
506 (void)tf_schedule_secure_world(comm);
507 }
508
509 /* now wait for the answer, dispatching other answers */
510 while (1) {
511 u32 first_answer;
512 u32 first_free_answer;
513
514 /* check all the answers */
515 first_free_answer = tf_read_reg32(
516 &comm->l1_buffer->first_free_answer);
517 first_answer = tf_read_reg32(
518 &comm->l1_buffer->first_answer);
519
520 if (first_answer != first_free_answer) {
521 int bFoundAnswer = 0;
522
523 do {
524 /* answer queue not empty */
525 union tf_answer tmp_answer;
526 struct tf_answer_header header;
527 /* size of the command in 32-bit words */
528 int command_size;
529
530 /* get the message_size */
531 memcpy(&header,
532 &comm->l1_buffer->answer_queue[
533 first_answer %
534 TF_S_ANSWER_QUEUE_CAPACITY],
535 sizeof(struct tf_answer_header));
536 command_size = header.message_size +
537 sizeof(struct tf_answer_header);
538
539 /*
540 * NOTE: message_size is the number of words
541 * following the first word
542 */
543 memcpy(&tmp_answer,
544 &comm->l1_buffer->answer_queue[
545 first_answer %
546 TF_S_ANSWER_QUEUE_CAPACITY],
547 command_size * sizeof(u32));
548
549 tf_dump_answer(&tmp_answer);
550
551 if (tmp_answer.header.operation_id ==
552 (u32) &answer) {
553 /*
554 * this is the answer to the "prepare to
555 * hibernate" message
556 */
557 memcpy(&answer,
558 &tmp_answer,
559 command_size * sizeof(u32));
560
561 bFoundAnswer = 1;
562 tf_write_reg32(
563 &comm->l1_buffer->first_answer,
564 first_answer + command_size);
565 break;
566 } else {
567 /*
568 * this is a standard message answer,
569 * dispatch it
570 */
571 struct tf_answer_struct
572 *answerStructure;
573
574 answerStructure =
575 (struct tf_answer_struct *)
576 tmp_answer.header.operation_id;
577
578 memcpy(answerStructure->answer,
579 &tmp_answer,
580 command_size * sizeof(u32));
581
582 answerStructure->answer_copied = true;
583 }
584
585 tf_write_reg32(
586 &comm->l1_buffer->first_answer,
587 first_answer + command_size);
588 } while (first_answer != first_free_answer);
589
590 if (bFoundAnswer)
591 break;
592 }
593
594 /*
595 * Since the Secure World is at least running the "prepare to
596 * hibernate" message, its timeout must be immediate, so there is
597 * no need to check its timeout and schedule() the current
598 * thread.
599 */
600 (void)tf_schedule_secure_world(comm);
601 } /* while (1) */
602
603 printk(KERN_INFO "tf_driver: hibernate.\n");
604 return 0;
605#endif
606}
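The command and answer queues used above rely on free-running 32-bit counters: the write index minus the read index is the occupancy, and a counter modulo the queue capacity is the slot. A hedged sketch of the queue-full check from the send loop (the helper name is hypothetical; unsigned wraparound keeps the subtraction correct):

	/* Hypothetical helper mirroring the in-line queue-full check above */
	static int example_queue_has_room(u32 first_command,
		u32 first_free_command, u32 command_size)
	{
		u32 queue_words_count = first_free_command - first_command;

		return (queue_words_count + command_size) <
			TF_N_MESSAGE_QUEUE_CAPACITY;
	}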
607
608
609/*
610 * Perform a Secure World resume operation.
611 * The routine returns once the Secure World is active again
612 * or if an error occurs during the "resume" process
613 */
614int tf_pm_resume(struct tf_comm *comm)
615{
616#ifdef CONFIG_TFN
617 /* this function is useless for the TEGRA product */
618 return 0;
619#else
620 int error;
621 u32 status;
622
623 dprintk(KERN_INFO "tf_pm_resume()\n");
624
625 error = tf_smc_wake_up(
626 tf_get_l2init_descriptor((u32)comm->l1_buffer),
627 comm->w3b_shmem_offset,
628 comm->w3b_shmem_size);
629
630 if (error != 0) {
631 dprintk(KERN_ERR "tf_pm_resume(): "
632 "tf_smc_wake_up failed (error %d)!\n",
633 error);
634 return error;
635 }
636
637 status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
638 & TF_STATUS_POWER_STATE_MASK)
639 >> TF_STATUS_POWER_STATE_SHIFT);
640
641 while ((status != TF_POWER_MODE_ACTIVE)
642 && (status != TF_POWER_MODE_PANIC)) {
643 tf_smc_nyield();
644
645 status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
646 & TF_STATUS_POWER_STATE_MASK)
647 >> TF_STATUS_POWER_STATE_SHIFT);
648
649 /*
650 * As this may last quite a while, call the kernel scheduler to
651 * hand over CPU for other operations
652 */
653 schedule();
654 }
655
656 switch (status) {
657 case TF_POWER_MODE_ACTIVE:
658 break;
659
660 case TF_POWER_MODE_PANIC:
661 dprintk(KERN_ERR "tf_pm_resume(): "
662 "Secure World POWER_MODE_PANIC!\n");
663 return -EINVAL;
664
665 default:
666 dprintk(KERN_ERR "tf_pm_resume(): "
667 "unexpected Secure World POWER_MODE (%d)!\n", status);
668 return -EINVAL;
669 }
670
671 dprintk(KERN_INFO "tf_pm_resume() succeeded\n");
672 return 0;
673#endif
674}
675
676/*----------------------------------------------------------------------------
677 * Communication initialization and termination
678 *----------------------------------------------------------------------------*/
679
680/*
681 * Handles the software interrupts issued by the Secure World.
682 */
683static irqreturn_t tf_soft_int_handler(int irq, void *dev_id)
684{
685 struct tf_comm *comm = (struct tf_comm *) dev_id;
686
687 if (comm->l1_buffer == NULL)
688 return IRQ_NONE;
689
690 if ((tf_read_reg32(&comm->l1_buffer->status_s) &
691 TF_STATUS_P_MASK) == 0)
692 /* interrupt not issued by the Trusted Foundations Software */
693 return IRQ_NONE;
694
695 tf_smc_reset_irq();
696
697 /* signal N_SM_EVENT */
698 wake_up(&comm->wait_queue);
699
700 return IRQ_HANDLED;
701}
702
703/*
704 * Initializes the communication with the Secure World.
705 * The L1 shared buffer is allocated and the Secure World
706 * is yielded for the first time.
707 * Returns successfully once the communication with
708 * the Secure World is up and running.
709 *
710 * Returns 0 upon success or appropriate error code
711 * upon failure
712 */
713int tf_init(struct tf_comm *comm)
714{
715 int error;
716 struct page *buffer_page;
717 u32 protocol_version;
718
719 dprintk(KERN_INFO "tf_init()\n");
720
721 spin_lock_init(&(comm->lock));
722 comm->flags = 0;
723 comm->l1_buffer = NULL;
724 init_waitqueue_head(&(comm->wait_queue));
725
726 /*
727 * Check the Secure World protocol version is the expected one.
728 */
729 tf_smc_get_protocol_version(&protocol_version);
730
731 if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
732 != TF_S_PROTOCOL_MAJOR_VERSION) {
733 printk(KERN_ERR "tf_init():"
734 " Unsupported Secure World Major Version "
735 "(0x%02X, expected 0x%02X)!\n",
736 GET_PROTOCOL_MAJOR_VERSION(protocol_version),
737 TF_S_PROTOCOL_MAJOR_VERSION);
738 error = -EIO;
739 goto error;
740 }
741
742 /*
743 * Register the software interrupt handler if required to.
744 */
745 if (comm->soft_int_irq != -1) {
746 dprintk(KERN_INFO "tf_init(): "
747 "Registering software interrupt handler (IRQ %d)\n",
748 comm->soft_int_irq);
749
750 error = request_irq(comm->soft_int_irq,
751 tf_soft_int_handler,
752 IRQF_SHARED,
753 TF_DEVICE_BASE_NAME,
754 comm);
755 if (error != 0) {
756 dprintk(KERN_ERR "tf_init(): "
757 "request_irq failed for irq %d (error %d)\n",
758 comm->soft_int_irq, error);
759 goto error;
760 }
761 set_bit(TF_COMM_FLAG_IRQ_REQUESTED, &(comm->flags));
762 }
763
764 /*
765 * Allocate and initialize the L1 shared buffer.
766 */
767 comm->l1_buffer = (void *) internal_get_zeroed_page(GFP_KERNEL);
768 if (comm->l1_buffer == NULL) {
769 printk(KERN_ERR "tf_init():"
770 " get_zeroed_page failed for L1 shared buffer!\n");
771 error = -ENOMEM;
772 goto error;
773 }
774
775 /*
776 * Ensure the page storing the L1 shared buffer is mapped.
777 */
778 buffer_page = virt_to_page(comm->l1_buffer);
779 trylock_page(buffer_page);
780
781 dprintk(KERN_INFO "tf_init(): "
782 "L1 shared buffer allocated at virtual:%p, "
783 "physical:%p (page:%p)\n",
784 comm->l1_buffer,
785 (void *)virt_to_phys(comm->l1_buffer),
786 buffer_page);
787
788 set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags));
789
790 /*
791 * Init SMC
792 */
793 error = tf_smc_init(
794 tf_get_l2init_descriptor((u32)comm->l1_buffer));
795 if (error != S_SUCCESS) {
796 dprintk(KERN_ERR "tf_init(): "
797 "tf_smc_init failed (error 0x%08X)!\n",
798 error);
799 goto error;
800 }
801
802 /*
803 * Check whether the interrupts are actually enabled.
804 * If not, remove the IRQ handler.
805 */
806 if ((tf_read_reg32(&comm->l1_buffer->config_flag_s) &
807 TF_CONFIG_FLAG_S) == 0) {
808 if (test_and_clear_bit(TF_COMM_FLAG_IRQ_REQUESTED,
809 &(comm->flags)) != 0) {
810 dprintk(KERN_INFO "tf_init(): "
811 "Interrupts not used, unregistering "
812 "softint (IRQ %d)\n",
813 comm->soft_int_irq);
814
815 free_irq(comm->soft_int_irq, comm);
816 }
817 } else {
818 if (test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
819 &(comm->flags)) == 0) {
820 /*
821 * Interrupts are enabled in the Secure World, but not
822 * handled by the driver
823 */
824 dprintk(KERN_ERR "tf_init(): "
825 "soft_interrupt argument not provided\n");
826 error = -EINVAL;
827 goto error;
828 }
829 }
830
831 /*
832 * Successful completion.
833 */
834
835 /* yield for the first time */
836 (void)tf_schedule_secure_world(comm);
837
838 dprintk(KERN_INFO "tf_init(): Success\n");
839 return S_SUCCESS;
840
841error:
842 /*
843 * Error handling.
844 */
845 dprintk(KERN_INFO "tf_init(): Failure (error %d)\n",
846 error);
847 tf_terminate(comm);
848 return error;
849}
850
851
852/*
853 * Attempt to terminate the communication with the Secure World.
854 * The L1 shared buffer is freed.
855 * Calling this routine definitively terminates the communication
856 * with the Secure World: there is no way to inform the Secure World of a new
857 * L1 shared buffer to be used once it has been initialized.
858 */
859void tf_terminate(struct tf_comm *comm)
860{
861 dprintk(KERN_INFO "tf_terminate()\n");
862
863 set_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags));
864
865 if ((test_bit(TF_COMM_FLAG_W3B_ALLOCATED,
866 &(comm->flags))) != 0) {
867 dprintk(KERN_INFO "tf_terminate(): "
868 "Freeing the W3B buffer...\n");
869 tf_free_w3b(comm);
870 }
871
872 if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
873 &(comm->flags))) != 0) {
874 __clear_page_locked(virt_to_page(comm->l1_buffer));
875 internal_free_page((unsigned long) comm->l1_buffer);
876 }
877
878 if ((test_bit(TF_COMM_FLAG_IRQ_REQUESTED,
879 &(comm->flags))) != 0) {
880 dprintk(KERN_INFO "tf_terminate(): "
881 "Unregistering softint (IRQ %d)\n",
882 comm->soft_int_irq);
883 free_irq(comm->soft_int_irq, comm);
884 }
885}
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644
index 00000000000..3148fec4635
--- /dev/null
+++ b/security/tf_driver/tf_conn.c
@@ -0,0 +1,1574 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <linux/atomic.h>
21#include <linux/uaccess.h>
22#include <linux/delay.h>
23#include <linux/errno.h>
24#include <linux/list.h>
25#include <linux/mm.h>
26#include <linux/pagemap.h>
27#include <linux/stddef.h>
28#include <linux/types.h>
29
30#include "s_version.h"
31
32#include "tf_protocol.h"
33#include "tf_defs.h"
34#include "tf_util.h"
35#include "tf_comm.h"
36#include "tf_conn.h"
37
38#ifdef CONFIG_TF_ZEBRA
39#include "tf_crypto.h"
40#endif
41
42#ifdef CONFIG_ANDROID
43#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
44#else
45#define TF_PRIVILEGED_UID_GID 0
46#endif
47
48/*----------------------------------------------------------------------------
49 * Management of the shared memory blocks.
50 *
51 * Shared memory blocks are the blocks registered through
52 * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
53 *----------------------------------------------------------------------------*/
54
55/**
56 * Unmaps a shared memory
57 **/
58void tf_unmap_shmem(
59 struct tf_connection *connection,
60 struct tf_shmem_desc *shmem_desc,
61 u32 full_cleanup)
62{
63 /* check shmem_desc contains a descriptor */
64 if (shmem_desc == NULL)
65 return;
66
67 dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
68
69retry:
70 mutex_lock(&(connection->shmem_mutex));
71 if (atomic_read(&shmem_desc->ref_count) > 1) {
72 /*
73 * Shared mem still in use; wait for the other operations to complete
74 * before actually unmapping it.
75 */
76 dprintk(KERN_INFO "Descriptor in use\n");
77 mutex_unlock(&(connection->shmem_mutex));
78 schedule();
79 goto retry;
80 }
81
82 tf_cleanup_shared_memory(
83 &(connection->cpt_alloc_context),
84 shmem_desc,
85 full_cleanup);
86
87 list_del(&(shmem_desc->list));
88
89 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
90 (full_cleanup != 0)) {
91 internal_kfree(shmem_desc);
92
93 atomic_dec(&(connection->shmem_count));
94 } else {
95 /*
96 * This is a preallocated shared memory; add it to the free list.
97 * Since the device context is unmapped last, it is
98 * always the first element of the free list if no
99 * device context has been created
100 */
101 shmem_desc->block_identifier = 0;
102 list_add(&(shmem_desc->list), &(connection->free_shmem_list));
103 }
104
105 mutex_unlock(&(connection->shmem_mutex));
106}
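The ref_count protocol implied above: a descriptor starts at ref_count = 1, other operations hold an extra reference while they use the block, and tf_unmap_shmem keeps yielding via schedule() until only its own reference remains. A hedged sketch of the user side of that protocol (the surrounding operation is hypothetical):

	/* Hypothetical user side of the ref_count protocol above */
	atomic_inc(&shmem_desc->ref_count);  /* begin using the block      */
	/* ... access the shared memory ... */
	atomic_dec(&shmem_desc->ref_count);  /* let tf_unmap_shmem proceed */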
107
108
109/**
110 * Find the first available slot for a new block of shared memory
111 * and map the user buffer.
112 * Update the descriptors to L1 descriptors
113 * Update the buffer_start_offset and buffer_size fields
114 * shmem_desc is updated to the mapped shared memory descriptor
115 **/
116int tf_map_shmem(
117 struct tf_connection *connection,
118 u32 buffer,
119 /* flags for read-write access rights on the memory */
120 u32 flags,
121 bool in_user_space,
122 u32 descriptors[TF_MAX_COARSE_PAGES],
123 u32 *buffer_start_offset,
124 u32 buffer_size,
125 struct tf_shmem_desc **shmem_desc,
126 u32 *descriptor_count)
127{
128 struct tf_shmem_desc *desc = NULL;
129 int error;
130
131 dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
132 connection,
133 (void *) buffer,
134 flags);
135
136 mutex_lock(&(connection->shmem_mutex));
137
138 /*
139 * Check the list of free shared memory
140 * is not empty
141 */
142 if (list_empty(&(connection->free_shmem_list))) {
143 if (atomic_read(&(connection->shmem_count)) ==
144 TF_SHMEM_MAX_COUNT) {
145 printk(KERN_ERR "tf_map_shmem(%p):"
146 " maximum shared memories already registered\n",
147 connection);
148 error = -ENOMEM;
149 goto error;
150 }
151
152 /* no descriptor available, allocate a new one */
153
154 desc = (struct tf_shmem_desc *) internal_kmalloc(
155 sizeof(*desc), GFP_KERNEL);
156 if (desc == NULL) {
157 printk(KERN_ERR "tf_map_shmem(%p):"
158 " failed to allocate descriptor\n",
159 connection);
160 error = -ENOMEM;
161 goto error;
162 }
163
164 /* Initialize the structure */
165 desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
166 atomic_set(&desc->ref_count, 1);
167 INIT_LIST_HEAD(&(desc->list));
168
169 atomic_inc(&(connection->shmem_count));
170 } else {
171 /* take the first free shared memory descriptor */
172 desc = list_first_entry(&(connection->free_shmem_list),
173 struct tf_shmem_desc, list);
174 list_del(&(desc->list));
175 }
176
177 /* Add the descriptor to the used list */
178 list_add(&(desc->list), &(connection->used_shmem_list));
179
180 error = tf_fill_descriptor_table(
181 &(connection->cpt_alloc_context),
182 desc,
183 buffer,
184 connection->vmas,
185 descriptors,
186 buffer_size,
187 buffer_start_offset,
188 in_user_space,
189 flags,
190 descriptor_count);
191
192 if (error != 0) {
193 dprintk(KERN_ERR "tf_map_shmem(%p):"
194 " tf_fill_descriptor_table failed with error "
195 "code %d!\n",
196 connection,
197 error);
198 goto error;
199 }
200 desc->client_buffer = (u8 *) buffer;
201
202 /*
203 * Successful completion.
204 */
205 *shmem_desc = desc;
206 mutex_unlock(&(connection->shmem_mutex));
207 dprintk(KERN_DEBUG "tf_map_shmem: success\n");
208 return 0;
209
210
211 /*
212 * Error handling.
213 */
214error:
215 mutex_unlock(&(connection->shmem_mutex));
216 dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
217 error);
218
219 tf_unmap_shmem(
220 connection,
221 desc,
222 0);
223
224 return error;
225}
226
227
228
229/* This function is a copy of the find_vma() function
230in the Linux kernel 2.6.15, with some fixes:
231 - the memory block may end on vm_end
232 - check that the full memory block is in the memory area
233 - guarantee NULL is returned if no memory area is found */
234struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
235 unsigned long addr, unsigned long size)
236{
237 struct vm_area_struct *vma = NULL;
238
239 dprintk(KERN_INFO
240 "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
241
242 if (mm) {
243 /* Check the cache first. */
244 /* (Cache hit rate is typically around 35%.) */
245 vma = mm->mmap_cache;
246 if (!(vma && vma->vm_end >= (addr+size) &&
247 vma->vm_start <= addr)) {
248 struct rb_node *rb_node;
249
250 rb_node = mm->mm_rb.rb_node;
251 vma = NULL;
252
253 while (rb_node) {
254 struct vm_area_struct *vma_tmp;
255
256 vma_tmp = rb_entry(rb_node,
257 struct vm_area_struct, vm_rb);
258
259 dprintk(KERN_INFO
260 "vma_tmp->vm_start=0x%lX"
261 "vma_tmp->vm_end=0x%lX\n",
262 vma_tmp->vm_start,
263 vma_tmp->vm_end);
264
265 if (vma_tmp->vm_end >= (addr+size)) {
266 vma = vma_tmp;
267 if (vma_tmp->vm_start <= addr)
268 break;
269
270 rb_node = rb_node->rb_left;
271 } else {
272 rb_node = rb_node->rb_right;
273 }
274 }
275
276 if (vma)
277 mm->mmap_cache = vma;
278 if (rb_node == NULL)
279 vma = NULL;
280 }
281 }
282 return vma;
283}
284
285int tf_validate_shmem_and_flags(
286 u32 shmem,
287 u32 shmem_size,
288 u32 flags)
289{
290 struct vm_area_struct *vma;
291 u32 chunk;
292
293 if (shmem_size == 0)
294 /* This is always valid */
295 return 0;
296
297 if ((shmem + shmem_size) < shmem)
298 /* Overflow */
299 return -EINVAL;
300
301 down_read(&current->mm->mmap_sem);
302
303 /*
304 * When looking for a memory address, split buffer into chunks of
305 * size=PAGE_SIZE.
306 */
307 chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
308 if (chunk > shmem_size)
309 chunk = shmem_size;
310
311 do {
312 vma = tf_find_vma(current->mm, shmem, chunk);
313
314 if (vma == NULL) {
315 dprintk(KERN_ERR "%s: area not found\n", __func__);
316 goto error;
317 }
318
319 if (flags & TF_SHMEM_TYPE_READ)
320 if (!(vma->vm_flags & VM_READ)) {
321 dprintk(KERN_ERR "%s: no read permission\n",
322 __func__);
323 goto error;
324 }
325 if (flags & TF_SHMEM_TYPE_WRITE)
326 if (!(vma->vm_flags & VM_WRITE)) {
327 dprintk(KERN_ERR "%s: no write permission\n",
328 __func__);
329 goto error;
330 }
331
332 shmem_size -= chunk;
333 shmem += chunk;
334 chunk = (shmem_size <= PAGE_SIZE ?
335 shmem_size : PAGE_SIZE);
336 } while (shmem_size != 0);
337
338 up_read(&current->mm->mmap_sem);
339 return 0;
340
341error:
342 up_read(&current->mm->mmap_sem);
343 return -EFAULT;
344}
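A hedged worked example of the chunking above, assuming PAGE_SIZE = 0x1000, shmem = 0x40000F00 and shmem_size = 0x1300:

	chunk 1: PAGE_SIZE - (shmem & 0xFFF) = 0x100   -> checks [0x40000F00, 0x40001000)
	chunk 2: min(0x1200, PAGE_SIZE)      = 0x1000  -> checks [0x40001000, 0x40002000)
	chunk 3: remaining                   = 0x200   -> checks [0x40002000, 0x40002200)

Each chunk therefore ends on a page boundary or at the end of the buffer, so one tf_find_vma() lookup per chunk validates the whole range.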
345
346
347static int tf_map_temp_shmem(struct tf_connection *connection,
348 struct tf_command_param_temp_memref *temp_memref,
349 u32 param_type,
350 struct tf_shmem_desc **shmem_desc)
351{
352 u32 flags;
353 u32 error = S_SUCCESS;
354 bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
355
356 dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
357 "0x%08x[size=0x%08x], offset=0x%08x)\n",
358 connection,
359 temp_memref->descriptor,
360 temp_memref->size,
361 temp_memref->offset);
362
363 switch (param_type) {
364 case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
365 flags = TF_SHMEM_TYPE_READ;
366 break;
367 case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
368 flags = TF_SHMEM_TYPE_WRITE;
369 break;
370 case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
371 flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
372 break;
373 default:
374 error = -EINVAL;
375 goto error;
376 }
377
378 if (temp_memref->descriptor == 0) {
379 /* NULL tmpref */
380 temp_memref->offset = 0;
381 *shmem_desc = NULL;
382 } else if ((temp_memref->descriptor != 0) &&
383 (temp_memref->size == 0)) {
384 /* Empty tmpref */
385 temp_memref->offset = temp_memref->descriptor;
386 temp_memref->descriptor = 0;
387 temp_memref->size = 0;
388 *shmem_desc = NULL;
389 } else {
390 /* Map the temp shmem block */
391
392 u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
393 u32 descriptor_count;
394
395 if (in_user_space) {
396 error = tf_validate_shmem_and_flags(
397 temp_memref->descriptor,
398 temp_memref->size,
399 flags);
400 if (error != 0)
401 goto error;
402 }
403
404 error = tf_map_shmem(
405 connection,
406 temp_memref->descriptor,
407 flags,
408 in_user_space,
409 shared_mem_descriptors,
410 &(temp_memref->offset),
411 temp_memref->size,
412 shmem_desc,
413 &descriptor_count);
414 temp_memref->descriptor = shared_mem_descriptors[0];
415 }
416
417error:
418 return error;
419}
420
421/*
422 * Clean up a list of shared memory descriptors.
423 */
424static void tf_shared_memory_cleanup_list(
425 struct tf_connection *connection,
426 struct list_head *shmem_desc_list)
427{
428 while (!list_empty(shmem_desc_list)) {
429 struct tf_shmem_desc *shmem_desc;
430
431 shmem_desc = list_first_entry(shmem_desc_list,
432 struct tf_shmem_desc, list);
433
434 tf_unmap_shmem(connection, shmem_desc, 1);
435 }
436}
437
438
439/*
440 * Clean up the shared memory information in the connection.
441 * Releases all allocated pages.
442 */
443static void tf_cleanup_shared_memories(struct tf_connection *connection)
444{
445 /* Clean up the lists of used and free descriptors.
446 * This is done outside the mutex, because tf_unmap_shmem
447 * already takes the mutex itself
448 */
449 tf_shared_memory_cleanup_list(connection,
450 &connection->used_shmem_list);
451 tf_shared_memory_cleanup_list(connection,
452 &connection->free_shmem_list);
453
454 mutex_lock(&(connection->shmem_mutex));
455
456 /* Free the Vmas page */
457 if (connection->vmas) {
458 internal_free_page((unsigned long) connection->vmas);
459 connection->vmas = NULL;
460 }
461
462 tf_release_coarse_page_table_allocator(
463 &(connection->cpt_alloc_context));
464
465 mutex_unlock(&(connection->shmem_mutex));
466}
467
468
469/*
470 * Initialize the shared memory in a connection.
471 * Allocates the minimum memory to be provided
472 * for shared memory management
473 */
474int tf_init_shared_memory(struct tf_connection *connection)
475{
476 int error;
477 int i;
478 int coarse_page_index;
479
480 /*
481 * We only need to initialize special elements and attempt to allocate
482 * the minimum shared memory descriptors we want to support
483 */
484
485 mutex_init(&(connection->shmem_mutex));
486 INIT_LIST_HEAD(&(connection->free_shmem_list));
487 INIT_LIST_HEAD(&(connection->used_shmem_list));
488 atomic_set(&(connection->shmem_count), 0);
489
490 tf_init_coarse_page_table_allocator(
491 &(connection->cpt_alloc_context));
492
493
494 /*
495 * Preallocate 3 shared memory descriptors to increase the chances
496 * that a connection succeeds in allocating shared mem
497 */
498 for (i = 0;
499 i < 3;
500 i++) {
501 struct tf_shmem_desc *shmem_desc =
502 (struct tf_shmem_desc *) internal_kmalloc(
503 sizeof(*shmem_desc), GFP_KERNEL);
504
505 if (shmem_desc == NULL) {
506 printk(KERN_ERR "tf_init_shared_memory(%p):"
507 " failed to pre allocate descriptor %d\n",
508 connection,
509 i);
510 error = -ENOMEM;
511 goto error;
512 }
513
514 for (coarse_page_index = 0;
515 coarse_page_index < TF_MAX_COARSE_PAGES;
516 coarse_page_index++) {
517 struct tf_coarse_page_table *coarse_pg_table;
518
519 coarse_pg_table = tf_alloc_coarse_page_table(
520 &(connection->cpt_alloc_context),
521 TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
522
523 if (coarse_pg_table == NULL) {
524 printk(KERN_ERR "tf_init_shared_memory(%p)"
525 ": descriptor %d coarse page %d - "
526 "tf_alloc_coarse_page_table() "
527 "failed\n",
528 connection,
529 i,
530 coarse_page_index);
531 error = -ENOMEM;
532 goto error;
533 }
534
535 shmem_desc->coarse_pg_table[coarse_page_index] =
536 coarse_pg_table;
537 }
538 shmem_desc->coarse_pg_table_count = 0;
539
540 shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
541 atomic_set(&shmem_desc->ref_count, 1);
542
543 /*
544 * Add this preallocated descriptor to the list of free
545 * descriptors. Keep the device context specific one at the
546 * beginning of the list.
547 */
548 INIT_LIST_HEAD(&(shmem_desc->list));
549 list_add_tail(&(shmem_desc->list),
550 &(connection->free_shmem_list));
551 }
552
553 /* allocate memory for the vmas structure */
554 connection->vmas =
555 (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
556 if (connection->vmas == NULL) {
557 printk(KERN_ERR "tf_init_shared_memory(%p):"
558 " vmas - failed to get_zeroed_page\n",
559 connection);
560 error = -ENOMEM;
561 goto error;
562 }
563
564 return 0;
565
566error:
567 tf_cleanup_shared_memories(connection);
568 return error;
569}
570
571/*----------------------------------------------------------------------------
572 * Connection operations to the Secure World
573 *----------------------------------------------------------------------------*/
574
575int tf_create_device_context(
576 struct tf_connection *connection)
577{
578 union tf_command command;
579 union tf_answer answer;
580 int error = 0;
581
582 dprintk(KERN_INFO "tf_create_device_context(%p)\n",
583 connection);
584
585 command.create_device_context.message_type =
586 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
587 command.create_device_context.message_size =
588 (sizeof(struct tf_command_create_device_context)
589 - sizeof(struct tf_command_header))/sizeof(u32);
590 command.create_device_context.operation_id = (u32) &answer;
591 command.create_device_context.device_context_id = (u32) connection;
592
593 error = tf_send_receive(
594 &connection->dev->sm,
595 &command,
596 &answer,
597 connection,
598 true);
599
600 if ((error != 0) ||
601 (answer.create_device_context.error_code != S_SUCCESS))
602 goto error;
603
604 /*
605 * CREATE_DEVICE_CONTEXT succeeded,
606 * store device context handler and update connection status
607 */
608 connection->device_context =
609 answer.create_device_context.device_context;
610 spin_lock(&(connection->state_lock));
611 connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
612 spin_unlock(&(connection->state_lock));
613
614 /* successful completion */
615 dprintk(KERN_INFO "tf_create_device_context(%p):"
616 " device_context=0x%08x\n",
617 connection,
618 answer.create_device_context.device_context);
619 return 0;
620
621error:
622 if (error != 0) {
623 dprintk(KERN_ERR "tf_create_device_context failed with "
624 "error %d\n", error);
625 } else {
626 /*
627 * We sent a DeviceCreateContext. The state is now
628 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT. It has to be
629 * reset if we ever want to send a DeviceCreateContext again.
630 */
631 spin_lock(&(connection->state_lock));
632 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
633 spin_unlock(&(connection->state_lock));
634 dprintk(KERN_ERR "tf_create_device_context failed with "
635 "error_code 0x%08X\n",
636 answer.create_device_context.error_code);
637 if (answer.create_device_context.error_code ==
638 S_ERROR_OUT_OF_MEMORY)
639 error = -ENOMEM;
640 else
641 error = -EFAULT;
642 }
643
644 return error;
645}
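Note the operation_id cookie used above: each command carries a caller-chosen 32-bit value (here the address of the local answer union), and the Secure World echoes it in the matching answer so answers can be routed back to their callers, as the dispatch loop in tf_pm_hibernate() shows. A hedged restatement of the two ends:

	/* send side: tag the command with the address of our answer buffer */
	command.create_device_context.operation_id = (u32) &answer;

	/* receive side (see tf_pm_hibernate): match the echoed cookie */
	if (tmp_answer.header.operation_id == (u32) &answer)
		/* ... this answer is ours ... */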
646
647/* Check that the current application belongs to the
648 * requested GID */
649static bool tf_check_gid(gid_t requested_gid)
650{
651 if (requested_gid == current_egid()) {
652 return true;
653 } else {
654 u32 size;
655 u32 i;
656 /* Look in the supplementary GIDs */
657 get_group_info(GROUP_INFO);
658 size = GROUP_INFO->ngroups;
659 for (i = 0; i < size; i++)
660 if (requested_gid == GROUP_AT(GROUP_INFO , i))
661 return true;
662 }
663 return false;
664}
665
666/*
667 * Opens a client session to the Secure World
668 */
669int tf_open_client_session(
670 struct tf_connection *connection,
671 union tf_command *command,
672 union tf_answer *answer)
673{
674 int error = 0;
675 struct tf_shmem_desc *shmem_desc[4] = {NULL};
676 u32 i;
677
678 dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
679
680 /*
681 * Initialize the message size with no login data. This will be
682 * adjusted later in the cases below.
683 */
684 command->open_client_session.message_size =
685 (sizeof(struct tf_command_open_client_session) - 20
686 - sizeof(struct tf_command_header))/4;
687
688 switch (command->open_client_session.login_type) {
689 case TF_LOGIN_PUBLIC:
690 /* Nothing to do */
691 break;
692
693 case TF_LOGIN_USER:
694 /*
695 * Send the EUID of the calling application in the login data.
696 * Update message size.
697 */
698 *(u32 *) &command->open_client_session.login_data =
699 current_euid();
700#ifndef CONFIG_ANDROID
701 command->open_client_session.login_type =
702 (u32) TF_LOGIN_USER_LINUX_EUID;
703#else
704 command->open_client_session.login_type =
705 (u32) TF_LOGIN_USER_ANDROID_EUID;
706#endif
707
708 /* Added one word */
709 command->open_client_session.message_size += 1;
710 break;
711
712 case TF_LOGIN_GROUP: {
713 /* Check requested GID */
714 gid_t requested_gid =
715 *(u32 *) command->open_client_session.login_data;
716
717 if (!tf_check_gid(requested_gid)) {
718 dprintk(KERN_ERR "tf_open_client_session(%p) "
719 "TF_LOGIN_GROUP: requested GID (0x%x) does "
720 "not match real eGID (0x%x)"
721 "or any of the supplementary GIDs\n",
722 connection, requested_gid, current_egid());
723 error = -EACCES;
724 goto error;
725 }
726#ifndef CONFIG_ANDROID
727 command->open_client_session.login_type =
728 TF_LOGIN_GROUP_LINUX_GID;
729#else
730 command->open_client_session.login_type =
731 TF_LOGIN_GROUP_ANDROID_GID;
732#endif
733
734 command->open_client_session.message_size += 1; /* GID */
735 break;
736 }
737
738#ifndef CONFIG_ANDROID
739 case TF_LOGIN_APPLICATION: {
740 /*
741 * Compute SHA-1 hash of the application fully-qualified path
742 * name. Truncate the hash to 16 bytes and send it as login
743 * data. Update message size.
744 */
745 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
746
747 error = tf_hash_application_path_and_data(pSHA1Hash,
748 NULL, 0);
749 if (error != 0) {
750 dprintk(KERN_ERR "tf_open_client_session: "
751 "error in tf_hash_application_path_and_data\n");
752 goto error;
753 }
754 memcpy(&command->open_client_session.login_data,
755 pSHA1Hash, 16);
756 command->open_client_session.login_type =
757 TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
758 /* 16 bytes */
759 command->open_client_session.message_size += 4;
760 break;
761 }
762#else
763 case TF_LOGIN_APPLICATION:
764 /*
765 * Send the real UID of the calling application in the login
766 * data. Update message size.
767 */
768 *(u32 *) &command->open_client_session.login_data =
769 current_uid();
770
771 command->open_client_session.login_type =
772 (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
773
774 /* Added one word */
775 command->open_client_session.message_size += 1;
776 break;
777#endif
778
779#ifndef CONFIG_ANDROID
780 case TF_LOGIN_APPLICATION_USER: {
781 /*
782 * Compute SHA-1 hash of the concatenation of the application
783 * fully-qualified path name and the EUID of the calling
784 * application. Truncate the hash to 16 bytes and send it as
785 * login data. Update message size.
786 */
787 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
788
789 error = tf_hash_application_path_and_data(pSHA1Hash,
790 (u8 *) &(current_euid()), sizeof(current_euid()));
791 if (error != 0) {
792 dprintk(KERN_ERR "tf_open_client_session: "
793 "error in tf_hash_application_path_and_data\n");
794 goto error;
795 }
796 memcpy(&command->open_client_session.login_data,
797 pSHA1Hash, 16);
798 command->open_client_session.login_type =
799 TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
800
801 /* 16 bytes */
802 command->open_client_session.message_size += 4;
803
804 break;
805 }
806#else
807 case TF_LOGIN_APPLICATION_USER:
808 /*
809 * Send the real UID and the EUID of the calling application in
810 * the login data. Update message size.
811 */
812 *(u32 *) &command->open_client_session.login_data =
813 current_uid();
814 *(u32 *) &command->open_client_session.login_data[4] =
815 current_euid();
816
817 command->open_client_session.login_type =
818 TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
819
820 /* Added two words */
821 command->open_client_session.message_size += 2;
822 break;
823#endif
824
825#ifndef CONFIG_ANDROID
826 case TF_LOGIN_APPLICATION_GROUP: {
827 /*
828 * Check requested GID. Compute SHA-1 hash of the concatenation
829 * of the application fully-qualified path name and the
830 * requested GID. Update message size
831 */
832 gid_t requested_gid;
833 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
834
835 requested_gid = *(u32 *) &command->open_client_session.
836 login_data;
837
838 if (!tf_check_gid(requested_gid)) {
839 dprintk(KERN_ERR "tf_open_client_session(%p) "
840 "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
841 "does not match real eGID (0x%x)"
842 "or any of the supplementary GIDs\n",
843 connection, requested_gid, current_egid());
844 error = -EACCES;
845 goto error;
846 }
847
848 error = tf_hash_application_path_and_data(pSHA1Hash,
849 &requested_gid, sizeof(u32));
850 if (error != 0) {
851 dprintk(KERN_ERR "tf_open_client_session: "
852 "error in tf_hash_application_path_and_data\n");
853 goto error;
854 }
855
856 memcpy(&command->open_client_session.login_data,
857 pSHA1Hash, 16);
858 command->open_client_session.login_type =
859 TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
860
861 /* 16 bytes */
862 command->open_client_session.message_size += 4;
863 break;
864 }
865#else
866 case TF_LOGIN_APPLICATION_GROUP: {
867 /*
868 * Check requested GID. Send the real UID and the requested GID
869 * in the login data. Update message size.
870 */
871 gid_t requested_gid;
872
873 requested_gid = *(u32 *) &command->open_client_session.
874 login_data;
875
876 if (!tf_check_gid(requested_gid)) {
877 dprintk(KERN_ERR "tf_open_client_session(%p) "
878 "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
879 "does not match real eGID (0x%x)"
880 "or any of the supplementary GIDs\n",
881 connection, requested_gid, current_egid());
882 error = -EACCES;
883 goto error;
884 }
885
886 *(u32 *) &command->open_client_session.login_data =
887 current_uid();
888 *(u32 *) &command->open_client_session.login_data[4] =
889 requested_gid;
890
891 command->open_client_session.login_type =
892 TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
893
894 /* Added two words */
895 command->open_client_session.message_size += 2;
896
897 break;
898 }
899#endif
900
901 case TF_LOGIN_PRIVILEGED:
902 /* A privileged login may be performed only on behalf of the
903 kernel itself or on behalf of a process with euid=0 or
904 egid=0 or euid=system or egid=system. */
905 if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
906 dprintk(KERN_DEBUG "tf_open_client_session: "
907 "TF_LOGIN_PRIVILEGED for kernel API\n");
908 } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
909 (current_egid() != TF_PRIVILEGED_UID_GID) &&
910 (current_euid() != 0) && (current_egid() != 0)) {
911 dprintk(KERN_ERR "tf_open_client_session: "
912 " user %d, group %d not allowed to open "
913 "session with TF_LOGIN_PRIVILEGED\n",
914 current_euid(), current_egid());
915 error = -EACCES;
916 goto error;
917 } else {
918 dprintk(KERN_DEBUG "tf_open_client_session: "
919 "TF_LOGIN_PRIVILEGED for %u:%u\n",
920 current_euid(), current_egid());
921 }
922 command->open_client_session.login_type =
923 TF_LOGIN_PRIVILEGED;
924 break;
925
926 case TF_LOGIN_AUTHENTICATION: {
927 /*
928 * Compute SHA-1 hash of the application binary
929 * Send this hash as the login data (20 bytes)
930 */
931
932 u8 *hash;
933 hash = &(command->open_client_session.login_data[0]);
934
935 error = tf_get_current_process_hash(hash);
936 if (error != 0) {
937 dprintk(KERN_ERR "tf_open_client_session: "
938 "error in tf_get_current_process_hash\n");
939 goto error;
940 }
941 command->open_client_session.login_type =
942 TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
943
944 /* 20 bytes */
945 command->open_client_session.message_size += 5;
946 break;
947 }
948
949 case TF_LOGIN_PRIVILEGED_KERNEL:
950 /* A kernel login may be performed only on behalf of the
951 kernel itself. */
952 if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
953 dprintk(KERN_DEBUG "tf_open_client_session: "
954 "TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
955 command->open_client_session.login_type =
956 TF_LOGIN_PRIVILEGED_KERNEL;
957 } else {
958 dprintk(KERN_ERR "tf_open_client_session: "
959 " user %d, group %d not allowed to open "
960 "session with TF_LOGIN_PRIVILEGED_KERNEL\n",
961 current_euid(), current_egid());
962 error = -EACCES;
963 goto error;
964 }
965 command->open_client_session.login_type =
966 TF_LOGIN_PRIVILEGED_KERNEL;
967 break;
968
969 default:
970 dprintk(KERN_ERR "tf_open_client_session: "
971 "unknown login_type(%08X)\n",
972 command->open_client_session.login_type);
973 error = -EOPNOTSUPP;
974 goto error;
975 }
976
977 /* Map the temporary memory references */
978 for (i = 0; i < 4; i++) {
979 int param_type;
980 param_type = TF_GET_PARAM_TYPE(
981 command->open_client_session.param_types, i);
982 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
983 TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
984 == TF_PARAM_TYPE_MEMREF_FLAG) {
985 /* Map temp mem ref */
986 error = tf_map_temp_shmem(connection,
987 &command->open_client_session.
988 params[i].temp_memref,
989 param_type,
990 &shmem_desc[i]);
991 if (error != 0) {
992 dprintk(KERN_ERR "tf_open_client_session: "
993 "unable to map temporary memory block "
994 "(%08X)\n", error);
995 goto error;
996 }
997 }
998 }
999
1000 /* Fill the handle of the Device Context */
1001 command->open_client_session.device_context =
1002 connection->device_context;
1003
1004 error = tf_send_receive(
1005 &connection->dev->sm,
1006 command,
1007 answer,
1008 connection,
1009 true);
1010
1011error:
1012 /* Unmap the temporary memory references */
1013 for (i = 0; i < 4; i++)
1014 if (shmem_desc[i] != NULL)
1015 tf_unmap_shmem(connection, shmem_desc[i], 0);
1016
1017 if (error != 0)
1018 dprintk(KERN_ERR "tf_open_client_session returns %d\n",
1019 error);
1020 else
1021 dprintk(KERN_ERR "tf_open_client_session returns "
1022 "error_code 0x%08X\n",
1023 answer->open_client_session.error_code);
1024
1025 return error;
1026}
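The message_size bookkeeping in this function starts from the command structure minus its 20-byte login_data area, counted in 32-bit words, and each login case adds back exactly the words it fills. A hedged summary of the cases above:

	base = (sizeof(struct tf_command_open_client_session) - 20
	        - sizeof(struct tf_command_header)) / 4
	PUBLIC, PRIVILEGED, PRIVILEGED_KERNEL    base      (no login data)
	USER, GROUP, Android APPLICATION         base + 1  (one u32)
	Android APPLICATION_USER/_GROUP          base + 2  (two u32s)
	Linux path-hash logins                   base + 4  (16-byte truncated SHA-1)
	AUTHENTICATION                           base + 5  (full 20-byte SHA-1)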
1027
1028
1029/*
1030 * Closes a client session from the Secure World
1031 */
1032int tf_close_client_session(
1033 struct tf_connection *connection,
1034 union tf_command *command,
1035 union tf_answer *answer)
1036{
1037 int error = 0;
1038
1039 dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
1040
1041 command->close_client_session.message_size =
1042 (sizeof(struct tf_command_close_client_session) -
1043 sizeof(struct tf_command_header)) / 4;
1044 command->close_client_session.device_context =
1045 connection->device_context;
1046
1047 error = tf_send_receive(
1048 &connection->dev->sm,
1049 command,
1050 answer,
1051 connection,
1052 true);
1053
1054 if (error != 0)
1055 dprintk(KERN_ERR "tf_close_client_session returns %d\n",
1056 error);
1057 else
1058 dprintk(KERN_ERR "tf_close_client_session returns "
1059 "error 0x%08X\n",
1060 answer->close_client_session.error_code);
1061
1062 return error;
1063}
1064
1065
1066/*
1067 * Registers a shared memory to the Secure World
1068 */
1069int tf_register_shared_memory(
1070 struct tf_connection *connection,
1071 union tf_command *command,
1072 union tf_answer *answer)
1073{
1074 int error = 0;
1075 struct tf_shmem_desc *shmem_desc = NULL;
1076 bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
1077 struct tf_command_register_shared_memory *msg =
1078 &command->register_shared_memory;
1079
1080 dprintk(KERN_INFO "tf_register_shared_memory(%p) "
1081 "%p[0x%08X][0x%08x]\n",
1082 connection,
1083 (void *)msg->shared_mem_descriptors[0],
1084 msg->shared_mem_size,
1085 (u32)msg->memory_flags);
1086
1087 if (in_user_space) {
1088 error = tf_validate_shmem_and_flags(
1089 msg->shared_mem_descriptors[0],
1090 msg->shared_mem_size,
1091 (u32)msg->memory_flags);
1092 if (error != 0)
1093 goto error;
1094 }
1095
1096 /* Initialize message_size with no descriptors */
1097 msg->message_size
1098 = (offsetof(struct tf_command_register_shared_memory,
1099 shared_mem_descriptors) -
1100 sizeof(struct tf_command_header)) / 4;
1101
1102 /* Map the shmem block and update the message */
1103 if (msg->shared_mem_size == 0) {
1104 /* Empty shared mem */
1105 msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
1106 } else {
1107 u32 descriptor_count;
1108 error = tf_map_shmem(
1109 connection,
1110 msg->shared_mem_descriptors[0],
1111 msg->memory_flags,
1112 in_user_space,
1113 msg->shared_mem_descriptors,
1114 &(msg->shared_mem_start_offset),
1115 msg->shared_mem_size,
1116 &shmem_desc,
1117 &descriptor_count);
1118 if (error != 0) {
1119 dprintk(KERN_ERR "tf_register_shared_memory: "
1120 "unable to map shared memory block\n");
1121 goto error;
1122 }
1123 msg->message_size += descriptor_count;
1124 }
1125
1126 /*
1127 * write the correct device context handle and the address of the shared
1128 * memory descriptor in the message
1129 */
1130 msg->device_context = connection->device_context;
1131 msg->block_id = (u32)shmem_desc;
1132
1133 /* Send the updated message */
1134 error = tf_send_receive(
1135 &connection->dev->sm,
1136 command,
1137 answer,
1138 connection,
1139 true);
1140
1141 if ((error != 0) ||
1142 (answer->register_shared_memory.error_code
1143 != S_SUCCESS)) {
1144 dprintk(KERN_ERR "tf_register_shared_memory: "
1145 "operation failed. Unmap block\n");
1146 goto error;
1147 }
1148
1149 /* Saves the block handle returned by the secure world */
1150 if (shmem_desc != NULL)
1151 shmem_desc->block_identifier =
1152 answer->register_shared_memory.block;
1153
1154 /* successful completion */
1155 dprintk(KERN_INFO "tf_register_shared_memory(%p):"
1156 " block_id=0x%08x block=0x%08x\n",
1157 connection, msg->block_id,
1158 answer->register_shared_memory.block);
1159 return 0;
1160
1161 /* error completion */
1162error:
1163 tf_unmap_shmem(
1164 connection,
1165 shmem_desc,
1166 0);
1167
1168 if (error != 0)
1169 dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
1170 error);
1171 else
1172 dprintk(KERN_ERR "tf_register_shared_memory returns "
1173 "error_code 0x%08X\n",
1174 answer->register_shared_memory.error_code);
1175
1176 return error;
1177}
1178
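/*
 * Editorial sketch, not part of the original file: minimal caller-side
 * setup for tf_register_shared_memory() above, for a kernel-owned
 * connection. Field usage mirrors the function body: descriptor 0
 * carries the client buffer address, and the Secure World handle is
 * returned in answer->register_shared_memory.block.
 */
static int __maybe_unused tf_example_register_shmem(
	struct tf_connection *connection, void *buffer, u32 size)
{
	union tf_command command;
	union tf_answer answer;

	memset(&command, 0, sizeof(command));
	command.header.message_type = TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY;
	command.register_shared_memory.shared_mem_descriptors[0] =
		(u32)buffer;
	command.register_shared_memory.shared_mem_size = size;
	command.register_shared_memory.memory_flags = TF_SHARED_MEM_FLAG_INOUT;

	return tf_register_shared_memory(connection, &command, &answer);
}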
1179
1180/*
1181 * Releases a shared memory from the Secure World
1182 */
1183int tf_release_shared_memory(
1184 struct tf_connection *connection,
1185 union tf_command *command,
1186 union tf_answer *answer)
1187{
1188 int error = 0;
1189
1190 dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
1191
1192 command->release_shared_memory.message_size =
1193 (sizeof(struct tf_command_release_shared_memory) -
1194 sizeof(struct tf_command_header)) / 4;
1195 command->release_shared_memory.device_context =
1196 connection->device_context;
1197
1198 error = tf_send_receive(
1199 &connection->dev->sm,
1200 command,
1201 answer,
1202 connection,
1203 true);
1204
1205 if ((error != 0) ||
1206 (answer->release_shared_memory.error_code != S_SUCCESS))
1207 goto error;
1208
1209 /* Use block_id to get back the pointer to shmem_desc */
1210 tf_unmap_shmem(
1211 connection,
1212 (struct tf_shmem_desc *)
1213 answer->release_shared_memory.block_id,
1214 0);
1215
1216 /* successful completion */
1217 dprintk(KERN_INFO "tf_release_shared_memory(%p):"
1218 " block_id=0x%08x block=0x%08x\n",
1219 connection, answer->release_shared_memory.block_id,
1220 command->release_shared_memory.block);
1221 return 0;
1222
1223
1224error:
1225 if (error != 0)
1226 dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
1227 error);
1228 else
1229 dprintk(KERN_ERR "tf_release_shared_memory returns "
1230 "nChannelStatus 0x%08X\n",
1231 answer->release_shared_memory.error_code);
1232
1233 return error;
1234
1235}
1236
1237
1238/*
1239 * Invokes a client command to the Secure World
1240 */
1241int tf_invoke_client_command(
1242 struct tf_connection *connection,
1243 union tf_command *command,
1244 union tf_answer *answer)
1245{
1246 int error = 0;
1247 struct tf_shmem_desc *shmem_desc[4] = {NULL};
1248 int i;
1249
1250 dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
1251
1252 command->invoke_client_command.message_size =
1253 (sizeof(struct tf_command_invoke_client_command) -
1254 sizeof(struct tf_command_header)) / 4;
1255
1256#ifdef CONFIG_TF_ZEBRA
1257 error = tf_crypto_try_shortcuted_update(connection,
1258 (struct tf_command_invoke_client_command *) command,
1259 (struct tf_answer_invoke_client_command *) answer);
1260 if (error == 0)
1261 return error;
1262#endif
1263
1264 /* Map the tmprefs */
1265 for (i = 0; i < 4; i++) {
1266 int param_type = TF_GET_PARAM_TYPE(
1267 command->invoke_client_command.param_types, i);
1268 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
1269 TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
1270 == TF_PARAM_TYPE_MEMREF_FLAG) {
1271 /* A temporary memref: map it */
1272 error = tf_map_temp_shmem(connection,
1273 &command->invoke_client_command.
1274 params[i].temp_memref,
1275 param_type, &shmem_desc[i]);
1276 if (error != 0) {
1277 dprintk(KERN_ERR
1278 "tf_invoke_client_command: "
1279 "unable to map temporary memory "
1280 "block\n (%08X)", error);
1281 goto error;
1282 }
1283 }
1284 }
1285
1286 command->invoke_client_command.device_context =
1287 connection->device_context;
1288
1289 error = tf_send_receive(&connection->dev->sm, command,
1290 answer, connection, true);
1291
1292error:
1293 /* Unmap the temporary memory references */
1294 for (i = 0; i < 4; i++) {
1295 if (shmem_desc[i] != NULL) {
1296 dprintk(KERN_INFO "tf_invoke_client_command: "
1297 "UnMatemp_memref %d\n ", i);
1298 tf_unmap_shmem(connection, shmem_desc[i], 0);
1299 }
1300 }
1301
1302 if (error != 0)
1303 dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
1304 error);
1305 else
1306 dprintk(KERN_DEBUG "tf_invoke_client_command returns "
1307 "error_code 0x%08X\n",
1308 answer->invoke_client_command.error_code);
1309
1310 return error;
1311}
1312
1313
1314/*
1315 * Cancels a client command from the Secure World
1316 */
1317int tf_cancel_client_command(
1318 struct tf_connection *connection,
1319 union tf_command *command,
1320 union tf_answer *answer)
1321{
1322 int error = 0;
1323
1324 dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
1325
1326 command->cancel_client_operation.device_context =
1327 connection->device_context;
1328 command->cancel_client_operation.message_size =
1329 (sizeof(struct tf_command_cancel_client_operation) -
1330 sizeof(struct tf_command_header)) / 4;
1331
1332 error = tf_send_receive(
1333 &connection->dev->sm,
1334 command,
1335 answer,
1336 connection,
1337 true);
1338
1339 if ((error != 0) ||
1340 (answer->cancel_client_operation.error_code != S_SUCCESS))
1341 goto error;
1342
1343
1344 /* successful completion */
1345 return 0;
1346
1347error:
1348 if (error != 0)
1349 dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
1350 error);
1351 else
1352 dprintk(KERN_ERR "tf_cancel_client_command returns "
1353 "nChannelStatus 0x%08X\n",
1354 answer->cancel_client_operation.error_code);
1355
1356 return error;
1357}
1358
1359
1360
1361/*
1362 * Destroys a device context from the Secure World
1363 */
1364int tf_destroy_device_context(
1365 struct tf_connection *connection)
1366{
1367 int error;
1368 /*
1369 * TODO: use the specialized tf_command_destroy_device_context
1370 * structure instead of the full union; this would save stack space
1371 */
1372 union tf_command command;
1373 union tf_answer answer;
1374
1375 dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
1376
1377 BUG_ON(connection == NULL);
1378
1379 command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1380 command.header.message_size =
1381 (sizeof(struct tf_command_destroy_device_context) -
1382 sizeof(struct tf_command_header))/sizeof(u32);
1383
1384 /*
1385 * Fill in the device context handle.
1386 * It is guaranteed that the first shared memory descriptor describes
1387 * the device context
1388 */
1389 command.destroy_device_context.device_context =
1390 connection->device_context;
1391
1392 error = tf_send_receive(
1393 &connection->dev->sm,
1394 &command,
1395 &answer,
1396 connection,
1397 false);
1398
1399 if ((error != 0) ||
1400 (answer.destroy_device_context.error_code != S_SUCCESS))
1401 goto error;
1402
1403 spin_lock(&(connection->state_lock));
1404 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1405 spin_unlock(&(connection->state_lock));
1406
1407 /* successful completion */
1408 dprintk(KERN_INFO "tf_destroy_device_context(%p): Success\n",
1409 connection);
1410 return 0;
1411
1412error:
1413 if (error != 0) {
1414 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1415 "error %d\n", error);
1416 } else {
1417 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1418 "error_code 0x%08X\n",
1419 answer.destroy_device_context.error_code);
1420 if (answer.destroy_device_context.error_code ==
1421 S_ERROR_OUT_OF_MEMORY)
1422 error = -ENOMEM;
1423 else
1424 error = -EFAULT;
1425 }
1426
1427 return error;
1428}
1429
1430
1431/*----------------------------------------------------------------------------
1432 * Connection initialization and cleanup operations
1433 *----------------------------------------------------------------------------*/
1434
1435/*
1436 * Opens a connection to the specified device.
1437 *
1438 * The placeholder referenced by connection is set to the address of the
1439 * new connection; it is set to NULL upon failure.
1440 *
1441 * Returns zero upon successful completion, or an appropriate error code upon
1442 * failure.
1443 */
1444int tf_open(struct tf_device *dev,
1445 struct file *file,
1446 struct tf_connection **connection)
1447{
1448 int error;
1449 struct tf_connection *conn = NULL;
1450
1451 dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
1452
1453 /*
1454 * Allocate and initialize the connection descriptor.
1455 * kmalloc allocates just sizeof(*conn) bytes for the structure.
1456 */
1457 conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
1458 GFP_KERNEL);
1459 if (conn == NULL) {
1460 printk(KERN_ERR "tf_open(): "
1461 "Out of memory for conn!\n");
1462 error = -ENOMEM;
1463 goto error;
1464 }
1465
1466 memset(conn, 0, sizeof(*conn));
1467
1468 conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1469 conn->dev = dev;
1470 spin_lock_init(&(conn->state_lock));
1471 atomic_set(&(conn->pending_op_count), 0);
1472 INIT_LIST_HEAD(&(conn->list));
1473
1474 /*
1475 * Initialize the shared memory
1476 */
1477 error = tf_init_shared_memory(conn);
1478 if (error != 0)
1479 goto error;
1480
1481#ifdef CONFIG_TF_ZEBRA
1482 /*
1483 * Initialize CUS specifics
1484 */
1485 tf_crypto_init_cus(conn);
1486#endif
1487
1488 /*
1489 * Attach the conn to the device.
1490 */
1491 spin_lock(&(dev->connection_list_lock));
1492 list_add(&(conn->list), &(dev->connection_list));
1493 spin_unlock(&(dev->connection_list_lock));
1494
1495 /*
1496 * Successful completion.
1497 */
1498
1499 *connection = conn;
1500
1501 dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
1502 return 0;
1503
1504 /*
1505 * Error handling.
1506 */
1507
1508error:
1509 dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
1510 /* Deallocate the descriptor pages if necessary */
1511 internal_kfree(conn);
1512 *connection = NULL;
1513 return error;
1514}
1515
1516
1517/*
1518 * Closes the specified connection.
1519 *
1520 * Upon return, the connection has been destroyed and cannot be used anymore.
1521 *
1522 * This function does nothing if connection is set to NULL.
1523 */
1524void tf_close(struct tf_connection *connection)
1525{
1526 int error;
1527 enum TF_CONN_STATE state;
1528
1529 dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
1530
1531 if (connection == NULL)
1532 return;
1533
1534 /*
1535 * Assumption: Linux guarantees that no other operation is in progress
1536 * and that no other operation will be started when close is called
1537 */
1538 BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
1539
1540 /*
1541 * Exchange a Destroy Device Context message if needed.
1542 */
1543 spin_lock(&(connection->state_lock));
1544 state = connection->state;
1545 spin_unlock(&(connection->state_lock));
1546 if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
1547 /*
1548 * A DestroyDeviceContext operation was not performed. Do it
1549 * now.
1550 */
1551 error = tf_destroy_device_context(connection);
1552 if (error != 0)
1553 /* avoid cleanup if destroy device context fails */
1554 goto error;
1555 }
1556
1557 /*
1558 * Clean up the shared memory
1559 */
1560 tf_cleanup_shared_memories(connection);
1561
1562 spin_lock(&(connection->dev->connection_list_lock));
1563 list_del(&(connection->list));
1564 spin_unlock(&(connection->dev->connection_list_lock));
1565
1566 internal_kfree(connection);
1567
1568 return;
1569
1570error:
1571 dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
1572 connection, error);
1573}
1574
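/*
 * Editorial sketch, not part of the original file: the typical life
 * cycle of a connection using the routines above. Error handling is
 * minimal; a kernel client would also set connection->owner to
 * TF_CONNECTION_OWNER_KERNEL so that its parameters are trusted.
 */
static int __maybe_unused tf_example_connection_lifecycle(
	struct tf_device *dev, struct file *file)
{
	struct tf_connection *connection;
	int error;

	/* Allocate the connection and attach it to the device */
	error = tf_open(dev, file, &connection);
	if (error != 0)
		return error;

	/* Exchange a CREATE_DEVICE_CONTEXT message with the Secure World */
	error = tf_create_device_context(connection);

	/* ... open sessions, invoke commands, register shared memory ... */

	/* Sends DESTROY_DEVICE_CONTEXT first if the context is still valid */
	tf_close(connection);
	return error;
}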
diff --git a/security/tf_driver/tf_conn.h b/security/tf_driver/tf_conn.h
new file mode 100644
index 00000000000..8bed16f19d5
--- /dev/null
+++ b/security/tf_driver/tf_conn.h
@@ -0,0 +1,106 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __TF_CONN_H__
21#define __TF_CONN_H__
22
23#include "tf_defs.h"
24
25/*
26 * Returns a pointer to the connection referenced by the
27 * specified file.
28 */
29static inline struct tf_connection *tf_conn_from_file(
30 struct file *file)
31{
32 return file->private_data;
33}
34
35int tf_validate_shmem_and_flags(u32 shmem, u32 shmem_size, u32 flags);
36
37int tf_map_shmem(
38 struct tf_connection *connection,
39 u32 buffer,
40 /* flags for read-write access rights on the memory */
41 u32 flags,
42 bool in_user_space,
43 u32 descriptors[TF_MAX_COARSE_PAGES],
44 u32 *buffer_start_offset,
45 u32 buffer_size,
46 struct tf_shmem_desc **shmem_desc,
47 u32 *descriptor_count);
48
49void tf_unmap_shmem(
50 struct tf_connection *connection,
51 struct tf_shmem_desc *shmem_desc,
52 u32 full_cleanup);
53
54/*----------------------------------------------------------------------------
55 * Connection operations to the Secure World
56 *----------------------------------------------------------------------------*/
57
58int tf_create_device_context(
59 struct tf_connection *connection);
60
61int tf_destroy_device_context(
62 struct tf_connection *connection);
63
64int tf_open_client_session(
65 struct tf_connection *connection,
66 union tf_command *command,
67 union tf_answer *answer);
68
69int tf_close_client_session(
70 struct tf_connection *connection,
71 union tf_command *command,
72 union tf_answer *answer);
73
74int tf_register_shared_memory(
75 struct tf_connection *connection,
76 union tf_command *command,
77 union tf_answer *answer);
78
79int tf_release_shared_memory(
80 struct tf_connection *connection,
81 union tf_command *command,
82 union tf_answer *answer);
83
84int tf_invoke_client_command(
85 struct tf_connection *connection,
86 union tf_command *command,
87 union tf_answer *answer);
88
89int tf_cancel_client_command(
90 struct tf_connection *connection,
91 union tf_command *command,
92 union tf_answer *answer);
93
94/*----------------------------------------------------------------------------
95 * Connection initialization and cleanup operations
96 *----------------------------------------------------------------------------*/
97
98int tf_open(struct tf_device *dev,
99 struct file *file,
100 struct tf_connection **connection);
101
102void tf_close(
103 struct tf_connection *connection);
104
105
106#endif /* !defined(__TF_CONN_H__) */
diff --git a/security/tf_driver/tf_defs.h b/security/tf_driver/tf_defs.h
new file mode 100644
index 00000000000..ac209370c55
--- /dev/null
+++ b/security/tf_driver/tf_defs.h
@@ -0,0 +1,538 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __TF_DEFS_H__
21#define __TF_DEFS_H__
22
23#include <linux/atomic.h>
24#include <linux/version.h>
25#include <linux/fs.h>
26#include <linux/cdev.h>
27#include <linux/completion.h>
28#include <linux/list.h>
29#include <linux/spinlock.h>
30#include <linux/sysfs.h>
31#include <linux/sched.h>
32#include <linux/semaphore.h>
33#ifdef CONFIG_HAS_WAKELOCK
34#include <linux/wakelock.h>
35#endif
36
37#include "tf_protocol.h"
38
39/*----------------------------------------------------------------------------*/
40
41#define SIZE_1KB 0x400
42
43/*
44 * Maximum number of shared memory blocks that can be registered in a connection
45 */
46#define TF_SHMEM_MAX_COUNT (64)
47
48/*
49 * Describes the possible types of shared memories
50 *
51 * TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM :
52 * The descriptor describes a registered shared memory.
53 * Its coarse pages are preallocated when initializing the
54 * connection
55 * TF_SHMEM_TYPE_REGISTERED_SHMEM :
56 * The descriptor describes a registered shared memory.
57 * Its coarse pages are not preallocated
58 * TF_SHMEM_TYPE_PM_HIBERNATE :
59 * The descriptor describes a power management shared memory.
60 */
61enum TF_SHMEM_TYPE {
62 TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM = 0,
63 TF_SHMEM_TYPE_REGISTERED_SHMEM,
64 TF_SHMEM_TYPE_PM_HIBERNATE,
65};
66
67
68/*
69 * This structure contains a pointer on a coarse page table
70 */
71struct tf_coarse_page_table {
72 /*
73 * Identifies the coarse page table descriptor in
74 * free_coarse_page_tables list
75 */
76 struct list_head list;
77
78 /*
79 * The address of the coarse page table
80 */
81 u32 *descriptors;
82
83 /*
84 * The address of the array containing this coarse page table
85 */
86 struct tf_coarse_page_table_array *parent;
87};
88
89
90#define TF_PAGE_DESCRIPTOR_TYPE_NORMAL 0
91#define TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED 1
92
93/*
94 * This structure describes an array of up to 4 coarse page tables
95 * allocated within a single 4KB page.
96 */
97struct tf_coarse_page_table_array {
98 /*
99 * identifies the element in the coarse_page_table_arrays list
100 */
101 struct list_head list;
102
103 /*
104 * Type of page descriptor
105 * can take any of TF_PAGE_DESCRIPTOR_TYPE_XXX value
106 */
107 u32 type;
108
109 struct tf_coarse_page_table coarse_page_tables[4];
110
111 /*
112 * A counter of the number of coarse page tables currently used;
113 * the max value should be 4 (one coarse page table is 1KB while one
114 * page is 4KB)
115 */
116 u8 ref_count;
117};
118
119
120/*
121 * This structure describes a list of coarse page table arrays
122 * with some of the coarse page tables free. It is used
123 * when the driver needs to allocate a new coarse page
124 * table.
125 */
126struct tf_coarse_page_table_allocation_context {
127 /*
128 * The spin lock protecting concurrent access to the structure.
129 */
130 spinlock_t lock;
131
132 /*
133 * The list of allocated coarse page table arrays
134 */
135 struct list_head coarse_page_table_arrays;
136
137 /*
138 * The list of free coarse page tables
139 */
140 struct list_head free_coarse_page_tables;
141};
142
143
144/*
145 * Fully describes a shared memory block
146 */
147struct tf_shmem_desc {
148 /*
149 * Identifies the shared memory descriptor in the list of free shared
150 * memory descriptors
151 */
152 struct list_head list;
153
154 /*
155 * Identifies the type of shared memory descriptor
156 */
157 enum TF_SHMEM_TYPE type;
158
159 /*
160 * The identifier of the block of shared memory, as returned by the
161 * Secure World.
162 * This identifier is block field of a REGISTER_SHARED_MEMORY answer
163 */
164 u32 block_identifier;
165
166 /* Client buffer */
167 u8 *client_buffer;
168
169 /* Coarse page table contexts (up to TF_MAX_COARSE_PAGES entries) */
170 struct tf_coarse_page_table *coarse_pg_table[TF_MAX_COARSE_PAGES];
171
172 u32 coarse_pg_table_count;
173
174 /* Reference counter */
175 atomic_t ref_count;
176};
177
178
179/*----------------------------------------------------------------------------*/
180
181/*
182 * This structure describes the communication with the Secure World
183 *
184 * Note that this driver supports only one instance of the Secure World
185 */
186struct tf_comm {
187 /*
188 * The spin lock protecting concurrent access to the structure.
189 */
190 spinlock_t lock;
191
192 /*
193 * Bit vector with the following possible flags:
194 * - TF_COMM_FLAG_IRQ_REQUESTED: If set, indicates that
195 * the IRQ has been successfully requested.
196 * - TF_COMM_FLAG_TERMINATING: If set, indicates that the
197 * communication with the Secure World is being terminated.
198 * Transmissions to the Secure World are not permitted
199 * - TF_COMM_FLAG_W3B_ALLOCATED: If set, indicates that the
200 * W3B buffer has been allocated.
201 *
202 * This bit vector must be accessed with the kernel's atomic bitwise
203 * operations.
204 */
205 unsigned long flags;
206
207 /*
208 * The virtual address of the L1 shared buffer.
209 */
210 struct tf_l1_shared_buffer *l1_buffer;
211
212 /*
213 * The wait queue the client threads are waiting on.
214 */
215 wait_queue_head_t wait_queue;
216
217#ifdef CONFIG_TF_TRUSTZONE
218 /*
219 * The interrupt line used by the Secure World.
220 */
221 int soft_int_irq;
222
223 /* ----- W3B ----- */
224 /* shared memory descriptor to identify the W3B */
225 struct tf_shmem_desc w3b_shmem_desc;
226
227 /* Virtual address of the kernel allocated shared memory */
228 u32 w3b;
229
230 /* offset of data in shared memory coarse pages */
231 u32 w3b_shmem_offset;
232
233 u32 w3b_shmem_size;
234
235 struct tf_coarse_page_table_allocation_context
236 w3b_cpt_alloc_context;
237#endif
238#ifdef CONFIG_TF_ZEBRA
239 /*
240 * The SE SDP can only be initialized once...
241 */
242 int se_initialized;
243
244 /*
245 * Lock to be held by a client when executing an RPC
246 */
247 struct mutex rpc_mutex;
248
249 /*
250 * Lock to protect concurrent accesses to DMA channels
251 */
252 struct mutex dma_mutex;
253#endif
254};
255
256
257#define TF_COMM_FLAG_IRQ_REQUESTED (0)
258#define TF_COMM_FLAG_PA_AVAILABLE (1)
259#define TF_COMM_FLAG_TERMINATING (2)
260#define TF_COMM_FLAG_W3B_ALLOCATED (3)
261#define TF_COMM_FLAG_L1_SHARED_ALLOCATED (4)
262
263/*----------------------------------------------------------------------------*/
264
265struct tf_device_stats {
266 atomic_t stat_pages_allocated;
267 atomic_t stat_memories_allocated;
268 atomic_t stat_pages_locked;
269};
270
271/*
272 * This structure describes the information about one device handled by the
273 * driver. Note that the driver supports only a single device; see the
274 * global variable g_tf_dev.
275 *
276 */
277struct tf_device {
278 /*
279 * The kernel object for the device
280 */
281 struct kobject kobj;
282
283 /*
284 * The device number for the device.
285 */
286 dev_t dev_number;
287
288 /*
289 * Interfaces the char device with the kernel.
290 */
291 struct cdev cdev;
292
293#ifdef CONFIG_TF_TEEC
294 struct cdev cdev_teec;
295#endif
296
297#ifdef CONFIG_TF_ZEBRA
298 struct cdev cdev_ctrl;
299
300 /*
301 * Globals for CUS
302 */
303 /* Current key handles loaded in HWAs */
304 u32 aes1_key_context;
305 u32 des_key_context;
306 bool sham1_is_public;
307
308 /* Object used to serialize HWA accesses */
309 struct semaphore aes1_sema;
310 struct semaphore des_sema;
311 struct semaphore sha_sema;
312
313 /*
314 * An aligned and correctly shaped pre-allocated buffer used for DMA
315 * transfers
316 */
317 u32 dma_buffer_length;
318 u8 *dma_buffer;
319 dma_addr_t dma_buffer_phys;
320
321 /* Workspace allocated at boot time and reserved for the Secure World */
322 u32 workspace_addr;
323 u32 workspace_size;
324
325 /*
326 * A Mutex to provide exclusive locking of the ioctl()
327 */
328 struct mutex dev_mutex;
329#endif
330
331 /*
332 * Communications with the SM.
333 */
334 struct tf_comm sm;
335
336 /*
337 * Lists the connections attached to this device. A connection is
338 * created each time a user space application "opens" a file descriptor
339 * on the driver
340 */
341 struct list_head connection_list;
342
343 /*
344 * The spin lock used to protect concurrent access to the connection
345 * list.
346 */
347 spinlock_t connection_list_lock;
348
349 struct tf_device_stats stats;
350};
351
352/*----------------------------------------------------------------------------*/
353/*
354 * This type describes a connection state.
355 * This is used to determine whether a message is valid or not.
356 *
357 * Messages are only valid in a certain device state.
358 * Messages may be invalidated between the start of the ioctl call and the
359 * moment the message is sent to the Secure World.
360 *
361 * TF_CONN_STATE_NO_DEVICE_CONTEXT :
362 * The connection has no DEVICE_CONTEXT created and no
363 * CREATE_DEVICE_CONTEXT being processed by the Secure World
364 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT :
365 * The connection has a CREATE_DEVICE_CONTEXT being processed by the Secure
366 * World
367 * TF_CONN_STATE_VALID_DEVICE_CONTEXT :
368 * The connection has a DEVICE_CONTEXT created and no
369 * DESTROY_DEVICE_CONTEXT is being processed by the Secure World
370 * TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT :
371 * The connection has a DESTROY_DEVICE_CONTEXT being processed by the Secure
372 * World
373 */
374enum TF_CONN_STATE {
375 TF_CONN_STATE_NO_DEVICE_CONTEXT = 0,
376 TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT,
377 TF_CONN_STATE_VALID_DEVICE_CONTEXT,
378 TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT
379};
380
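/*
 * Editorial note, not in the original header: the life cycle implied by
 * these states is
 *
 *   NO_DEVICE_CONTEXT -> CREATE_DEVICE_CONTEXT_SENT
 *                     -> VALID_DEVICE_CONTEXT
 *                     -> DESTROY_DEVICE_CONTEXT_SENT
 *                     -> NO_DEVICE_CONTEXT
 *
 * tf_destroy_device_context() resets the state to
 * TF_CONN_STATE_NO_DEVICE_CONTEXT on success, and tf_close() only
 * sends DESTROY_DEVICE_CONTEXT when the state is
 * TF_CONN_STATE_VALID_DEVICE_CONTEXT.
 */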
381
382/*
383 * This type describes the status of the command.
384 *
385 * PENDING:
386 * The initial state; the command has not been sent yet.
387 * SENT:
388 * The command has been sent, we are waiting for an answer.
389 * ABORTED:
390 * The command cannot be sent because the device context is invalid.
391 * Note that this only covers the case where some other thread
392 * sent a DESTROY_DEVICE_CONTEXT command.
393 */
394enum TF_COMMAND_STATE {
395 TF_COMMAND_STATE_PENDING = 0,
396 TF_COMMAND_STATE_SENT,
397 TF_COMMAND_STATE_ABORTED
398};
399
400/*
401 * The origin of connection parameters such as login data and
402 * memory reference pointers.
403 *
404 * PROCESS: the calling process. All arguments must be validated.
405 * KERNEL: kernel code. All arguments can be trusted by this driver.
406 */
407enum TF_CONNECTION_OWNER {
408 TF_CONNECTION_OWNER_PROCESS = 0,
409 TF_CONNECTION_OWNER_KERNEL,
410};
411
412
413/*
414 * This structure describes a connection to the driver
415 * A connection is created each time an application opens a file descriptor on
416 * the driver
417 */
418struct tf_connection {
419 /*
420 * Identifies the connection in the list of the connections attached to
421 * the same device.
422 */
423 struct list_head list;
424
425 /*
426 * State of the connection.
427 */
428 enum TF_CONN_STATE state;
429
430 /*
431 * A pointer to the corresponding device structure
432 */
433 struct tf_device *dev;
434
435 /*
436 * A spinlock to use to access state
437 */
438 spinlock_t state_lock;
439
440 /*
441 * Counts the number of operations currently pending on the connection.
442 * (for debug only)
443 */
444 atomic_t pending_op_count;
445
446 /*
447 * A handle for the device context
448 */
449 u32 device_context;
450
451 /*
452 * Lists the used shared memory descriptors
453 */
454 struct list_head used_shmem_list;
455
456 /*
457 * Lists the free shared memory descriptors
458 */
459 struct list_head free_shmem_list;
460
461 /*
462 * A mutex to use to access this structure
463 */
464 struct mutex shmem_mutex;
465
466 /*
467 * Counts the number of shared memories registered.
468 */
469 atomic_t shmem_count;
470
471 /*
472 * Array used to retrieve the VMA properties of client pages when
473 * registering shared memory through REGISTER_SHARED_MEMORY
474 * messages
475 */
476 struct vm_area_struct **vmas;
477
478 /*
479 * coarse page table allocation context
480 */
481 struct tf_coarse_page_table_allocation_context cpt_alloc_context;
482
483 /* The origin of connection parameters such as login data and
484 memory reference pointers. */
485 enum TF_CONNECTION_OWNER owner;
486
487#ifdef CONFIG_TF_ZEBRA
488 /* Lists all the Cryptoki Update Shortcuts */
489 struct list_head shortcut_list;
490
491 /* Lock to protect concurrent accesses to shortcut_list */
492 spinlock_t shortcut_list_lock;
493#endif
494};
495
496/*----------------------------------------------------------------------------*/
497
498/*
499 * The operation_id field of a message points to this structure.
500 * It is used to identify the thread that triggered the message transmission
501 * Whoever reads an answer can wake up that thread using the completion event
502 */
503struct tf_answer_struct {
504 bool answer_copied;
505 union tf_answer *answer;
506};
507
508/*----------------------------------------------------------------------------*/
509
510/**
511 * The C (NUL-terminated ASCII) string base name of the devices managed by
512 * this driver.
513 */
514#define TF_DEVICE_BASE_NAME "tf_driver"
515
516
517/**
518 * The major and minor numbers of the registered character device driver.
519 * Only 1 instance of the driver is supported.
520 */
521#define TF_DEVICE_MINOR_NUMBER (0)
522
523struct tf_device *tf_get_device(void);
524
525#define CLEAN_CACHE_CFG_MASK (~0xC) /* 1111 0011 */
526
527/*----------------------------------------------------------------------------*/
528/*
529 * Kernel Differences
530 */
531
532#ifdef CONFIG_ANDROID
533#define GROUP_INFO get_current_groups()
534#else
535#define GROUP_INFO (current->group_info)
536#endif
537
538#endif /* !defined(__TF_DEFS_H__) */
diff --git a/security/tf_driver/tf_device.c b/security/tf_driver/tf_device.c
new file mode 100644
index 00000000000..ad44b46c206
--- /dev/null
+++ b/security/tf_driver/tf_device.c
@@ -0,0 +1,796 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <linux/atomic.h>
21#include <linux/uaccess.h>
22#include <linux/module.h>
23#include <linux/errno.h>
24#include <linux/mm.h>
25#include <linux/page-flags.h>
26#include <linux/pm.h>
27#include <linux/syscore_ops.h>
28#include <linux/vmalloc.h>
29#include <linux/signal.h>
30#ifdef CONFIG_ANDROID
31#include <linux/device.h>
32#endif
33
34#include "tf_protocol.h"
35#include "tf_defs.h"
36#include "tf_util.h"
37#include "tf_conn.h"
38#include "tf_comm.h"
39#ifdef CONFIG_TF_ZEBRA
40#include <plat/cpu.h>
41#include "tf_zebra.h"
42#endif
43#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
44#include "tf_crypto.h"
45#endif
46
47#include "s_version.h"
48
49/*----------------------------------------------------------------------------
50 * Forward Declarations
51 *----------------------------------------------------------------------------*/
52
53/*
54 * Creates and registers the device to be managed by the specified driver.
55 *
56 * Returns zero upon successful completion, or an appropriate error code upon
57 * failure.
58 */
59static int tf_device_register(void);
60
61
62/*
63 * Implements the device Open callback.
64 */
65static int tf_device_open(
66 struct inode *inode,
67 struct file *file);
68
69
70/*
71 * Implements the device Release callback.
72 */
73static int tf_device_release(
74 struct inode *inode,
75 struct file *file);
76
77
78/*
79 * Implements the device ioctl callback.
80 */
81static long tf_device_ioctl(
82 struct file *file,
83 unsigned int ioctl_num,
84 unsigned long ioctl_param);
85
86
87/*
88 * Implements the device shutdown callback.
89 */
90static int tf_device_shutdown(void);
91
92
93/*
94 * Implements the device suspend callback.
95 */
96static int tf_device_suspend(void);
97
98
99/*
100 * Implements the device resume callback.
101 */
102static int tf_device_resume(void);
103
104
105/*---------------------------------------------------------------------------
106 * Module Parameters
107 *---------------------------------------------------------------------------*/
108
109/*
110 * The device major number used to register a unique character device driver.
111 * Let the default value be 122
112 */
113static int device_major_number = 122;
114
115module_param(device_major_number, int, 0000);
116MODULE_PARM_DESC(device_major_number,
117 "The device major number used to register a unique character "
118 "device driver");
119
120#ifdef CONFIG_TF_TRUSTZONE
121/**
122 * The softint interrupt line used by the Secure World.
123 */
124static int soft_interrupt = -1;
125
126module_param(soft_interrupt, int, 0000);
127MODULE_PARM_DESC(soft_interrupt,
128 "The softint interrupt line used by the Secure world");
129#endif
130
131#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
132unsigned tf_debug_level = UINT_MAX;
133module_param_named(debug, tf_debug_level, uint, 0644);
134#endif
135
136#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
137char *tf_integrity_hmac_sha256_expected_value;
138module_param_named(hmac_sha256, tf_integrity_hmac_sha256_expected_value,
139 charp, 0444);
140
141#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
142unsigned tf_fault_injection_mask;
143module_param_named(fault, tf_fault_injection_mask, uint, 0644);
144#endif
145
146int tf_self_test_blkcipher_align;
147module_param_named(post_align, tf_self_test_blkcipher_align, int, 0644);
148int tf_self_test_blkcipher_use_vmalloc;
149module_param_named(post_vmalloc, tf_self_test_blkcipher_use_vmalloc, int, 0644);
150#endif
151
152#ifdef CONFIG_ANDROID
153static struct class *tf_class;
154#endif
155
156/*----------------------------------------------------------------------------
157 * Global Variables
158 *----------------------------------------------------------------------------*/
159
160/*
161 * tf_driver character device definitions.
162 * read and write methods are not defined
163 * and will return an error if used by user space
164 */
165static const struct file_operations g_tf_device_file_ops = {
166 .owner = THIS_MODULE,
167 .open = tf_device_open,
168 .release = tf_device_release,
169 .unlocked_ioctl = tf_device_ioctl,
170 .llseek = no_llseek,
171};
172
173
174static struct syscore_ops g_tf_device_syscore_ops = {
175 .shutdown = tf_device_shutdown,
176 .suspend = tf_device_suspend,
177 .resume = tf_device_resume,
178};
179
180/* The single device supported by this driver */
181static struct tf_device g_tf_dev;
182
183/*----------------------------------------------------------------------------
184 * Implementations
185 *----------------------------------------------------------------------------*/
186
187struct tf_device *tf_get_device(void)
188{
189 return &g_tf_dev;
190}
191
192/*
193 * sysfs entries
194 */
195struct tf_sysfs_entry {
196 struct attribute attr;
197 ssize_t (*show)(struct tf_device *, char *);
198 ssize_t (*store)(struct tf_device *, const char *, size_t);
199};
200
201/*
202 * sysfs entry showing allocation stats
203 */
204static ssize_t info_show(struct tf_device *dev, char *buf)
205{
206 struct tf_device_stats *dev_stats = &dev->stats;
207
208 return snprintf(buf, PAGE_SIZE,
209 "stat.memories.allocated: %d\n"
210 "stat.pages.allocated: %d\n"
211 "stat.pages.locked: %d\n",
212 atomic_read(&dev_stats->stat_memories_allocated),
213 atomic_read(&dev_stats->stat_pages_allocated),
214 atomic_read(&dev_stats->stat_pages_locked));
215}
216static struct tf_sysfs_entry tf_info_entry = __ATTR_RO(info);
217
218#ifdef CONFIG_TF_ZEBRA
219/*
220 * sysfs entry showing whether secure world is up and running
221 */
222static ssize_t tf_started_show(struct tf_device *dev, char *buf)
223{
224 int tf_started = test_bit(TF_COMM_FLAG_PA_AVAILABLE,
225 &dev->sm.flags);
226
227 return snprintf(buf, PAGE_SIZE, "%s\n", tf_started ? "yes" : "no");
228}
229static struct tf_sysfs_entry tf_started_entry =
230 __ATTR_RO(tf_started);
231
232static ssize_t workspace_addr_show(struct tf_device *dev, char *buf)
233{
234 return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_addr);
235}
236static struct tf_sysfs_entry tf_workspace_addr_entry =
237 __ATTR_RO(workspace_addr);
238
239static ssize_t workspace_size_show(struct tf_device *dev, char *buf)
240{
241 return snprintf(buf, PAGE_SIZE, "0x%08x\n", dev->workspace_size);
242}
243static struct tf_sysfs_entry tf_workspace_size_entry =
244 __ATTR_RO(workspace_size);
245#endif
246
247static ssize_t tf_attr_show(struct kobject *kobj, struct attribute *attr,
248 char *page)
249{
250 struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
251 attr);
252 struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
253
254 if (!entry->show)
255 return -EIO;
256
257 return entry->show(dev, page);
258}
259
260static ssize_t tf_attr_store(struct kobject *kobj, struct attribute *attr,
261 const char *page, size_t length)
262{
263 struct tf_sysfs_entry *entry = container_of(attr, struct tf_sysfs_entry,
264 attr);
265 struct tf_device *dev = container_of(kobj, struct tf_device, kobj);
266
267 if (!entry->store)
268 return -EIO;
269
270 return entry->store(dev, page, length);
271}
272
273static void tf_kobj_release(struct kobject *kobj) {}
274
275static struct attribute *tf_default_attrs[] = {
276 &tf_info_entry.attr,
277#ifdef CONFIG_TF_ZEBRA
278 &tf_started_entry.attr,
279 &tf_workspace_addr_entry.attr,
280 &tf_workspace_size_entry.attr,
281#endif
282 NULL,
283};
284static const struct sysfs_ops tf_sysfs_ops = {
285 .show = tf_attr_show,
286 .store = tf_attr_store,
287};
288static struct kobj_type tf_ktype = {
289 .release = tf_kobj_release,
290 .sysfs_ops = &tf_sysfs_ops,
291 .default_attrs = tf_default_attrs
292};
293
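/*
 * Editorial usage note, not in the original file: since the kobject is
 * registered below with a NULL parent under the name TF_DEVICE_BASE_NAME,
 * the attributes above are expected to appear as /sys/tf_driver/info and,
 * with CONFIG_TF_ZEBRA, /sys/tf_driver/tf_started, workspace_addr and
 * workspace_size.
 */
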
294/*----------------------------------------------------------------------------*/
295
296#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
297static char *smc_mem;
298module_param(smc_mem, charp, S_IRUGO);
299#endif
300
301/*
302 * First routine called when the kernel module is loaded
303 */
304static int __init tf_device_register(void)
305{
306 int error;
307 struct tf_device *dev = &g_tf_dev;
308
309 dprintk(KERN_INFO "tf_device_register()\n");
310
311 /*
312 * Initialize the device
313 */
314 dev->dev_number = MKDEV(device_major_number,
315 TF_DEVICE_MINOR_NUMBER);
316 cdev_init(&dev->cdev, &g_tf_device_file_ops);
317 dev->cdev.owner = THIS_MODULE;
318
319 INIT_LIST_HEAD(&dev->connection_list);
320 spin_lock_init(&dev->connection_list_lock);
321
322#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
323 error = (*tf_comm_early_init)();
324 if (error)
325 goto module_early_init_failed;
326
327 error = tf_device_mshield_init(smc_mem);
328 if (error)
329 goto mshield_init_failed;
330
331#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
332 error = tf_crypto_hmac_module_init();
333 if (error)
334 goto hmac_init_failed;
335
336 error = tf_self_test_register_device();
337 if (error)
338 goto self_test_register_device_failed;
339#endif
340#endif
341
342 /* register the sysfs object driver stats */
343 error = kobject_init_and_add(&dev->kobj, &tf_ktype, NULL, "%s",
344 TF_DEVICE_BASE_NAME);
345 if (error) {
346 printk(KERN_ERR "tf_device_register(): "
347 "kobject_init_and_add failed (error %d)!\n", error);
348 kobject_put(&dev->kobj);
349 goto kobject_init_and_add_failed;
350 }
351
352 /*
353 * Register the system device.
354 */
355 register_syscore_ops(&g_tf_device_syscore_ops);
356
357 /*
358 * Register the char device.
359 */
360 printk(KERN_INFO "Registering char device %s (%u:%u)\n",
361 TF_DEVICE_BASE_NAME,
362 MAJOR(dev->dev_number),
363 MINOR(dev->dev_number));
364 error = register_chrdev_region(dev->dev_number, 1,
365 TF_DEVICE_BASE_NAME);
366 if (error != 0) {
367 printk(KERN_ERR "tf_device_register():"
368 " register_chrdev_region failed (error %d)!\n",
369 error);
370 goto register_chrdev_region_failed;
371 }
372
373 error = cdev_add(&dev->cdev, dev->dev_number, 1);
374 if (error != 0) {
375 printk(KERN_ERR "tf_device_register(): "
376 "cdev_add failed (error %d)!\n",
377 error);
378 goto cdev_add_failed;
379 }
380
381 /*
382 * Initialize the communication with the Secure World.
383 */
384#ifdef CONFIG_TF_TRUSTZONE
385 dev->sm.soft_int_irq = soft_interrupt;
386#endif
387 error = tf_init(&g_tf_dev.sm);
388 if (error != S_SUCCESS) {
389 dprintk(KERN_ERR "tf_device_register(): "
390 "tf_init failed (error %d)!\n",
391 error);
392 goto init_failed;
393 }
394
395#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
396 error = tf_self_test_post_init(&dev->kobj);
397 /* N.B. error > 0 indicates a POST failure, which will not
398 prevent the module from loading. */
399 if (error < 0) {
400 dprintk(KERN_ERR "tf_device_register(): "
401 "tf_self_test_post_vectors failed (error %d)!\n",
402 error);
403 goto post_failed;
404 }
405#endif
406
407#ifdef CONFIG_ANDROID
408 tf_class = class_create(THIS_MODULE, TF_DEVICE_BASE_NAME);
409 device_create(tf_class, NULL,
410 dev->dev_number,
411 NULL, TF_DEVICE_BASE_NAME);
412#endif
413
414#ifdef CONFIG_TF_ZEBRA
415 /*
416 * Initializes the /dev/tf_ctrl device node.
417 */
418 error = tf_ctrl_device_register();
419 if (error)
420 goto ctrl_failed;
421#endif
422
423#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
424 address_cache_property((unsigned long) &tf_device_register);
425#endif
426 /*
427 * Successful completion.
428 */
429
430 dprintk(KERN_INFO "tf_device_register(): Success\n");
431 return 0;
432
433 /*
434 * Error: undo all operations in the reverse order
435 */
436#ifdef CONFIG_TF_ZEBRA
437ctrl_failed:
438#endif
439#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
440 tf_self_test_post_exit();
441post_failed:
442#endif
443init_failed:
444 cdev_del(&dev->cdev);
445cdev_add_failed:
446 unregister_chrdev_region(dev->dev_number, 1);
447register_chrdev_region_failed:
448 unregister_syscore_ops(&g_tf_device_syscore_ops);
449kobject_init_and_add_failed:
450 kobject_del(&g_tf_dev.kobj);
451
452#if defined(MODULE) && defined(CONFIG_TF_ZEBRA)
453#ifdef CONFIG_TF_DRIVER_CRYPTO_FIPS
454 tf_self_test_unregister_device();
455self_test_register_device_failed:
456 tf_crypto_hmac_module_exit();
457hmac_init_failed:
458#endif
459 tf_device_mshield_exit();
460mshield_init_failed:
461module_early_init_failed:
462#endif
463 dprintk(KERN_INFO "tf_device_register(): Failure (error %d)\n",
464 error);
465 return error;
466}
467
468/*----------------------------------------------------------------------------*/
469
470static int tf_device_open(struct inode *inode, struct file *file)
471{
472 int error;
473 struct tf_device *dev = &g_tf_dev;
474 struct tf_connection *connection = NULL;
475
476 dprintk(KERN_INFO "tf_device_open(%u:%u, %p)\n",
477 imajor(inode), iminor(inode), file);
478
479 /* Mark the file descriptor as non-seekable */
480 error = nonseekable_open(inode, file);
481 if (error != 0) {
482 dprintk(KERN_ERR "tf_device_open(%p): "
483 "nonseekable_open failed (error %d)!\n",
484 file, error);
485 goto error;
486 }
487
488#ifndef CONFIG_ANDROID
489 /*
490 * Check file flags. We only authorize O_RDWR access
491 */
492 if ((file->f_flags & O_ACCMODE) != O_RDWR) {
493 dprintk(KERN_ERR "tf_device_open(%p): "
494 "Invalid access mode %u\n",
495 file, file->f_flags);
496 error = -EACCES;
497 goto error;
498 }
499#endif
500
501 /*
502 * Open a new connection.
503 */
504
505 error = tf_open(dev, file, &connection);
506 if (error != 0) {
507 dprintk(KERN_ERR "tf_device_open(%p): "
508 "tf_open failed (error %d)!\n",
509 file, error);
510 goto error;
511 }
512
513 file->private_data = connection;
514
515 /*
516 * Send the CreateDeviceContext command to the secure
517 */
518 error = tf_create_device_context(connection);
519 if (error != 0) {
520 dprintk(KERN_ERR "tf_device_open(%p): "
521 "tf_create_device_context failed (error %d)!\n",
522 file, error);
523 goto error1;
524 }
525
526 /*
527 * Successful completion.
528 */
529
530 dprintk(KERN_INFO "tf_device_open(%p): Success (connection=%p)\n",
531 file, connection);
532 return 0;
533
534 /*
535 * Error handling.
536 */
537
538error1:
539 tf_close(connection);
540error:
541 dprintk(KERN_INFO "tf_device_open(%p): Failure (error %d)\n",
542 file, error);
543 return error;
544}
545
546/*----------------------------------------------------------------------------*/
547
548static int tf_device_release(struct inode *inode, struct file *file)
549{
550 struct tf_connection *connection;
551
552 dprintk(KERN_INFO "tf_device_release(%u:%u, %p)\n",
553 imajor(inode), iminor(inode), file);
554
555 connection = tf_conn_from_file(file);
556 tf_close(connection);
557
558 dprintk(KERN_INFO "tf_device_release(%p): Success\n", file);
559 return 0;
560}
561
562/*----------------------------------------------------------------------------*/
563
564static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
565 unsigned long ioctl_param)
566{
567 int result = S_SUCCESS;
568 struct tf_connection *connection;
569 union tf_command command;
570 struct tf_command_header header;
571 union tf_answer answer;
572 u32 command_size;
573 u32 answer_size;
574 void *user_answer;
575
576 dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
577 file, ioctl_num, (void *) ioctl_param);
578
579 switch (ioctl_num) {
580 case IOCTL_TF_GET_VERSION:
581 /* ioctl is asking for the driver interface version */
582 result = TF_DRIVER_INTERFACE_VERSION;
583 goto exit;
584
585 case IOCTL_TF_EXCHANGE:
586 /*
587 * ioctl is asking to perform a message exchange with the Secure
588 * Module
589 */
590
591 /*
592 * Make a local copy of the data from the user application
593 * This routine checks the data is readable
594 *
595 * Get the header first.
596 */
597 if (copy_from_user(&header,
598 (struct tf_command_header *)ioctl_param,
599 sizeof(struct tf_command_header))) {
600 dprintk(KERN_ERR "tf_device_ioctl(%p): "
601 "Cannot access ioctl parameter %p\n",
602 file, (void *) ioctl_param);
603 result = -EFAULT;
604 goto exit;
605 }
606
607 /* size in 32-bit words */
608 command_size = header.message_size +
609 sizeof(struct tf_command_header)/sizeof(u32);
610 if (command_size > sizeof(command)/sizeof(u32)) {
611 dprintk(KERN_ERR "tf_device_ioctl(%p): "
612 "Buffer overflow: too many bytes to copy %d\n",
613 file, command_size);
614 result = -EFAULT;
615 goto exit;
616 }
617
618 if (copy_from_user(&command,
619 (union tf_command *)ioctl_param,
620 command_size * sizeof(u32))) {
621 dprintk(KERN_ERR "tf_device_ioctl(%p): "
622 "Cannot access ioctl parameter %p\n",
623 file, (void *) ioctl_param);
624 result = -EFAULT;
625 goto exit;
626 }
627
628 connection = tf_conn_from_file(file);
629 BUG_ON(connection == NULL);
630
631 /*
632 * The answer memory space address is in the operation_id field
633 */
634 user_answer = (void *) command.header.operation_id;
635
636 atomic_inc(&(connection->pending_op_count));
637
638 dprintk(KERN_DEBUG "tf_device_ioctl(%p): "
639 "Sending message type 0x%08x\n",
640 file, command.header.message_type);
641
642 switch (command.header.message_type) {
643 case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
644 result = tf_open_client_session(connection,
645 &command, &answer);
646 break;
647
648 case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
649 result = tf_close_client_session(connection,
650 &command, &answer);
651 break;
652
653 case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
654 result = tf_register_shared_memory(connection,
655 &command, &answer);
656 break;
657
658 case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
659 result = tf_release_shared_memory(connection,
660 &command, &answer);
661 break;
662
663 case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
664 result = tf_invoke_client_command(connection,
665 &command, &answer);
666 break;
667
668 case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
669 result = tf_cancel_client_command(connection,
670 &command, &answer);
671 break;
672
673 default:
674 dprintk(KERN_ERR "tf_device_ioctl(%p): "
675 "Incorrect message type (0x%08x)!\n",
676 connection, command.header.message_type);
677 result = -EOPNOTSUPP;
678 break;
679 }
680
681 atomic_dec(&(connection->pending_op_count));
682
683 if (result != 0) {
684 dprintk(KERN_WARNING "tf_device_ioctl(%p): "
685 "Operation returning error code 0x%08x)!\n",
686 file, result);
687 goto exit;
688 }
689
690 /*
691 * Copy the answer back to the user space application.
692 * The driver does not check this field, only copy back to user
693 * space the data handed over by Secure World
694 */
695 answer_size = answer.header.message_size +
696 sizeof(struct tf_answer_header)/sizeof(u32);
697 if (copy_to_user(user_answer,
698 &answer, answer_size * sizeof(u32))) {
699 dprintk(KERN_WARNING "tf_device_ioctl(%p): "
700 "Failed to copy back the full command "
701 "answer to %p\n", file, user_answer);
702 result = -EFAULT;
703 goto exit;
704 }
705
706 /* successful completion */
707 dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
708 break;
709
710 case IOCTL_TF_GET_DESCRIPTION: {
711 /* ioctl asking for the version information buffer */
712 struct tf_version_information_buffer *info_buffer;
713
714 dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
715 file, ioctl_num, (void *) ioctl_param);
716
717 info_buffer =
718 ((struct tf_version_information_buffer *) ioctl_param);
719
720 dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
721 "driver_description=\"%64s\"\n", S_VERSION_STRING);
722
723 if (copy_to_user(info_buffer->driver_description,
724 S_VERSION_STRING,
725 strlen(S_VERSION_STRING) + 1)) {
726 dprintk(KERN_ERR "tf_device_ioctl(%p): "
727 "Fail to copy back the driver description "
728 "to %p\n",
729 file, info_buffer->driver_description);
730 result = -EFAULT;
731 goto exit;
732 }
733
734 dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
735 "secure_world_description=\"%64s\"\n",
736 tf_get_description(&g_tf_dev.sm));
737
738 if (copy_to_user(info_buffer->secure_world_description,
739 tf_get_description(&g_tf_dev.sm),
740 TF_DESCRIPTION_BUFFER_LENGTH)) {
741 dprintk(KERN_WARNING "tf_device_ioctl(%p): "
742 "Failed to copy back the secure world "
743 "description to %p\n",
744 file, info_buffer->secure_world_description);
745 result = -EFAULT;
746 goto exit;
747 }
748 break;
749 }
750
751 default:
752 dprintk(KERN_ERR "tf_device_ioctl(%p): "
753 "Unknown IOCTL code 0x%08x!\n",
754 file, ioctl_num);
755 result = -EOPNOTSUPP;
756 goto exit;
757 }
758
759exit:
760 return result;
761}
762
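/*
 * Editorial user-space sketch, not part of the driver: querying the
 * driver interface version through the ioctl above. The IOCTL_TF_*
 * request codes come from the user/kernel shared header (not shown
 * here); the device node name follows TF_DEVICE_BASE_NAME.
 *
 *   int fd = open("/dev/tf_driver", O_RDWR);
 *   int version = ioctl(fd, IOCTL_TF_GET_VERSION);
 *   // expected: version == TF_DRIVER_INTERFACE_VERSION (0x04000000)
 *   close(fd);
 */
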
763/*----------------------------------------------------------------------------*/
764
765static int tf_device_shutdown(void)
766{
768 return tf_power_management(&g_tf_dev.sm,
769 TF_POWER_OPERATION_SHUTDOWN);
770}
771
772/*----------------------------------------------------------------------------*/
773
774static int tf_device_suspend(void)
775{
776 dprintk(KERN_INFO "tf_device_suspend: Enter\n");
777 return tf_power_management(&g_tf_dev.sm,
778 TF_POWER_OPERATION_HIBERNATE);
779}
780
781
782/*----------------------------------------------------------------------------*/
783
784static int tf_device_resume(void)
785{
786 return tf_power_management(&g_tf_dev.sm,
787 TF_POWER_OPERATION_RESUME);
788}
789
790
791/*----------------------------------------------------------------------------*/
792
793module_init(tf_device_register);
794
795MODULE_LICENSE("GPL");
796MODULE_AUTHOR("Trusted Logic S.A.");
diff --git a/security/tf_driver/tf_protocol.h b/security/tf_driver/tf_protocol.h
new file mode 100644
index 00000000000..403df8ec8ef
--- /dev/null
+++ b/security/tf_driver/tf_protocol.h
@@ -0,0 +1,690 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __TF_PROTOCOL_H__
21#define __TF_PROTOCOL_H__
22
23/*----------------------------------------------------------------------------
24 *
25 * This header file defines the structures used in the SChannel protocol.
26 * See your Product Reference Manual for a specification of the SChannel
27 * protocol.
28 *---------------------------------------------------------------------------*/
29
30/*
31 * The driver interface version returned by the version ioctl
32 */
33#define TF_DRIVER_INTERFACE_VERSION 0x04000000
34
35/*
36 * Protocol version handling
37 */
38#define TF_S_PROTOCOL_MAJOR_VERSION (0x06)
39#define GET_PROTOCOL_MAJOR_VERSION(a) (a >> 24)
40#define GET_PROTOCOL_MINOR_VERSION(a) ((a >> 16) & 0xFF)
41
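/*
 * Editorial example, not part of the original header: decoding the
 * version word returned by the TF_SMC_GET_PROTOCOL_VERSION SMC defined
 * below. A version word of 0x06010000 decodes to major version 0x06
 * (matching TF_S_PROTOCOL_MAJOR_VERSION) and minor version 0x01.
 */
static inline int tf_protocol_major_version_ok(u32 version)
{
	return GET_PROTOCOL_MAJOR_VERSION(version) ==
		TF_S_PROTOCOL_MAJOR_VERSION;
}
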
42/*
43 * The S flag of the config_flag_s register.
44 */
45#define TF_CONFIG_FLAG_S (1 << 3)
46
47/*
48 * The TimeSlot field of the sync_serial_n register.
49 */
50#define TF_SYNC_SERIAL_TIMESLOT_N (1)
51
52/*
53 * status_s related defines.
54 */
55#define TF_STATUS_P_MASK (0x00000001)
56#define TF_STATUS_POWER_STATE_SHIFT (3)
57#define TF_STATUS_POWER_STATE_MASK (0x1F << TF_STATUS_POWER_STATE_SHIFT)
58
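/*
 * Editorial helper, not part of the original header: extracting the
 * POWER_STATE field of the status_s register, yielding one of the
 * TF_POWER_MODE_* values defined below.
 */
static inline u32 tf_status_power_state(u32 status_s)
{
	return (status_s & TF_STATUS_POWER_STATE_MASK) >>
		TF_STATUS_POWER_STATE_SHIFT;
}
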
59/*
60 * Possible power states of the POWER_STATE field of the status_s register
61 */
62#define TF_POWER_MODE_COLD_BOOT (0)
63#define TF_POWER_MODE_WARM_BOOT (1)
64#define TF_POWER_MODE_ACTIVE (3)
65#define TF_POWER_MODE_READY_TO_SHUTDOWN (5)
66#define TF_POWER_MODE_READY_TO_HIBERNATE (7)
67#define TF_POWER_MODE_WAKEUP (8)
68#define TF_POWER_MODE_PANIC (15)
69
70/*
71 * Possible command values for MANAGEMENT commands
72 */
73#define TF_MANAGEMENT_HIBERNATE (1)
74#define TF_MANAGEMENT_SHUTDOWN (2)
75#define TF_MANAGEMENT_PREPARE_FOR_CORE_OFF (3)
76#define TF_MANAGEMENT_RESUME_FROM_CORE_OFF (4)
77
78/*
79 * The capacity of the Normal World message queue, in number of slots.
80 */
81#define TF_N_MESSAGE_QUEUE_CAPACITY (512)
82
83/*
84 * The capacity of the Secure World message answer queue, in number of slots.
85 */
86#define TF_S_ANSWER_QUEUE_CAPACITY (256)
87
88/*
89 * The value of the S-timeout register indicating an infinite timeout.
90 */
91#define TF_S_TIMEOUT_0_INFINITE (0xFFFFFFFF)
92#define TF_S_TIMEOUT_1_INFINITE (0xFFFFFFFF)
93
94/*
95 * The value of the S-timeout register indicating an immediate timeout.
96 */
97#define TF_S_TIMEOUT_0_IMMEDIATE (0x0)
98#define TF_S_TIMEOUT_1_IMMEDIATE (0x0)
99
100/*
101 * Identifies the get protocol version SMC.
102 */
103#define TF_SMC_GET_PROTOCOL_VERSION (0xFFFFFFFB)
104
105/*
106 * Identifies the init SMC.
107 */
108#define TF_SMC_INIT (0xFFFFFFFF)
109
110/*
111 * Identifies the reset irq SMC.
112 */
113#define TF_SMC_RESET_IRQ (0xFFFFFFFE)
114
115/*
116 * Identifies the wake-up (SET_W3B) SMC.
117 */
118#define TF_SMC_WAKE_UP (0xFFFFFFFD)
119
120/*
121 * Identifies the STOP SMC.
122 */
123#define TF_SMC_STOP (0xFFFFFFFC)
124
125/*
126 * Identifies the n-yield SMC.
127 */
128#define TF_SMC_N_YIELD (0x00000003)
129
130
131/* Possible stop commands for SMC_STOP */
132#define SCSTOP_HIBERNATE (0xFFFFFFE1)
133#define SCSTOP_SHUTDOWN (0xFFFFFFE2)
134
135/*
136 * Representation of a UUID.
137 */
138struct tf_uuid {
139 u32 time_low;
140 u16 time_mid;
141 u16 time_hi_and_version;
142 u8 clock_seq_and_node[8];
143};
144
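Illustrative only, using a made-up UUID: the textual form 12345678-9abc-def0-0123-456789abcdef maps onto the fields as follows.

/*
 * struct tf_uuid example = {
 *	.time_low            = 0x12345678,
 *	.time_mid            = 0x9abc,
 *	.time_hi_and_version = 0xdef0,
 *	.clock_seq_and_node  = { 0x01, 0x23, 0x45, 0x67,
 *	                         0x89, 0xab, 0xcd, 0xef }
 * };
 */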
145
146/**
147 * Command parameters.
148 */
149struct tf_command_param_value {
150 u32 a;
151 u32 b;
152};
153
154struct tf_command_param_temp_memref {
155	u32 descriptor; /* data pointer for exchange message. */
156 u32 size;
157 u32 offset;
158};
159
160struct tf_command_param_memref {
161 u32 block;
162 u32 size;
163 u32 offset;
164};
165
166union tf_command_param {
167 struct tf_command_param_value value;
168 struct tf_command_param_temp_memref temp_memref;
169 struct tf_command_param_memref memref;
170};
171
172/**
173 * Answer parameters.
174 */
175struct tf_answer_param_value {
176 u32 a;
177 u32 b;
178};
179
180struct tf_answer_param_size {
181 u32 _ignored;
182 u32 size;
183};
184
185union tf_answer_param {
186 struct tf_answer_param_size size;
187 struct tf_answer_param_value value;
188};
189
190/*
191 * Descriptor tables capacity
192 */
193#define TF_MAX_W3B_COARSE_PAGES (2)
194/* TF_MAX_COARSE_PAGES is the number of level 1 descriptors (describing
195 * 1MB each) that can be shared with the secure world in a single registered
196 * shared memory block. It must be kept in sync with
197 * SCHANNEL6_MAX_DESCRIPTORS_PER_REGISTERED_SHARED_MEM in the SChannel
198 * protocol spec. */
199#define TF_MAX_COARSE_PAGES 128
200#define TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT (8)
201#define TF_DESCRIPTOR_TABLE_CAPACITY \
202 (1 << TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT)
203#define TF_DESCRIPTOR_TABLE_CAPACITY_MASK \
204 (TF_DESCRIPTOR_TABLE_CAPACITY - 1)
205/* Shared memories coarse pages can map up to 1MB */
206#define TF_MAX_COARSE_PAGE_MAPPED_SIZE \
207 (PAGE_SIZE * TF_DESCRIPTOR_TABLE_CAPACITY)
208/* Shared memories cannot exceed 8MB */
209#define TF_MAX_SHMEM_SIZE \
210 (TF_MAX_COARSE_PAGE_MAPPED_SIZE << 3)
211
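Worked numbers for the capacity macros above, assuming 4 KB pages (PAGE_SIZE is platform-defined, so these figures are illustrative):

/*
 * TF_DESCRIPTOR_TABLE_CAPACITY   = 1 << 8      = 256 descriptors
 * TF_MAX_COARSE_PAGE_MAPPED_SIZE = 256 * 4 KB  = 1 MB
 * TF_MAX_SHMEM_SIZE              = 1 MB << 3   = 8 MB
 */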
212/*
213 * Buffer size for version description fields
214 */
215#define TF_DESCRIPTION_BUFFER_LENGTH 64
216
217/*
218 * Shared memory type flags.
219 */
220#define TF_SHMEM_TYPE_READ (0x00000001)
221#define TF_SHMEM_TYPE_WRITE (0x00000002)
222
223/*
224 * Shared mem flags
225 */
226#define TF_SHARED_MEM_FLAG_INPUT 1
227#define TF_SHARED_MEM_FLAG_OUTPUT 2
228#define TF_SHARED_MEM_FLAG_INOUT 3
229
230
231/*
232 * Parameter types
233 */
234#define TF_PARAM_TYPE_NONE 0x0
235#define TF_PARAM_TYPE_VALUE_INPUT 0x1
236#define TF_PARAM_TYPE_VALUE_OUTPUT 0x2
237#define TF_PARAM_TYPE_VALUE_INOUT 0x3
238#define TF_PARAM_TYPE_MEMREF_TEMP_INPUT 0x5
239#define TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT 0x6
240#define TF_PARAM_TYPE_MEMREF_TEMP_INOUT 0x7
241#define TF_PARAM_TYPE_MEMREF_INPUT 0xD
242#define TF_PARAM_TYPE_MEMREF_OUTPUT 0xE
243#define TF_PARAM_TYPE_MEMREF_INOUT 0xF
244
245#define TF_PARAM_TYPE_MEMREF_FLAG 0x4
246#define TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG 0x8
247
248
249#define TF_MAKE_PARAM_TYPES(t0, t1, t2, t3) \
250 ((t0) | ((t1) << 4) | ((t2) << 8) | ((t3) << 12))
251#define TF_GET_PARAM_TYPE(t, i) (((t) >> (4 * (i))) & 0xF)
252
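An illustrative expansion of the packing macros above, using made-up slot assignments:

/*
 * TF_MAKE_PARAM_TYPES(TF_PARAM_TYPE_VALUE_INPUT,        -> 0x1, bits 0..3
 *                     TF_PARAM_TYPE_MEMREF_TEMP_INOUT,  -> 0x7, bits 4..7
 *                     TF_PARAM_TYPE_NONE,
 *                     TF_PARAM_TYPE_NONE) == 0x0071
 *
 * TF_GET_PARAM_TYPE(0x0071, 1) == TF_PARAM_TYPE_MEMREF_TEMP_INOUT
 */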
253/*
254 * Login types.
255 */
256#define TF_LOGIN_PUBLIC 0x00000000
257#define TF_LOGIN_USER 0x00000001
258#define TF_LOGIN_GROUP 0x00000002
259#define TF_LOGIN_APPLICATION 0x00000004
260#define TF_LOGIN_APPLICATION_USER 0x00000005
261#define TF_LOGIN_APPLICATION_GROUP 0x00000006
262#define TF_LOGIN_AUTHENTICATION 0x80000000
263#define TF_LOGIN_PRIVILEGED 0x80000002
264
265/* Login variants */
266
267#define TF_LOGIN_VARIANT(main_type, os, variant) \
268 ((main_type) | (1 << 27) | ((os) << 16) | ((variant) << 8))
269
270#define TF_LOGIN_GET_MAIN_TYPE(type) \
271 ((type) & ~TF_LOGIN_VARIANT(0, 0xFF, 0xFF))
272
273#define TF_LOGIN_OS_ANY 0x00
274#define TF_LOGIN_OS_LINUX 0x01
275#define TF_LOGIN_OS_ANDROID 0x04
276
277/* OS-independent variants */
278#define TF_LOGIN_USER_NONE \
279 TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANY, 0xFF)
280#define TF_LOGIN_GROUP_NONE \
281 TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANY, 0xFF)
282#define TF_LOGIN_APPLICATION_USER_NONE \
283 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANY, 0xFF)
284#define TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH \
285 TF_LOGIN_VARIANT(TF_LOGIN_AUTHENTICATION, TF_LOGIN_OS_ANY, 0x01)
286#define TF_LOGIN_PRIVILEGED_KERNEL \
287 TF_LOGIN_VARIANT(TF_LOGIN_PRIVILEGED, TF_LOGIN_OS_ANY, 0x01)
288
289/* Linux variants */
290#define TF_LOGIN_USER_LINUX_EUID \
291 TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_LINUX, 0x01)
292#define TF_LOGIN_GROUP_LINUX_GID \
293 TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_LINUX, 0x01)
294#define TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH \
295 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_LINUX, 0x01)
296#define TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH \
297 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_LINUX, 0x01)
298#define TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH \
299 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_LINUX, 0x01)
300
301/* Android variants */
302#define TF_LOGIN_USER_ANDROID_EUID \
303 TF_LOGIN_VARIANT(TF_LOGIN_USER, TF_LOGIN_OS_ANDROID, 0x01)
304#define TF_LOGIN_GROUP_ANDROID_GID \
305 TF_LOGIN_VARIANT(TF_LOGIN_GROUP, TF_LOGIN_OS_ANDROID, 0x01)
306#define TF_LOGIN_APPLICATION_ANDROID_UID \
307 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION, TF_LOGIN_OS_ANDROID, 0x01)
308#define TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID \
309 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_USER, TF_LOGIN_OS_ANDROID, \
310 0x01)
311#define TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID \
312 TF_LOGIN_VARIANT(TF_LOGIN_APPLICATION_GROUP, TF_LOGIN_OS_ANDROID, \
313 0x01)
314
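A worked expansion of the variant encoding, derived purely from the macros above:

/*
 * TF_LOGIN_USER_LINUX_EUID
 *   = TF_LOGIN_USER | (1 << 27) | (TF_LOGIN_OS_LINUX << 16) | (0x01 << 8)
 *   = 0x00000001 | 0x08000000 | 0x00010000 | 0x00000100
 *   = 0x08010101
 *
 * TF_LOGIN_GET_MAIN_TYPE(0x08010101) masks out the variant, OS and
 * marker bits and recovers TF_LOGIN_USER (0x00000001).
 */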
315/*
316 * Return origins.
317 */
318#define TF_ORIGIN_COMMS 2
319#define TF_ORIGIN_TEE 3
320#define TF_ORIGIN_TRUSTED_APP 4
321/*
322 * The message types.
323 */
324#define TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT 0x02
325#define TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT 0xFD
326#define TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY 0xF7
327#define TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY 0xF9
328#define TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION 0xF0
329#define TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION 0xF2
330#define TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND 0xF5
331#define TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND 0xF4
332#define TF_MESSAGE_TYPE_MANAGEMENT 0xFE
333
334
335/*
336 * The SChannel error codes.
337 */
338#define S_SUCCESS 0x00000000
339#define S_ERROR_OUT_OF_MEMORY 0xFFFF000C
340
341
342struct tf_command_header {
343 u8 message_size;
344 u8 message_type;
345 u16 message_info;
346 u32 operation_id;
347};
348
349struct tf_answer_header {
350 u8 message_size;
351 u8 message_type;
352 u16 message_info;
353 u32 operation_id;
354 u32 error_code;
355};
356
357/*
358 * CREATE_DEVICE_CONTEXT command message.
359 */
360struct tf_command_create_device_context {
361 u8 message_size;
362 u8 message_type;
363 u16 message_info_rfu;
364 u32 operation_id;
365 u32 device_context_id;
366};
367
368/*
369 * CREATE_DEVICE_CONTEXT answer message.
370 */
371struct tf_answer_create_device_context {
372 u8 message_size;
373 u8 message_type;
374 u16 message_info_rfu;
375 /* an opaque Normal World identifier for the operation */
376 u32 operation_id;
377 u32 error_code;
378 /* an opaque Normal World identifier for the device context */
379 u32 device_context;
380};
381
382/*
383 * DESTROY_DEVICE_CONTEXT command message.
384 */
385struct tf_command_destroy_device_context {
386 u8 message_size;
387 u8 message_type;
388 u16 message_info_rfu;
389 u32 operation_id;
390 u32 device_context;
391};
392
393/*
394 * DESTROY_DEVICE_CONTEXT answer message.
395 */
396struct tf_answer_destroy_device_context {
397 u8 message_size;
398 u8 message_type;
399 u16 message_info_rfu;
400 /* an opaque Normal World identifier for the operation */
401 u32 operation_id;
402 u32 error_code;
403 u32 device_context_id;
404};
405
406/*
407 * OPEN_CLIENT_SESSION command message.
408 */
409struct tf_command_open_client_session {
410 u8 message_size;
411 u8 message_type;
412 u16 param_types;
413 /* an opaque Normal World identifier for the operation */
414 u32 operation_id;
415 u32 device_context;
416 u32 cancellation_id;
417 u64 timeout;
418 struct tf_uuid destination_uuid;
419 union tf_command_param params[4];
420 u32 login_type;
421 /*
422 * Size = 0 for public, [16] for group identification, [20] for
423 * authentication
424 */
425 u8 login_data[20];
426};
427
428/*
429 * OPEN_CLIENT_SESSION answer message.
430 */
431struct tf_answer_open_client_session {
432 u8 message_size;
433 u8 message_type;
434 u8 error_origin;
435 u8 __reserved;
436 /* an opaque Normal World identifier for the operation */
437 u32 operation_id;
438 u32 error_code;
439 u32 client_session;
440 union tf_answer_param answers[4];
441};
442
443/*
444 * CLOSE_CLIENT_SESSION command message.
445 */
446struct tf_command_close_client_session {
447 u8 message_size;
448 u8 message_type;
449 u16 message_info_rfu;
450 /* an opaque Normal World identifier for the operation */
451 u32 operation_id;
452 u32 device_context;
453 u32 client_session;
454};
455
456/*
457 * CLOSE_CLIENT_SESSION answer message.
458 */
459struct tf_answer_close_client_session {
460 u8 message_size;
461 u8 message_type;
462 u16 message_info_rfu;
463 /* an opaque Normal World identifier for the operation */
464 u32 operation_id;
465 u32 error_code;
466};
467
468
469/*
470 * REGISTER_SHARED_MEMORY command message
471 */
472struct tf_command_register_shared_memory {
473 u8 message_size;
474 u8 message_type;
475 u16 memory_flags;
476 u32 operation_id;
477 u32 device_context;
478 u32 block_id;
479 u32 shared_mem_size;
480 u32 shared_mem_start_offset;
481 u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
482};
483
484/*
485 * REGISTER_SHARED_MEMORY answer message.
486 */
487struct tf_answer_register_shared_memory {
488 u8 message_size;
489 u8 message_type;
490 u16 message_info_rfu;
491 /* an opaque Normal World identifier for the operation */
492 u32 operation_id;
493 u32 error_code;
494 u32 block;
495};
496
497/*
498 * RELEASE_SHARED_MEMORY command message.
499 */
500struct tf_command_release_shared_memory {
501 u8 message_size;
502 u8 message_type;
503 u16 message_info_rfu;
504 /* an opaque Normal World identifier for the operation */
505 u32 operation_id;
506 u32 device_context;
507 u32 block;
508};
509
510/*
511 * RELEASE_SHARED_MEMORY answer message.
512 */
513struct tf_answer_release_shared_memory {
514 u8 message_size;
515 u8 message_type;
516 u16 message_info_rfu;
517 u32 operation_id;
518 u32 error_code;
519 u32 block_id;
520};
521
522/*
523 * INVOKE_CLIENT_COMMAND command message.
524 */
525struct tf_command_invoke_client_command {
526 u8 message_size;
527 u8 message_type;
528 u16 param_types;
529 u32 operation_id;
530 u32 device_context;
531 u32 client_session;
532 u64 timeout;
533 u32 cancellation_id;
534 u32 client_command_identifier;
535 union tf_command_param params[4];
536};
537
538/*
539 * INVOKE_CLIENT_COMMAND command answer.
540 */
541struct tf_answer_invoke_client_command {
542 u8 message_size;
543 u8 message_type;
544 u8 error_origin;
545 u8 __reserved;
546 u32 operation_id;
547 u32 error_code;
548 union tf_answer_param answers[4];
549};
550
551/*
552 * CANCEL_CLIENT_OPERATION command message.
553 */
554struct tf_command_cancel_client_operation {
555 u8 message_size;
556 u8 message_type;
557 u16 message_info_rfu;
558 /* an opaque Normal World identifier for the operation */
559 u32 operation_id;
560 u32 device_context;
561 u32 client_session;
562 u32 cancellation_id;
563};
564
565struct tf_answer_cancel_client_operation {
566 u8 message_size;
567 u8 message_type;
568 u16 message_info_rfu;
569 u32 operation_id;
570 u32 error_code;
571};
572
573/*
574 * MANAGEMENT command message.
575 */
576struct tf_command_management {
577 u8 message_size;
578 u8 message_type;
579 u16 command;
580 u32 operation_id;
581 u32 w3b_size;
582 u32 w3b_start_offset;
583 u32 shared_mem_descriptors[1];
584};
585
586/*
587 * MANAGEMENT answer message.
588 * The message carries no message-specific parameters, so no dedicated
589 * answer structure is defined; the generic tf_answer_header applies.
590 */
591
592/*
593 * Structure for L2 messages
594 */
595union tf_command {
596 struct tf_command_header header;
597 struct tf_command_create_device_context create_device_context;
598 struct tf_command_destroy_device_context destroy_device_context;
599 struct tf_command_open_client_session open_client_session;
600 struct tf_command_close_client_session close_client_session;
601 struct tf_command_register_shared_memory register_shared_memory;
602 struct tf_command_release_shared_memory release_shared_memory;
603 struct tf_command_invoke_client_command invoke_client_command;
604 struct tf_command_cancel_client_operation cancel_client_operation;
605 struct tf_command_management management;
606};
607
608/*
609 * Structure for any L2 answer
610 */
611
612union tf_answer {
613 struct tf_answer_header header;
614 struct tf_answer_create_device_context create_device_context;
615 struct tf_answer_open_client_session open_client_session;
616 struct tf_answer_close_client_session close_client_session;
617 struct tf_answer_register_shared_memory register_shared_memory;
618 struct tf_answer_release_shared_memory release_shared_memory;
619 struct tf_answer_invoke_client_command invoke_client_command;
620 struct tf_answer_destroy_device_context destroy_device_context;
621 struct tf_answer_cancel_client_operation cancel_client_operation;
622};
623
624/* Structure of the Communication Buffer */
625struct tf_l1_shared_buffer {
626 #ifdef CONFIG_TF_ZEBRA
627 u32 exit_code;
628 u32 l1_shared_buffer_descr;
629 u32 backing_store_addr;
630 u32 backext_storage_addr;
631 u32 workspace_addr;
632 u32 workspace_size;
633 u32 conf_descriptor;
634 u32 conf_size;
635 u32 conf_offset;
636 u32 protocol_version;
637 u32 rpc_command;
638 u32 rpc_status;
639 u8 reserved1[16];
640 #else
641 u32 config_flag_s;
642 u32 w3b_size_max_s;
643 u32 reserved0;
644 u32 w3b_size_current_s;
645 u8 reserved1[48];
646 #endif
647 u8 version_description[TF_DESCRIPTION_BUFFER_LENGTH];
648 u32 status_s;
649 u32 reserved2;
650 u32 sync_serial_n;
651 u32 sync_serial_s;
652 u64 time_n[2];
653 u64 timeout_s[2];
654 u32 first_command;
655 u32 first_free_command;
656 u32 first_answer;
657 u32 first_free_answer;
658 u32 w3b_descriptors[128];
659 #ifdef CONFIG_TF_ZEBRA
660 u8 rpc_trace_buffer[140];
661 u8 rpc_cus_buffer[180];
662 #else
663 u8 reserved3[320];
664 #endif
665 u32 command_queue[TF_N_MESSAGE_QUEUE_CAPACITY];
666 u32 answer_queue[TF_S_ANSWER_QUEUE_CAPACITY];
667};
668
669
670/*
671 * tf_version_information_buffer structure description
672 * Description of the sVersionBuffer handed over from user space to kernel space
673 * This structure is filled by the driver during a CREATE_DEVICE_CONTEXT ioctl
674 * and handed back to user space
675 */
676struct tf_version_information_buffer {
677 u8 driver_description[65];
678 u8 secure_world_description[65];
679};
680
681
682/* The IOCTLs the driver supports */
683#include <linux/ioctl.h>
684
685#define IOCTL_TF_GET_VERSION _IO('z', 0)
686#define IOCTL_TF_EXCHANGE _IOWR('z', 1, union tf_command)
687#define IOCTL_TF_GET_DESCRIPTION _IOR('z', 2, \
688 struct tf_version_information_buffer)
689
690#endif /* !defined(__TF_PROTOCOL_H__) */
diff --git a/security/tf_driver/tf_util.c b/security/tf_driver/tf_util.c
new file mode 100644
index 00000000000..78f90bf677e
--- /dev/null
+++ b/security/tf_driver/tf_util.c
@@ -0,0 +1,1143 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <linux/mman.h>
21#include "tf_util.h"
22
23/*----------------------------------------------------------------------------
24 * Debug printing routines
25 *----------------------------------------------------------------------------*/
26#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
27
28void tf_trace_array(const char *fun, const char *msg,
29 const void *ptr, size_t len)
30{
31	char hex[511] = "";
32 bool ell = (len > sizeof(hex)/2);
33 unsigned lim = (len > sizeof(hex)/2 ? sizeof(hex)/2 : len);
34 unsigned i;
35 for (i = 0; i < lim; i++)
36 sprintf(hex + 2 * i, "%02x", ((unsigned char *)ptr)[i]);
37	pr_info("%s: %s[%zu] = %s%s\n",
38 fun, msg, len, hex, ell ? "..." : "");
39}
40
41void address_cache_property(unsigned long va)
42{
43 unsigned long pa;
44 unsigned long inner;
45 unsigned long outer;
46
47 asm volatile ("mcr p15, 0, %0, c7, c8, 0" : : "r" (va));
48 asm volatile ("mrc p15, 0, %0, c7, c4, 0" : "=r" (pa));
49
50 dprintk(KERN_INFO "VA:%x, PA:%x\n",
51 (unsigned int) va,
52 (unsigned int) pa);
53
54 if (pa & 1) {
55 dprintk(KERN_INFO "Prop Error\n");
56 return;
57 }
58
59 outer = (pa >> 2) & 3;
60 dprintk(KERN_INFO "\touter : %x", (unsigned int) outer);
61
62 switch (outer) {
63 case 3:
64 dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
65 break;
66 case 2:
67 dprintk(KERN_INFO "Write-Through, no Write-Allocate.\n");
68 break;
69 case 1:
70 dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
71 break;
72 case 0:
73 dprintk(KERN_INFO "Non-cacheable.\n");
74 break;
75 }
76
77 inner = (pa >> 4) & 7;
78 dprintk(KERN_INFO "\tinner : %x", (unsigned int)inner);
79
80 switch (inner) {
81 case 7:
82 dprintk(KERN_INFO "Write-Back, no Write-Allocate\n");
83 break;
84 case 6:
85 dprintk(KERN_INFO "Write-Through.\n");
86 break;
87 case 5:
88 dprintk(KERN_INFO "Write-Back, Write-Allocate.\n");
89 break;
90 case 3:
91 dprintk(KERN_INFO "Device.\n");
92 break;
93 case 1:
94 dprintk(KERN_INFO "Strongly-ordered.\n");
95 break;
96 case 0:
97 dprintk(KERN_INFO "Non-cacheable.\n");
98 break;
99 }
100
101 if (pa & 0x00000002)
102 dprintk(KERN_INFO "SuperSection.\n");
103 if (pa & 0x00000080)
104 dprintk(KERN_INFO "Memory is shareable.\n");
105 else
106 dprintk(KERN_INFO "Memory is non-shareable.\n");
107
108 if (pa & 0x00000200)
109 dprintk(KERN_INFO "Non-secure.\n");
110}
111
112/*
113 * Dump the L1 shared buffer.
114 */
115void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer)
116{
117 dprintk(KERN_INFO
118 "buffer@%p:\n"
119 #ifndef CONFIG_TF_ZEBRA
120 " config_flag_s=%08X\n"
121 #endif
122 " version_description=%64s\n"
123 " status_s=%08X\n"
124 " sync_serial_n=%08X\n"
125 " sync_serial_s=%08X\n"
126 " time_n[0]=%016llX\n"
127 " time_n[1]=%016llX\n"
128 " timeout_s[0]=%016llX\n"
129 " timeout_s[1]=%016llX\n"
130 " first_command=%08X\n"
131 " first_free_command=%08X\n"
132 " first_answer=%08X\n"
133 " first_free_answer=%08X\n\n",
134 buffer,
135 #ifndef CONFIG_TF_ZEBRA
136 buffer->config_flag_s,
137 #endif
138 buffer->version_description,
139 buffer->status_s,
140 buffer->sync_serial_n,
141 buffer->sync_serial_s,
142 buffer->time_n[0],
143 buffer->time_n[1],
144 buffer->timeout_s[0],
145 buffer->timeout_s[1],
146 buffer->first_command,
147 buffer->first_free_command,
148 buffer->first_answer,
149 buffer->first_free_answer);
150}
151
152
153/*
154 * Dump the specified SChannel message using dprintk.
155 */
156void tf_dump_command(union tf_command *command)
157{
158 u32 i;
159
160 dprintk(KERN_INFO "message@%p:\n", command);
161
162 switch (command->header.message_type) {
163 case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
164 dprintk(KERN_INFO
165 " message_size = 0x%02X\n"
166 " message_type = 0x%02X "
167 "TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT\n"
168 " operation_id = 0x%08X\n"
169 " device_context_id = 0x%08X\n",
170 command->header.message_size,
171 command->header.message_type,
172 command->header.operation_id,
173 command->create_device_context.device_context_id
174 );
175 break;
176
177 case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
178 dprintk(KERN_INFO
179 " message_size = 0x%02X\n"
180 " message_type = 0x%02X "
181 "TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT\n"
182 " operation_id = 0x%08X\n"
183 " device_context = 0x%08X\n",
184 command->header.message_size,
185 command->header.message_type,
186 command->header.operation_id,
187 command->destroy_device_context.device_context);
188 break;
189
190 case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
191 dprintk(KERN_INFO
192 " message_size = 0x%02X\n"
193 " message_type = 0x%02X "
194 "TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION\n"
195 " param_types = 0x%04X\n"
196 " operation_id = 0x%08X\n"
197 " device_context = 0x%08X\n"
198 " cancellation_id = 0x%08X\n"
199 " timeout = 0x%016llX\n"
200 " destination_uuid = "
201 "%08X-%04X-%04X-%02X%02X-"
202 "%02X%02X%02X%02X%02X%02X\n",
203 command->header.message_size,
204 command->header.message_type,
205 command->open_client_session.param_types,
206 command->header.operation_id,
207 command->open_client_session.device_context,
208 command->open_client_session.cancellation_id,
209 command->open_client_session.timeout,
210 command->open_client_session.destination_uuid.
211 time_low,
212 command->open_client_session.destination_uuid.
213 time_mid,
214 command->open_client_session.destination_uuid.
215 time_hi_and_version,
216 command->open_client_session.destination_uuid.
217 clock_seq_and_node[0],
218 command->open_client_session.destination_uuid.
219 clock_seq_and_node[1],
220 command->open_client_session.destination_uuid.
221 clock_seq_and_node[2],
222 command->open_client_session.destination_uuid.
223 clock_seq_and_node[3],
224 command->open_client_session.destination_uuid.
225 clock_seq_and_node[4],
226 command->open_client_session.destination_uuid.
227 clock_seq_and_node[5],
228 command->open_client_session.destination_uuid.
229 clock_seq_and_node[6],
230 command->open_client_session.destination_uuid.
231 clock_seq_and_node[7]
232 );
233
234 for (i = 0; i < 4; i++) {
235 uint32_t *param = (uint32_t *) &command->
236 open_client_session.params[i];
237 dprintk(KERN_INFO " params[%d] = "
238 "0x%08X:0x%08X:0x%08X\n",
239 i, param[0], param[1], param[2]);
240 }
241
242 switch (TF_LOGIN_GET_MAIN_TYPE(
243 command->open_client_session.login_type)) {
244 case TF_LOGIN_PUBLIC:
245 dprintk(
246 KERN_INFO " login_type = "
247 "TF_LOGIN_PUBLIC\n");
248 break;
249 case TF_LOGIN_USER:
250 dprintk(
251 KERN_INFO " login_type = "
252 "TF_LOGIN_USER\n");
253 break;
254 case TF_LOGIN_GROUP:
255 dprintk(
256 KERN_INFO " login_type = "
257 "TF_LOGIN_GROUP\n");
258 break;
259 case TF_LOGIN_APPLICATION:
260 dprintk(
261 KERN_INFO " login_type = "
262 "TF_LOGIN_APPLICATION\n");
263 break;
264 case TF_LOGIN_APPLICATION_USER:
265 dprintk(
266 KERN_INFO " login_type = "
267 "TF_LOGIN_APPLICATION_USER\n");
268 break;
269 case TF_LOGIN_APPLICATION_GROUP:
270 dprintk(
271 KERN_INFO " login_type = "
272 "TF_LOGIN_APPLICATION_GROUP\n");
273 break;
274 case TF_LOGIN_AUTHENTICATION:
275 dprintk(
276 KERN_INFO " login_type = "
277 "TF_LOGIN_AUTHENTICATION\n");
278 break;
279 case TF_LOGIN_PRIVILEGED:
280 dprintk(
281 KERN_INFO " login_type = "
282 "TF_LOGIN_PRIVILEGED\n");
283 break;
284 case TF_LOGIN_PRIVILEGED_KERNEL:
285 dprintk(
286 KERN_INFO " login_type = "
287 "TF_LOGIN_PRIVILEGED_KERNEL\n");
288 break;
289 default:
290 dprintk(
291 KERN_ERR " login_type = "
292 "0x%08X (Unknown login type)\n",
293 command->open_client_session.login_type);
294 break;
295 }
296
297 dprintk(
298 KERN_INFO " login_data = ");
299 for (i = 0; i < 20; i++)
300 dprintk(
301				KERN_INFO "%02x",
302 command->open_client_session.
303 login_data[i]);
304 dprintk("\n");
305 break;
306
307 case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
308 dprintk(KERN_INFO
309 " message_size = 0x%02X\n"
310 " message_type = 0x%02X "
311 "TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION\n"
312 " operation_id = 0x%08X\n"
313 " device_context = 0x%08X\n"
314 " client_session = 0x%08X\n",
315 command->header.message_size,
316 command->header.message_type,
317 command->header.operation_id,
318 command->close_client_session.device_context,
319 command->close_client_session.client_session
320 );
321 break;
322
323 case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
324 dprintk(KERN_INFO
325 " message_size = 0x%02X\n"
326 " message_type = 0x%02X "
327 "TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY\n"
328 " memory_flags = 0x%04X\n"
329 " operation_id = 0x%08X\n"
330 " device_context = 0x%08X\n"
331 " block_id = 0x%08X\n"
332 " shared_mem_size = 0x%08X\n"
333 " shared_mem_start_offset = 0x%08X\n"
334 " shared_mem_descriptors[0] = 0x%08X\n"
335 " shared_mem_descriptors[1] = 0x%08X\n"
336 " shared_mem_descriptors[2] = 0x%08X\n"
337 " shared_mem_descriptors[3] = 0x%08X\n"
338 " shared_mem_descriptors[4] = 0x%08X\n"
339 " shared_mem_descriptors[5] = 0x%08X\n"
340 " shared_mem_descriptors[6] = 0x%08X\n"
341 " shared_mem_descriptors[7] = 0x%08X\n",
342 command->header.message_size,
343 command->header.message_type,
344 command->register_shared_memory.memory_flags,
345 command->header.operation_id,
346 command->register_shared_memory.device_context,
347 command->register_shared_memory.block_id,
348 command->register_shared_memory.shared_mem_size,
349 command->register_shared_memory.
350 shared_mem_start_offset,
351 command->register_shared_memory.
352 shared_mem_descriptors[0],
353 command->register_shared_memory.
354 shared_mem_descriptors[1],
355 command->register_shared_memory.
356 shared_mem_descriptors[2],
357 command->register_shared_memory.
358 shared_mem_descriptors[3],
359 command->register_shared_memory.
360 shared_mem_descriptors[4],
361 command->register_shared_memory.
362 shared_mem_descriptors[5],
363 command->register_shared_memory.
364 shared_mem_descriptors[6],
365 command->register_shared_memory.
366 shared_mem_descriptors[7]);
367 break;
368
369 case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
370 dprintk(KERN_INFO
371 " message_size = 0x%02X\n"
372 " message_type = 0x%02X "
373 "TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY\n"
374 " operation_id = 0x%08X\n"
375 " device_context = 0x%08X\n"
376 " block = 0x%08X\n",
377 command->header.message_size,
378 command->header.message_type,
379 command->header.operation_id,
380 command->release_shared_memory.device_context,
381 command->release_shared_memory.block);
382 break;
383
384 case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
385 dprintk(KERN_INFO
386 " message_size = 0x%02X\n"
387 " message_type = 0x%02X "
388 "TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND\n"
389 " param_types = 0x%04X\n"
390 " operation_id = 0x%08X\n"
391 " device_context = 0x%08X\n"
392 " client_session = 0x%08X\n"
393 " timeout = 0x%016llX\n"
394 " cancellation_id = 0x%08X\n"
395 " client_command_identifier = 0x%08X\n",
396 command->header.message_size,
397 command->header.message_type,
398 command->invoke_client_command.param_types,
399 command->header.operation_id,
400 command->invoke_client_command.device_context,
401 command->invoke_client_command.client_session,
402 command->invoke_client_command.timeout,
403 command->invoke_client_command.cancellation_id,
404 command->invoke_client_command.
405 client_command_identifier
406 );
407
408 for (i = 0; i < 4; i++) {
409			uint32_t *param = (uint32_t *) &command->
410				invoke_client_command.params[i];
411 dprintk(KERN_INFO " params[%d] = "
412 "0x%08X:0x%08X:0x%08X\n", i,
413 param[0], param[1], param[2]);
414 }
415 break;
416
417 case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
418 dprintk(KERN_INFO
419 " message_size = 0x%02X\n"
420 " message_type = 0x%02X "
421 "TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND\n"
422 " operation_id = 0x%08X\n"
423 " device_context = 0x%08X\n"
424 " client_session = 0x%08X\n",
425 command->header.message_size,
426 command->header.message_type,
427 command->header.operation_id,
428 command->cancel_client_operation.device_context,
429 command->cancel_client_operation.client_session);
430 break;
431
432 case TF_MESSAGE_TYPE_MANAGEMENT:
433 dprintk(KERN_INFO
434 " message_size = 0x%02X\n"
435 " message_type = 0x%02X "
436 "TF_MESSAGE_TYPE_MANAGEMENT\n"
437 " operation_id = 0x%08X\n"
438 " command = 0x%08X\n"
439 " w3b_size = 0x%08X\n"
440 " w3b_start_offset = 0x%08X\n",
441 command->header.message_size,
442 command->header.message_type,
443 command->header.operation_id,
444 command->management.command,
445 command->management.w3b_size,
446 command->management.w3b_start_offset);
447 break;
448
449 default:
450 dprintk(
451 KERN_ERR " message_type = 0x%08X "
452 "(Unknown message type)\n",
453 command->header.message_type);
454 break;
455 }
456}
457
458
459/*
460 * Dump the specified SChannel answer using dprintk.
461 */
462void tf_dump_answer(union tf_answer *answer)
463{
464 u32 i;
465 dprintk(
466 KERN_INFO "answer@%p:\n",
467 answer);
468
469 switch (answer->header.message_type) {
470 case TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT:
471 dprintk(KERN_INFO
472 " message_size = 0x%02X\n"
473 " message_type = 0x%02X "
474 "tf_answer_create_device_context\n"
475 " operation_id = 0x%08X\n"
476 " error_code = 0x%08X\n"
477 " device_context = 0x%08X\n",
478 answer->header.message_size,
479 answer->header.message_type,
480 answer->header.operation_id,
481 answer->create_device_context.error_code,
482 answer->create_device_context.device_context);
483 break;
484
485 case TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT:
486 dprintk(KERN_INFO
487 " message_size = 0x%02X\n"
488 " message_type = 0x%02X "
489 "ANSWER_DESTROY_DEVICE_CONTEXT\n"
490 " operation_id = 0x%08X\n"
491 " error_code = 0x%08X\n"
492 " device_context_id = 0x%08X\n",
493 answer->header.message_size,
494 answer->header.message_type,
495 answer->header.operation_id,
496 answer->destroy_device_context.error_code,
497 answer->destroy_device_context.device_context_id);
498 break;
499
500
501 case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
502 dprintk(KERN_INFO
503 " message_size = 0x%02X\n"
504 " message_type = 0x%02X "
505 "tf_answer_open_client_session\n"
506 " error_origin = 0x%02X\n"
507 " operation_id = 0x%08X\n"
508 " error_code = 0x%08X\n"
509 " client_session = 0x%08X\n",
510 answer->header.message_size,
511 answer->header.message_type,
512 answer->open_client_session.error_origin,
513 answer->header.operation_id,
514 answer->open_client_session.error_code,
515 answer->open_client_session.client_session);
516 for (i = 0; i < 4; i++) {
517 dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
518 i,
519 answer->open_client_session.answers[i].
520 value.a,
521 answer->open_client_session.answers[i].
522 value.b);
523 }
524 break;
525
526 case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
527 dprintk(KERN_INFO
528 " message_size = 0x%02X\n"
529 " message_type = 0x%02X "
530 "ANSWER_CLOSE_CLIENT_SESSION\n"
531 " operation_id = 0x%08X\n"
532 " error_code = 0x%08X\n",
533 answer->header.message_size,
534 answer->header.message_type,
535 answer->header.operation_id,
536 answer->close_client_session.error_code);
537 break;
538
539 case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
540 dprintk(KERN_INFO
541 " message_size = 0x%02X\n"
542 " message_type = 0x%02X "
543 "tf_answer_register_shared_memory\n"
544 " operation_id = 0x%08X\n"
545 " error_code = 0x%08X\n"
546 " block = 0x%08X\n",
547 answer->header.message_size,
548 answer->header.message_type,
549 answer->header.operation_id,
550 answer->register_shared_memory.error_code,
551 answer->register_shared_memory.block);
552 break;
553
554 case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
555 dprintk(KERN_INFO
556 " message_size = 0x%02X\n"
557 " message_type = 0x%02X "
558 "ANSWER_RELEASE_SHARED_MEMORY\n"
559 " operation_id = 0x%08X\n"
560 " error_code = 0x%08X\n"
561 " block_id = 0x%08X\n",
562 answer->header.message_size,
563 answer->header.message_type,
564 answer->header.operation_id,
565 answer->release_shared_memory.error_code,
566 answer->release_shared_memory.block_id);
567 break;
568
569 case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
570 dprintk(KERN_INFO
571 " message_size = 0x%02X\n"
572 " message_type = 0x%02X "
573 "tf_answer_invoke_client_command\n"
574 " error_origin = 0x%02X\n"
575 " operation_id = 0x%08X\n"
576 " error_code = 0x%08X\n",
577 answer->header.message_size,
578 answer->header.message_type,
579 answer->invoke_client_command.error_origin,
580 answer->header.operation_id,
581 answer->invoke_client_command.error_code
582 );
583 for (i = 0; i < 4; i++) {
584 dprintk(KERN_INFO " answers[%d]=0x%08X:0x%08X\n",
585 i,
586 answer->invoke_client_command.answers[i].
587 value.a,
588 answer->invoke_client_command.answers[i].
589 value.b);
590 }
591 break;
592
593 case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
594 dprintk(KERN_INFO
595 " message_size = 0x%02X\n"
596 " message_type = 0x%02X "
597 "TF_ANSWER_CANCEL_CLIENT_COMMAND\n"
598 " operation_id = 0x%08X\n"
599 " error_code = 0x%08X\n",
600 answer->header.message_size,
601 answer->header.message_type,
602 answer->header.operation_id,
603 answer->cancel_client_operation.error_code);
604 break;
605
606 case TF_MESSAGE_TYPE_MANAGEMENT:
607 dprintk(KERN_INFO
608 " message_size = 0x%02X\n"
609 " message_type = 0x%02X "
610 "TF_MESSAGE_TYPE_MANAGEMENT\n"
611 " operation_id = 0x%08X\n"
612 " error_code = 0x%08X\n",
613 answer->header.message_size,
614 answer->header.message_type,
615 answer->header.operation_id,
616 answer->header.error_code);
617 break;
618
619 default:
620 dprintk(
621 KERN_ERR " message_type = 0x%02X "
622 "(Unknown message type)\n",
623 answer->header.message_type);
624 break;
625
626 }
627}
628
629#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
630
631/*----------------------------------------------------------------------------
632 * SHA-1 implementation
633 * This is taken from the Linux kernel source crypto/sha1.c
634 *----------------------------------------------------------------------------*/
635
636struct sha1_ctx {
637 u64 count;
638 u32 state[5];
639 u8 buffer[64];
640};
641
642static inline u32 rol(u32 value, u32 bits)
643{
644 return ((value) << (bits)) | ((value) >> (32 - (bits)));
645}
646
647/* blk0() and blk() perform the initial expand. */
648/* I got the idea of expanding during the round function from SSLeay */
649#define blk0(i) block32[i]
650
651#define blk(i) (block32[i & 15] = rol( \
652 block32[(i + 13) & 15] ^ block32[(i + 8) & 15] ^ \
653 block32[(i + 2) & 15] ^ block32[i & 15], 1))
654
655/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
656#define R0(v, w, x, y, z, i) do { \
657 z += ((w & (x ^ y)) ^ y) + blk0(i) + 0x5A827999 + rol(v, 5); \
658 w = rol(w, 30); } while (0)
659
660#define R1(v, w, x, y, z, i) do { \
661 z += ((w & (x ^ y)) ^ y) + blk(i) + 0x5A827999 + rol(v, 5); \
662 w = rol(w, 30); } while (0)
663
664#define R2(v, w, x, y, z, i) do { \
665 z += (w ^ x ^ y) + blk(i) + 0x6ED9EBA1 + rol(v, 5); \
666 w = rol(w, 30); } while (0)
667
668#define R3(v, w, x, y, z, i) do { \
669 z += (((w | x) & y) | (w & x)) + blk(i) + 0x8F1BBCDC + rol(v, 5); \
670 w = rol(w, 30); } while (0)
671
672#define R4(v, w, x, y, z, i) do { \
673 z += (w ^ x ^ y) + blk(i) + 0xCA62C1D6 + rol(v, 5); \
674 w = rol(w, 30); } while (0)
675
676
677/* Hash a single 512-bit block. This is the core of the algorithm. */
678static void sha1_transform(u32 *state, const u8 *in)
679{
680 u32 a, b, c, d, e;
681 u32 block32[16];
682
683 /* convert/copy data to workspace */
684 for (a = 0; a < sizeof(block32)/sizeof(u32); a++)
685 block32[a] = ((u32) in[4 * a]) << 24 |
686 ((u32) in[4 * a + 1]) << 16 |
687 ((u32) in[4 * a + 2]) << 8 |
688 ((u32) in[4 * a + 3]);
689
690 /* Copy context->state[] to working vars */
691 a = state[0];
692 b = state[1];
693 c = state[2];
694 d = state[3];
695 e = state[4];
696
697 /* 4 rounds of 20 operations each. Loop unrolled. */
698 R0(a, b, c, d, e, 0); R0(e, a, b, c, d, 1);
699 R0(d, e, a, b, c, 2); R0(c, d, e, a, b, 3);
700 R0(b, c, d, e, a, 4); R0(a, b, c, d, e, 5);
701 R0(e, a, b, c, d, 6); R0(d, e, a, b, c, 7);
702 R0(c, d, e, a, b, 8); R0(b, c, d, e, a, 9);
703 R0(a, b, c, d, e, 10); R0(e, a, b, c, d, 11);
704 R0(d, e, a, b, c, 12); R0(c, d, e, a, b, 13);
705 R0(b, c, d, e, a, 14); R0(a, b, c, d, e, 15);
706
707 R1(e, a, b, c, d, 16); R1(d, e, a, b, c, 17);
708 R1(c, d, e, a, b, 18); R1(b, c, d, e, a, 19);
709
710 R2(a, b, c, d, e, 20); R2(e, a, b, c, d, 21);
711 R2(d, e, a, b, c, 22); R2(c, d, e, a, b, 23);
712 R2(b, c, d, e, a, 24); R2(a, b, c, d, e, 25);
713 R2(e, a, b, c, d, 26); R2(d, e, a, b, c, 27);
714 R2(c, d, e, a, b, 28); R2(b, c, d, e, a, 29);
715 R2(a, b, c, d, e, 30); R2(e, a, b, c, d, 31);
716 R2(d, e, a, b, c, 32); R2(c, d, e, a, b, 33);
717 R2(b, c, d, e, a, 34); R2(a, b, c, d, e, 35);
718 R2(e, a, b, c, d, 36); R2(d, e, a, b, c, 37);
719 R2(c, d, e, a, b, 38); R2(b, c, d, e, a, 39);
720
721 R3(a, b, c, d, e, 40); R3(e, a, b, c, d, 41);
722 R3(d, e, a, b, c, 42); R3(c, d, e, a, b, 43);
723 R3(b, c, d, e, a, 44); R3(a, b, c, d, e, 45);
724 R3(e, a, b, c, d, 46); R3(d, e, a, b, c, 47);
725 R3(c, d, e, a, b, 48); R3(b, c, d, e, a, 49);
726 R3(a, b, c, d, e, 50); R3(e, a, b, c, d, 51);
727 R3(d, e, a, b, c, 52); R3(c, d, e, a, b, 53);
728 R3(b, c, d, e, a, 54); R3(a, b, c, d, e, 55);
729 R3(e, a, b, c, d, 56); R3(d, e, a, b, c, 57);
730 R3(c, d, e, a, b, 58); R3(b, c, d, e, a, 59);
731
732 R4(a, b, c, d, e, 60); R4(e, a, b, c, d, 61);
733 R4(d, e, a, b, c, 62); R4(c, d, e, a, b, 63);
734 R4(b, c, d, e, a, 64); R4(a, b, c, d, e, 65);
735 R4(e, a, b, c, d, 66); R4(d, e, a, b, c, 67);
736 R4(c, d, e, a, b, 68); R4(b, c, d, e, a, 69);
737 R4(a, b, c, d, e, 70); R4(e, a, b, c, d, 71);
738 R4(d, e, a, b, c, 72); R4(c, d, e, a, b, 73);
739 R4(b, c, d, e, a, 74); R4(a, b, c, d, e, 75);
740 R4(e, a, b, c, d, 76); R4(d, e, a, b, c, 77);
741 R4(c, d, e, a, b, 78); R4(b, c, d, e, a, 79);
742
743 /* Add the working vars back into context.state[] */
744 state[0] += a;
745 state[1] += b;
746 state[2] += c;
747 state[3] += d;
748 state[4] += e;
749 /* Wipe variables */
750 a = b = c = d = e = 0;
751 memset(block32, 0x00, sizeof(block32));
752}
753
754
755static void sha1_init(void *ctx)
756{
757 struct sha1_ctx *sctx = ctx;
758 static const struct sha1_ctx initstate = {
759 0,
760 { 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 },
761 { 0, }
762 };
763
764 *sctx = initstate;
765}
766
767
768static void sha1_update(void *ctx, const u8 *data, unsigned int len)
769{
770 struct sha1_ctx *sctx = ctx;
771 unsigned int i, j;
772
773 j = (sctx->count >> 3) & 0x3f;
774 sctx->count += len << 3;
775
776 if ((j + len) > 63) {
777 memcpy(&sctx->buffer[j], data, (i = 64 - j));
778 sha1_transform(sctx->state, sctx->buffer);
779 for ( ; i + 63 < len; i += 64)
780 sha1_transform(sctx->state, &data[i]);
781 j = 0;
782 } else
783 i = 0;
784 memcpy(&sctx->buffer[j], &data[i], len - i);
785}
786
787
788/* Add padding and return the message digest. */
789static void sha1_final(void *ctx, u8 *out)
790{
791 struct sha1_ctx *sctx = ctx;
792 u32 i, j, index, padlen;
793 u64 t;
794 u8 bits[8] = { 0, };
795 static const u8 padding[64] = { 0x80, };
796
797 t = sctx->count;
798 bits[7] = 0xff & t; t >>= 8;
799 bits[6] = 0xff & t; t >>= 8;
800 bits[5] = 0xff & t; t >>= 8;
801 bits[4] = 0xff & t; t >>= 8;
802 bits[3] = 0xff & t; t >>= 8;
803 bits[2] = 0xff & t; t >>= 8;
804 bits[1] = 0xff & t; t >>= 8;
805 bits[0] = 0xff & t;
806
807 /* Pad out to 56 mod 64 */
808 index = (sctx->count >> 3) & 0x3f;
809 padlen = (index < 56) ? (56 - index) : ((64+56) - index);
810 sha1_update(sctx, padding, padlen);
811
812 /* Append length */
813 sha1_update(sctx, bits, sizeof(bits));
814
815 /* Store state in digest */
816 for (i = j = 0; i < 5; i++, j += 4) {
817 u32 t2 = sctx->state[i];
818 out[j+3] = t2 & 0xff; t2 >>= 8;
819 out[j+2] = t2 & 0xff; t2 >>= 8;
820 out[j+1] = t2 & 0xff; t2 >>= 8;
821 out[j] = t2 & 0xff;
822 }
823
824 /* Wipe context */
825 memset(sctx, 0, sizeof(*sctx));
826}
827
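A short usage sketch of the routines above, mirroring how tf_get_current_process_hash() below drives them:

/*
 * struct sha1_ctx ctx;
 * u8 digest[SHA1_DIGEST_SIZE];
 *
 * sha1_init(&ctx);
 * sha1_update(&ctx, buf, buf_len);   // may be called repeatedly
 * sha1_final(&ctx, digest);          // also wipes the context
 */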
828
829
830
831/*----------------------------------------------------------------------------
832 * Process identification
833 *----------------------------------------------------------------------------*/
834
835/* This function hashes the current process's executable for authentication */
836int tf_get_current_process_hash(void *hash)
837{
838	int result = -ENOENT;
839 void *buffer;
840 struct mm_struct *mm;
841 struct vm_area_struct *vma;
842
843 buffer = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
844 if (buffer == NULL) {
845 dprintk(
846 KERN_ERR "tf_get_current_process_hash:"
847 " Out of memory for buffer!\n");
848 return -ENOMEM;
849 }
850
851 mm = current->mm;
852
853 down_read(&(mm->mmap_sem));
854 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
855 if ((vma->vm_flags & VM_EXECUTABLE) != 0 && vma->vm_file
856 != NULL) {
857 struct dentry *dentry;
858 unsigned long start;
859 unsigned long cur;
860 unsigned long end;
861 struct sha1_ctx sha1;
862
863 dentry = dget(vma->vm_file->f_dentry);
864
865 dprintk(
866 KERN_DEBUG "tf_get_current_process_hash: "
867 "Found executable VMA for inode %lu "
868 "(%lu bytes).\n",
869 dentry->d_inode->i_ino,
870 (unsigned long) (dentry->d_inode->
871 i_size));
872
873 start = do_mmap(vma->vm_file, 0,
874 dentry->d_inode->i_size,
875 PROT_READ | PROT_WRITE | PROT_EXEC,
876 MAP_PRIVATE, 0);
877		if (IS_ERR_VALUE(start)) {
878			dprintk(
879				KERN_ERR "tf_get_current_process_hash: "
880				"do_mmap failed (error %d)!\n",
881				(int) start);
882 dput(dentry);
883 result = -EFAULT;
884 goto vma_out;
885 }
886
887 end = start + dentry->d_inode->i_size;
888
889 sha1_init(&sha1);
890 cur = start;
891 while (cur < end) {
892 unsigned long chunk;
893
894 chunk = end - cur;
895 if (chunk > PAGE_SIZE)
896 chunk = PAGE_SIZE;
897 if (copy_from_user(buffer, (const void *) cur,
898 chunk) != 0) {
899 dprintk(
900 KERN_ERR "tf_get_current_"
901 "process_hash: copy_from_user "
902 "failed!\n");
903 result = -EINVAL;
904 (void) do_munmap(mm, start,
905 dentry->d_inode->i_size);
906 dput(dentry);
907 goto vma_out;
908 }
909 sha1_update(&sha1, buffer, chunk);
910 cur += chunk;
911 }
912 sha1_final(&sha1, hash);
913 result = 0;
914
915 (void) do_munmap(mm, start, dentry->d_inode->i_size);
916 dput(dentry);
917 break;
918 }
919 }
920vma_out:
921 up_read(&(mm->mmap_sem));
922
923 internal_kfree(buffer);
924
925 if (result == -ENOENT)
926 dprintk(
927 KERN_ERR "tf_get_current_process_hash: "
928 "No executable VMA found for process!\n");
929 return result;
930}
931
932#ifndef CONFIG_ANDROID
933/* This function hashes the path of the current application.
934 * If data is not NULL, the extra data is appended to the hash
935 * after the path.
936 */
937int tf_hash_application_path_and_data(char *buffer, void *data,
938 u32 data_len)
939{
940 int result = -ENOENT;
941 char *tmp = NULL;
942 struct mm_struct *mm;
943 struct vm_area_struct *vma;
944
945 tmp = internal_kmalloc(PAGE_SIZE, GFP_KERNEL);
946 if (tmp == NULL) {
947 result = -ENOMEM;
948 goto end;
949 }
950
951 mm = current->mm;
952
953 down_read(&(mm->mmap_sem));
954 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
955 if ((vma->vm_flags & VM_EXECUTABLE) != 0
956 && vma->vm_file != NULL) {
957 struct path *path;
958 char *endpath;
959 size_t pathlen;
960 struct sha1_ctx sha1;
961 u8 hash[SHA1_DIGEST_SIZE];
962
963 path = &vma->vm_file->f_path;
964
965 endpath = d_path(path, tmp, PAGE_SIZE);
966			if (IS_ERR(endpath)) {
967 result = PTR_ERR(endpath);
968 up_read(&(mm->mmap_sem));
969 goto end;
970 }
971 pathlen = (tmp + PAGE_SIZE) - endpath;
972
973#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
974 {
975 char *c;
976 dprintk(KERN_DEBUG "current process path = ");
977 for (c = endpath;
978 c < tmp + PAGE_SIZE;
979 c++)
980 dprintk("%c", *c);
981
982 dprintk(", uid=%d, euid=%d\n", current_uid(),
983 current_euid());
984 }
985#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
986
987 sha1_init(&sha1);
988 sha1_update(&sha1, endpath, pathlen);
989 if (data != NULL) {
990 dprintk(KERN_INFO "current process path: "
991 "Hashing additional data\n");
992 sha1_update(&sha1, data, data_len);
993 }
994 sha1_final(&sha1, hash);
995 memcpy(buffer, hash, sizeof(hash));
996
997 result = 0;
998
999 break;
1000 }
1001 }
1002 up_read(&(mm->mmap_sem));
1003
1004end:
1005 if (tmp != NULL)
1006 internal_kfree(tmp);
1007
1008 return result;
1009}
1010#endif /* !CONFIG_ANDROID */
1011
1012void *internal_kmalloc(size_t size, int priority)
1013{
1014 void *ptr;
1015 struct tf_device *dev = tf_get_device();
1016
1017 ptr = kmalloc(size, priority);
1018
1019 if (ptr != NULL)
1020 atomic_inc(
1021 &dev->stats.stat_memories_allocated);
1022
1023 return ptr;
1024}
1025
1026void internal_kfree(void *ptr)
1027{
1028 struct tf_device *dev = tf_get_device();
1029
1030 if (ptr != NULL)
1031 atomic_dec(
1032 &dev->stats.stat_memories_allocated);
1033	kfree(ptr);
1034}
1035
1036void internal_vunmap(void *ptr)
1037{
1038 struct tf_device *dev = tf_get_device();
1039
1040 if (ptr != NULL)
1041 atomic_dec(
1042 &dev->stats.stat_memories_allocated);
1043
1044	vunmap((void *) (((unsigned long) ptr) & PAGE_MASK));
1045}
1046
1047void *internal_vmalloc(size_t size)
1048{
1049 void *ptr;
1050 struct tf_device *dev = tf_get_device();
1051
1052 ptr = vmalloc(size);
1053
1054 if (ptr != NULL)
1055 atomic_inc(
1056 &dev->stats.stat_memories_allocated);
1057
1058 return ptr;
1059}
1060
1061void internal_vfree(void *ptr)
1062{
1063 struct tf_device *dev = tf_get_device();
1064
1065 if (ptr != NULL)
1066 atomic_dec(
1067 &dev->stats.stat_memories_allocated);
1068	vfree(ptr);
1069}
1070
1071unsigned long internal_get_zeroed_page(int priority)
1072{
1073 unsigned long result;
1074 struct tf_device *dev = tf_get_device();
1075
1076 result = get_zeroed_page(priority);
1077
1078 if (result != 0)
1079 atomic_inc(&dev->stats.
1080 stat_pages_allocated);
1081
1082 return result;
1083}
1084
1085void internal_free_page(unsigned long addr)
1086{
1087 struct tf_device *dev = tf_get_device();
1088
1089 if (addr != 0)
1090 atomic_dec(
1091 &dev->stats.stat_pages_allocated);
1092	free_page(addr);
1093}
1094
1095int internal_get_user_pages(
1096 struct task_struct *tsk,
1097 struct mm_struct *mm,
1098 unsigned long start,
1099 int len,
1100 int write,
1101 int force,
1102 struct page **pages,
1103 struct vm_area_struct **vmas)
1104{
1105 int result;
1106 struct tf_device *dev = tf_get_device();
1107
1108 result = get_user_pages(
1109 tsk,
1110 mm,
1111 start,
1112 len,
1113 write,
1114 force,
1115 pages,
1116 vmas);
1117
1118 if (result > 0)
1119 atomic_add(result,
1120 &dev->stats.stat_pages_locked);
1121
1122 return result;
1123}
1124
1125void internal_get_page(struct page *page)
1126{
1127 struct tf_device *dev = tf_get_device();
1128
1129 atomic_inc(&dev->stats.stat_pages_locked);
1130
1131 get_page(page);
1132}
1133
1134void internal_page_cache_release(struct page *page)
1135{
1136 struct tf_device *dev = tf_get_device();
1137
1138 atomic_dec(&dev->stats.stat_pages_locked);
1139
1140 page_cache_release(page);
1141}
1142
1143
diff --git a/security/tf_driver/tf_util.h b/security/tf_driver/tf_util.h
new file mode 100644
index 00000000000..14bc78952d8
--- /dev/null
+++ b/security/tf_driver/tf_util.h
@@ -0,0 +1,122 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#ifndef __TF_UTIL_H__
21#define __TF_UTIL_H__
22
23#include <linux/spinlock.h>
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/module.h>
27#include <linux/mm.h>
28#include <linux/crypto.h>
29#include <linux/mount.h>
30#include <linux/pagemap.h>
31#include <linux/vmalloc.h>
32#include <asm/byteorder.h>
33
34#include "tf_protocol.h"
35#include "tf_defs.h"
36
37/*----------------------------------------------------------------------------
38 * Debug printing routines
39 *----------------------------------------------------------------------------*/
40
41#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
42extern unsigned tf_debug_level;
43
44void address_cache_property(unsigned long va);
45
46#define dprintk(args...) ((void)(tf_debug_level >= 6 ? printk(args) : 0))
47#define dpr_info(args...) ((void)(tf_debug_level >= 3 ? pr_info(args) : 0))
48#define dpr_err(args...) ((void)(tf_debug_level >= 1 ? pr_err(args) : 0))
49#define INFO(fmt, args...) \
50 (void)dprintk(KERN_INFO "%s: " fmt "\n", __func__, ## args)
51#define WARNING(fmt, args...) \
52 (tf_debug_level >= 3 ? \
53 printk(KERN_WARNING "%s: " fmt "\n", __func__, ## args) : \
54 (void)0)
55#define ERROR(fmt, args...) \
56 (tf_debug_level >= 1 ? \
57 printk(KERN_ERR "%s: " fmt "\n", __func__, ## args) : \
58 (void)0)
59void tf_trace_array(const char *fun, const char *msg,
60 const void *ptr, size_t len);
61#define TF_TRACE_ARRAY(ptr, len) \
62 (tf_debug_level >= 7 ? \
63 tf_trace_array(__func__, #ptr "/" #len, ptr, len) : \
64	 (void) 0)
65
66void tf_dump_l1_shared_buffer(struct tf_l1_shared_buffer *buffer);
67
68void tf_dump_command(union tf_command *command);
69
70void tf_dump_answer(union tf_answer *answer);
71
72#else /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
73
74#define dprintk(args...) do { ; } while (0)
75#define dpr_info(args...) do { ; } while (0)
76#define dpr_err(args...) do { ; } while (0)
77#define INFO(fmt, args...) ((void)0)
78#define WARNING(fmt, args...) ((void)0)
79#define ERROR(fmt, args...) ((void)0)
80#define TF_TRACE_ARRAY(ptr, len) ((void)(ptr), (void)(len))
81#define tf_dump_l1_shared_buffer(buffer) ((void) 0)
82#define tf_dump_command(command) ((void) 0)
83#define tf_dump_answer(answer) ((void) 0)
84
85#endif /* defined(CONFIG_TF_DRIVER_DEBUG_SUPPORT) */
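For reference, the verbosity thresholds implied by the macros above (tf_debug_level is set elsewhere in the driver):

/*
 * tf_debug_level >= 1: dpr_err / ERROR
 * tf_debug_level >= 3: dpr_info / WARNING
 * tf_debug_level >= 6: dprintk / INFO
 * tf_debug_level >= 7: TF_TRACE_ARRAY
 *
 * e.g. with tf_debug_level = 3, dpr_err() and dpr_info() print,
 * while dprintk() and TF_TRACE_ARRAY() stay silent.
 */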
86
87#define SHA1_DIGEST_SIZE 20
88
89/*----------------------------------------------------------------------------
90 * Process identification
91 *----------------------------------------------------------------------------*/
92
93int tf_get_current_process_hash(void *hash);
94
95#ifndef CONFIG_ANDROID
96int tf_hash_application_path_and_data(char *buffer, void *data, u32 data_len);
97#endif /* !CONFIG_ANDROID */
98
99/*----------------------------------------------------------------------------
100 * Statistic computation
101 *----------------------------------------------------------------------------*/
102
103void *internal_kmalloc(size_t size, int priority);
104void internal_kfree(void *ptr);
105void internal_vunmap(void *ptr);
106void *internal_vmalloc(size_t size);
107void internal_vfree(void *ptr);
108unsigned long internal_get_zeroed_page(int priority);
109void internal_free_page(unsigned long addr);
110int internal_get_user_pages(
111 struct task_struct *tsk,
112 struct mm_struct *mm,
113 unsigned long start,
114 int len,
115 int write,
116 int force,
117 struct page **pages,
118 struct vm_area_struct **vmas);
119void internal_get_page(struct page *page);
120void internal_page_cache_release(struct page *page);
121#endif /* __TF_UTIL_H__ */
122