path: root/security/tf_driver/tf_comm.c
author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-22 10:38:37 -0500
commit	fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree	a57612d1888735a2ec7972891b68c1ac5ec8faea /security/tf_driver/tf_comm.c
parent	8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files. (HEAD, master)
Diffstat (limited to 'security/tf_driver/tf_comm.c')
-rw-r--r--	security/tf_driver/tf_comm.c	1745
1 file changed, 1745 insertions, 0 deletions
diff --git a/security/tf_driver/tf_comm.c b/security/tf_driver/tf_comm.c
new file mode 100644
index 00000000000..16915beb406
--- /dev/null
+++ b/security/tf_driver/tf_comm.c
@@ -0,0 +1,1745 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <asm/div64.h>
21#include <asm/system.h>
22#include <linux/version.h>
23#include <asm/cputype.h>
24#include <linux/interrupt.h>
25#include <linux/page-flags.h>
26#include <linux/pagemap.h>
27#include <linux/vmalloc.h>
28#include <linux/jiffies.h>
29#include <linux/freezer.h>
30
31#include "tf_defs.h"
32#include "tf_comm.h"
33#include "tf_protocol.h"
34#include "tf_util.h"
35#include "tf_conn.h"
36
37#ifdef CONFIG_TF_ZEBRA
38#include "tf_zebra.h"
39#endif
40
41/*---------------------------------------------------------------------------
42 * Internal Constants
43 *---------------------------------------------------------------------------*/
44
45/*
46 * shared memory descriptor constants
47 */
48#define DESCRIPTOR_B_MASK (1 << 2)
49#define DESCRIPTOR_C_MASK (1 << 3)
50#define DESCRIPTOR_S_MASK (1 << 10)
51
52#define L1_COARSE_DESCRIPTOR_BASE (0x00000001)
53#define L1_COARSE_DESCRIPTOR_ADDR_MASK (0xFFFFFC00)
54#define L1_COARSE_DESCRIPTOR_V13_12_SHIFT (5)
55
56#define L2_PAGE_DESCRIPTOR_BASE (0x00000003)
57#define L2_PAGE_DESCRIPTOR_AP_APX_READ (0x220)
58#define L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE (0x30)
59
60#define L2_INIT_DESCRIPTOR_BASE (0x00000003)
61#define L2_INIT_DESCRIPTOR_V13_12_SHIFT (4)
62
63/*
64 * Reject attempts to share Strongly-Ordered or Device memory:
65 * Strongly-Ordered: TEX=0b000, C=0, B=0
66 * Shared Device: TEX=0b000, C=0, B=1
67 * Non-Shared Device: TEX=0b010, C=0, B=0
68 */
69#define L2_TEX_C_B_MASK \
70 ((1<<8) | (1<<7) | (1<<6) | (1<<3) | (1<<2))
71#define L2_TEX_C_B_STRONGLY_ORDERED \
72 ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (0<<2))
73#define L2_TEX_C_B_SHARED_DEVICE \
74 ((0<<8) | (0<<7) | (0<<6) | (0<<3) | (1<<2))
75#define L2_TEX_C_B_NON_SHARED_DEVICE \
76 ((0<<8) | (1<<7) | (0<<6) | (0<<3) | (0<<2))
77
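/*
 * Editor's note -- illustrative helper (not part of the original driver)
 * showing how the three masks above classify an L2 descriptor; the real
 * check is the IS_STRONGLY_ORDERED_OR_DEVICE_MEM() macro defined further
 * down inside tf_fill_descriptor_table().
 */
#if 0	/* illustration only */
static inline int l2_descriptor_is_device_or_so(u32 descriptor)
{
	u32 tex_c_b = descriptor & L2_TEX_C_B_MASK;

	/*
	 * Example: a Normal write-back page has C=1, B=1, so tex_c_b is
	 * 0x00C and none of the three encodings below match (accepted);
	 * a descriptor with TEX=0, C=0, B=1 yields 0x004, which matches
	 * L2_TEX_C_B_SHARED_DEVICE and is therefore rejected.
	 */
	return (tex_c_b == L2_TEX_C_B_STRONGLY_ORDERED) ||
	       (tex_c_b == L2_TEX_C_B_SHARED_DEVICE) ||
	       (tex_c_b == L2_TEX_C_B_NON_SHARED_DEVICE);
}
#endif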
78#define CACHE_S(x) ((x) & (1 << 24))
79#define CACHE_DSIZE(x) (((x) >> 12) & 4095)
80
81#define TIME_IMMEDIATE ((u64) 0x0000000000000000ULL)
82#define TIME_INFINITE ((u64) 0xFFFFFFFFFFFFFFFFULL)
83
84/*---------------------------------------------------------------------------
85 * atomic operation definitions
86 *---------------------------------------------------------------------------*/
87
88/*
89 * Atomically updates the sync_serial_n and time_n registers.
90 * Modifications of sync_serial_n and time_n are thread-safe.
91 */
92void tf_set_current_time(struct tf_comm *comm)
93{
94 u32 new_sync_serial;
95 struct timeval now;
96 u64 time64;
97
98 /*
99 * lock the structure while updating the L1 shared memory fields
100 */
101 spin_lock(&comm->lock);
102
103 /* read sync_serial_n and change the TimeSlot bit field */
104 new_sync_serial =
105 tf_read_reg32(&comm->l1_buffer->sync_serial_n) + 1;
106
107 do_gettimeofday(&now);
108 time64 = now.tv_sec;
109 time64 = (time64 * 1000) + (now.tv_usec / 1000);
110
111 /* Write the new time64 and nSyncSerial into shared memory */
112 tf_write_reg64(&comm->l1_buffer->time_n[new_sync_serial &
113 TF_SYNC_SERIAL_TIMESLOT_N], time64);
114 tf_write_reg32(&comm->l1_buffer->sync_serial_n,
115 new_sync_serial);
116
117 spin_unlock(&comm->lock);
118}
119
120/*
121 * Performs the read timeout operation.
122 * The difficulty here is to atomically read two u32
123 * values from the L1 shared buffer.
124 * Consistency is guaranteed by reading the timeslot given by the
125 * Secure World before and after the operation and retrying on mismatch.
126 */
127static inline void tf_read_timeout(struct tf_comm *comm, u64 *time)
128{
129 u32 sync_serial_s_initial = 0;
130 u32 sync_serial_s_final = 1;
131 u64 time64;
132
133 spin_lock(&comm->lock);
134
135 while (sync_serial_s_initial != sync_serial_s_final) {
136 sync_serial_s_initial = tf_read_reg32(
137 &comm->l1_buffer->sync_serial_s);
138 time64 = tf_read_reg64(
139 &comm->l1_buffer->timeout_s[sync_serial_s_initial&1]);
140
141 sync_serial_s_final = tf_read_reg32(
142 &comm->l1_buffer->sync_serial_s);
143 }
144
145 spin_unlock(&comm->lock);
146
147 *time = time64;
148}
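/*
 * Editor's note -- the retry loop above assumes the Secure World updates
 * timeout_s[] the same way tf_set_current_time() updates time_n[] on the
 * normal-world side:
 *
 *   1. write the new timeout into timeout_s[(sync_serial_s + 1) & 1]
 *      (the slot readers are currently *not* using);
 *   2. make the data visible (barrier);
 *   3. increment sync_serial_s.
 *
 * A reader that races with step 3 sees the serial change between its two
 * reads and simply retries, so the 64-bit value is never observed torn.
 */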
149
150/*----------------------------------------------------------------------------
151 * SIGKILL signal handling
152 *----------------------------------------------------------------------------*/
153
154static bool sigkill_pending(void)
155{
156 if (signal_pending(current)) {
157 dprintk(KERN_INFO "A signal is pending\n");
158 if (sigismember(&current->pending.signal, SIGKILL)) {
159 dprintk(KERN_INFO "A SIGKILL is pending\n");
160 return true;
161 } else if (sigismember(
162 &current->signal->shared_pending.signal, SIGKILL)) {
163 dprintk(KERN_INFO "A SIGKILL is pending (shared)\n");
164 return true;
165 }
166 }
167 return false;
168}
169
170/*----------------------------------------------------------------------------
171 * Shared memory related operations
172 *----------------------------------------------------------------------------*/
173
174struct tf_coarse_page_table *tf_alloc_coarse_page_table(
175 struct tf_coarse_page_table_allocation_context *alloc_context,
176 u32 type)
177{
178 struct tf_coarse_page_table *coarse_pg_table = NULL;
179
180 spin_lock(&(alloc_context->lock));
181
182 if (!(list_empty(&(alloc_context->free_coarse_page_tables)))) {
183 /*
184 * The free list can provide us a coarse page table
185 * descriptor
186 */
187 coarse_pg_table = list_first_entry(
188 &alloc_context->free_coarse_page_tables,
189 struct tf_coarse_page_table, list);
190 list_del(&(coarse_pg_table->list));
191
192 coarse_pg_table->parent->ref_count++;
193 } else {
194 /* no array of coarse page tables, create a new one */
195 struct tf_coarse_page_table_array *array;
196 void *page;
197 int i;
198
199 spin_unlock(&(alloc_context->lock));
200
201 /* first allocate a new page descriptor */
202 array = internal_kmalloc(sizeof(*array), GFP_KERNEL);
203 if (array == NULL) {
204 dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
205 " failed to allocate a table array\n",
206 alloc_context);
207 return NULL;
208 }
209
210 array->type = type;
211 INIT_LIST_HEAD(&(array->list));
212
213 /* now allocate the actual page the page descriptor describes */
214 page = (void *) internal_get_zeroed_page(GFP_KERNEL);
215 if (page == NULL) {
216 dprintk(KERN_ERR "tf_alloc_coarse_page_table(%p):"
217 " failed allocate a page\n",
218 alloc_context);
219 internal_kfree(array);
220 return NULL;
221 }
222
223 spin_lock(&(alloc_context->lock));
224
225 /* initialize the coarse page table descriptors */
226 for (i = 0; i < 4; i++) {
227 INIT_LIST_HEAD(&(array->coarse_page_tables[i].list));
228 array->coarse_page_tables[i].descriptors =
229 page + (i * SIZE_1KB);
230 array->coarse_page_tables[i].parent = array;
231
232 if (i == 0) {
233 /*
234 * the first element is kept for the current
235 * coarse page table allocation
236 */
237 coarse_pg_table =
238 &(array->coarse_page_tables[i]);
239 array->ref_count = 1;
240 } else {
241 /*
242 * The other elements are added to the free list
243 */
244 list_add(&(array->coarse_page_tables[i].list),
245 &(alloc_context->
246 free_coarse_page_tables));
247 }
248 }
249
250 list_add(&(array->list),
251 &(alloc_context->coarse_page_table_arrays));
252 }
253 spin_unlock(&(alloc_context->lock));
254
255 return coarse_pg_table;
256}
257
258
259void tf_free_coarse_page_table(
260 struct tf_coarse_page_table_allocation_context *alloc_context,
261 struct tf_coarse_page_table *coarse_pg_table,
262 int force)
263{
264 struct tf_coarse_page_table_array *array;
265
266 spin_lock(&(alloc_context->lock));
267
268 array = coarse_pg_table->parent;
269
270 (array->ref_count)--;
271
272 if (array->ref_count == 0) {
273 /*
274 * no coarse page table descriptor is used
275 * check if we should free the whole page
276 */
277
278 if ((array->type == TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED)
279 && (force == 0))
280 /*
281 * This is a preallocated page,
282 * add the page back to the free list
283 */
284 list_add(&(coarse_pg_table->list),
285 &(alloc_context->free_coarse_page_tables));
286 else {
287 /*
288 * None of the page's coarse page table descriptors
289 * are in use, free the whole page
290 */
291 int i;
292 u32 *descriptors;
293
294 /*
295 * remove the page's associated coarse page table
296 * descriptors from the free list
297 */
298 for (i = 0; i < 4; i++)
299 if (&(array->coarse_page_tables[i]) !=
300 coarse_pg_table)
301 list_del(&(array->
302 coarse_page_tables[i].list));
303
304 descriptors =
305 array->coarse_page_tables[0].descriptors;
306 array->coarse_page_tables[0].descriptors = NULL;
307
308 /* remove the coarse page table from the array */
309 list_del(&(array->list));
310
311 spin_unlock(&(alloc_context->lock));
312 /*
313 * Free the page.
314 * The address of the page is contained in the first
315 * element
316 */
317 internal_free_page((unsigned long) descriptors);
318 /* finally free the array */
319 internal_kfree(array);
320
321 spin_lock(&(alloc_context->lock));
322 }
323 } else {
324 /*
325 * Some coarse page table descriptors are in use.
326 * Add the descriptor to the free list
327 */
328 list_add(&(coarse_pg_table->list),
329 &(alloc_context->free_coarse_page_tables));
330 }
331
332 spin_unlock(&(alloc_context->lock));
333}
334
335
336void tf_init_coarse_page_table_allocator(
337 struct tf_coarse_page_table_allocation_context *alloc_context)
338{
339 spin_lock_init(&(alloc_context->lock));
340 INIT_LIST_HEAD(&(alloc_context->coarse_page_table_arrays));
341 INIT_LIST_HEAD(&(alloc_context->free_coarse_page_tables));
342}
343
344void tf_release_coarse_page_table_allocator(
345 struct tf_coarse_page_table_allocation_context *alloc_context)
346{
347 spin_lock(&(alloc_context->lock));
348
349 /* now clean up the list of page descriptors */
350 while (!list_empty(&(alloc_context->coarse_page_table_arrays))) {
351 struct tf_coarse_page_table_array *page_desc;
352 u32 *descriptors;
353
354 page_desc = list_first_entry(
355 &alloc_context->coarse_page_table_arrays,
356 struct tf_coarse_page_table_array, list);
357
358 descriptors = page_desc->coarse_page_tables[0].descriptors;
359 list_del(&(page_desc->list));
360
361 spin_unlock(&(alloc_context->lock));
362
363 if (descriptors != NULL)
364 internal_free_page((unsigned long)descriptors);
365
366 internal_kfree(page_desc);
367
368 spin_lock(&(alloc_context->lock));
369 }
370
371 spin_unlock(&(alloc_context->lock));
372}
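/*
 * Editor's note -- minimal usage sketch of the allocator API above
 * (illustration only; the real call sites are elsewhere in the driver).
 */
#if 0	/* illustration only */
static void coarse_page_table_allocator_example(void)
{
	struct tf_coarse_page_table_allocation_context ctx;
	struct tf_coarse_page_table *table;

	tf_init_coarse_page_table_allocator(&ctx);

	/* Takes one of the four 1 KB tables carved out of a zeroed page */
	table = tf_alloc_coarse_page_table(&ctx,
		TF_PAGE_DESCRIPTOR_TYPE_NORMAL);

	/*
	 * force == 0: a NORMAL table's backing page is freed once its last
	 * table is released; a PREALLOCATED table would instead go back to
	 * the free list
	 */
	if (table != NULL)
		tf_free_coarse_page_table(&ctx, table, 0);

	/* Frees every page still owned by the allocator */
	tf_release_coarse_page_table_allocator(&ctx);
}
#endif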
373
374/*
375 * Returns the L1 coarse page descriptor for
376 * a coarse page table located at address coarse_pg_table_descriptors
377 */
378u32 tf_get_l1_coarse_descriptor(
379 u32 coarse_pg_table_descriptors[256])
380{
381 u32 descriptor = L1_COARSE_DESCRIPTOR_BASE;
382 unsigned int info = read_cpuid(CPUID_CACHETYPE);
383
384 descriptor |= (virt_to_phys((void *) coarse_pg_table_descriptors)
385 & L1_COARSE_DESCRIPTOR_ADDR_MASK);
386
387 if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11))) {
388 dprintk(KERN_DEBUG "tf_get_l1_coarse_descriptor "
389 "V31-12 added to descriptor\n");
390 /* the 16k alignment restriction applies */
391 descriptor |= (DESCRIPTOR_V13_12_GET(
392 (u32)coarse_pg_table_descriptors) <<
393 L1_COARSE_DESCRIPTOR_V13_12_SHIFT);
394 }
395
396 return descriptor;
397}
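/*
 * Editor's note -- worked example (hypothetical values): for a coarse page
 * table at physical address 0x80403400 (1 KB aligned), the function above
 * builds:
 *   descriptor  = L1_COARSE_DESCRIPTOR_BASE                   -> 0x00000001
 *   descriptor |= 0x80403400 & L1_COARSE_DESCRIPTOR_ADDR_MASK -> 0x80403401
 * If the 16 KB cache-aliasing restriction applies, bits [13:12] of the table
 * address (here 0b11, assuming DESCRIPTOR_V13_12_GET() extracts those bits)
 * are also copied in at L1_COARSE_DESCRIPTOR_V13_12_SHIFT:
 *   descriptor |= 0x3 << 5                                    -> 0x80403461
 */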
398
399
400#define dprintk_desc(...)
401/*
402 * Returns the L2 descriptor for the specified user page.
403 */
404u32 tf_get_l2_descriptor_common(u32 vaddr, struct mm_struct *mm)
405{
406 pgd_t *pgd;
407 pud_t *pud;
408 pmd_t *pmd;
409 pte_t *ptep;
410 u32 *hwpte;
411 u32 tex = 0;
412 u32 descriptor = 0;
413
414 dprintk_desc(KERN_INFO "VirtAddr = %x\n", vaddr);
415 pgd = pgd_offset(mm, vaddr);
416 dprintk_desc(KERN_INFO "pgd = %x, value=%x\n", (unsigned int) pgd,
417 (unsigned int) *pgd);
418 if (pgd_none(*pgd))
419 goto error;
420 pud = pud_offset(pgd, vaddr);
421 dprintk_desc(KERN_INFO "pud = %x, value=%x\n", (unsigned int) pud,
422 (unsigned int) *pud);
423 if (pud_none(*pud))
424 goto error;
425 pmd = pmd_offset(pud, vaddr);
426 dprintk_desc(KERN_INFO "pmd = %x, value=%x\n", (unsigned int) pmd,
427 (unsigned int) *pmd);
428 if (pmd_none(*pmd))
429 goto error;
430
431 if (PMD_TYPE_SECT&(*pmd)) {
432 /* We have a section */
433 dprintk_desc(KERN_INFO "Section descr=%x\n",
434 (unsigned int)*pmd);
435 if ((*pmd) & PMD_SECT_BUFFERABLE)
436 descriptor |= DESCRIPTOR_B_MASK;
437 if ((*pmd) & PMD_SECT_CACHEABLE)
438 descriptor |= DESCRIPTOR_C_MASK;
439 if ((*pmd) & PMD_SECT_S)
440 descriptor |= DESCRIPTOR_S_MASK;
441 tex = ((*pmd) >> 12) & 7;
442 } else {
443 /* We have a table */
444 ptep = pte_offset_map(pmd, vaddr);
445 if (pte_present(*ptep)) {
446 dprintk_desc(KERN_INFO "L2 descr=%x\n",
447 (unsigned int) *ptep);
448 if ((*ptep) & L_PTE_MT_BUFFERABLE)
449 descriptor |= DESCRIPTOR_B_MASK;
450 if ((*ptep) & L_PTE_MT_WRITETHROUGH)
451 descriptor |= DESCRIPTOR_C_MASK;
452 if ((*ptep) & L_PTE_MT_DEV_SHARED)
453 descriptor |= DESCRIPTOR_S_MASK;
454
455 /*
456 * Linux's pte doesn't keep track of TEX value.
457 * Have to jump to hwpte see include/asm/pgtable.h
458 * (-2k before 2.6.38, then +2k)
459 */
460#ifdef PTE_HWTABLE_SIZE
461 hwpte = (u32 *) (ptep+PTE_HWTABLE_PTRS);
462#else
463 hwpte = (u32 *) (ptep-PTRS_PER_PTE);
464#endif
465 if (((*hwpte) & L2_DESCRIPTOR_ADDR_MASK) !=
466 ((*ptep) & L2_DESCRIPTOR_ADDR_MASK))
467 goto error;
468 dprintk_desc(KERN_INFO "hw descr=%x\n", *hwpte);
469 tex = ((*hwpte) >> 6) & 7;
470 pte_unmap(ptep);
471 } else {
472 pte_unmap(ptep);
473 goto error;
474 }
475 }
476
477 descriptor |= (tex << 6);
478
479 return descriptor;
480
481error:
482 dprintk(KERN_ERR "Error occurred in %s\n", __func__);
483 return 0;
484}
485
486
487/*
488 * Changes an L2 page descriptor back to a pointer to a physical page
489 */
490inline struct page *tf_l2_page_descriptor_to_page(u32 l2_page_descriptor)
491{
492 return pte_page(l2_page_descriptor & L2_DESCRIPTOR_ADDR_MASK);
493}
494
495
496/*
497 * Converts the struct page pointer stored in *l2_page_descriptor into the
498 * L2 page descriptor for that page, with access rights derived from flags.
499 */
500static void tf_get_l2_page_descriptor(
501 u32 *l2_page_descriptor,
502 u32 flags, struct mm_struct *mm)
503{
504 unsigned long page_vaddr;
505 u32 descriptor;
506 struct page *page;
507 bool unmap_page = false;
508
509#if 0
510 dprintk(KERN_INFO
511 "tf_get_l2_page_descriptor():"
512 "*l2_page_descriptor=%x\n",
513 *l2_page_descriptor);
514#endif
515
516 if (*l2_page_descriptor == L2_DESCRIPTOR_FAULT)
517 return;
518
519 page = (struct page *) (*l2_page_descriptor);
520
521 page_vaddr = (unsigned long) page_address(page);
522 if (page_vaddr == 0) {
523 dprintk(KERN_INFO "page_address returned 0\n");
524 /* Should we use kmap_atomic(page, KM_USER0) instead ? */
525 page_vaddr = (unsigned long) kmap(page);
526 if (page_vaddr == 0) {
527 *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
528 dprintk(KERN_ERR "kmap returned 0\n");
529 return;
530 }
531 unmap_page = true;
532 }
533
534 descriptor = tf_get_l2_descriptor_common(page_vaddr, mm);
535 if (descriptor == 0) {
536 *l2_page_descriptor = L2_DESCRIPTOR_FAULT;
537 return;
538 }
539 descriptor |= L2_PAGE_DESCRIPTOR_BASE;
540
541 descriptor |= (page_to_phys(page) & L2_DESCRIPTOR_ADDR_MASK);
542
543 if (!(flags & TF_SHMEM_TYPE_WRITE))
544 /* only read access */
545 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ;
546 else
547 /* read and write access */
548 descriptor |= L2_PAGE_DESCRIPTOR_AP_APX_READ_WRITE;
549
550 if (unmap_page)
551 kunmap(page);
552
553 *l2_page_descriptor = descriptor;
554}
555
556
557/*
558 * Unlocks the physical memory pages
559 * and frees the coarse pages that need to
560 */
561void tf_cleanup_shared_memory(
562 struct tf_coarse_page_table_allocation_context *alloc_context,
563 struct tf_shmem_desc *shmem_desc,
564 u32 full_cleanup)
565{
566 u32 coarse_page_index;
567
568 dprintk(KERN_INFO "tf_cleanup_shared_memory(%p)\n",
569 shmem_desc);
570
571#ifdef DEBUG_COARSE_TABLES
572 printk(KERN_DEBUG "tf_cleanup_shared_memory "
573 "- number of coarse page tables=%d\n",
574 shmem_desc->coarse_pg_table_count);
575
576 for (coarse_page_index = 0;
577 coarse_page_index < shmem_desc->coarse_pg_table_count;
578 coarse_page_index++) {
579 u32 j;
580
581 printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
582 shmem_desc->coarse_pg_table[coarse_page_index],
583 shmem_desc->coarse_pg_table[coarse_page_index]->
584 descriptors,
585 coarse_page_index);
586 if (shmem_desc->coarse_pg_table[coarse_page_index] != NULL) {
587 for (j = 0;
588 j < TF_DESCRIPTOR_TABLE_CAPACITY;
589 j += 8) {
590 int k;
591 printk(KERN_DEBUG " ");
592 for (k = j; k < j + 8; k++)
593 printk(KERN_DEBUG "%p ",
594 shmem_desc->coarse_pg_table[
595 coarse_page_index]->
596 descriptors);
597 printk(KERN_DEBUG "\n");
598 }
599 }
600 }
601 printk(KERN_DEBUG "tf_cleanup_shared_memory() - done\n\n");
602#endif
603
604 /* Parse the coarse page descriptors */
605 for (coarse_page_index = 0;
606 coarse_page_index < shmem_desc->coarse_pg_table_count;
607 coarse_page_index++) {
608 u32 j;
609 u32 found = 0;
610
611 /* parse the page descriptors of the coarse page */
612 for (j = 0; j < TF_DESCRIPTOR_TABLE_CAPACITY; j++) {
613 u32 l2_page_descriptor = (u32) (shmem_desc->
614 coarse_pg_table[coarse_page_index]->
615 descriptors[j]);
616
617 if (l2_page_descriptor != L2_DESCRIPTOR_FAULT) {
618 struct page *page =
619 tf_l2_page_descriptor_to_page(
620 l2_page_descriptor);
621
622 if (!PageReserved(page))
623 SetPageDirty(page);
624 internal_page_cache_release(page);
625
626 found = 1;
627 } else if (found == 1) {
628 break;
629 }
630 }
631
632 /*
633 * Only free the coarse pages of descriptors not preallocated
634 */
635 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
636 (full_cleanup != 0))
637 tf_free_coarse_page_table(alloc_context,
638 shmem_desc->coarse_pg_table[coarse_page_index],
639 0);
640 }
641
642 shmem_desc->coarse_pg_table_count = 0;
643 dprintk(KERN_INFO "tf_cleanup_shared_memory(%p) done\n",
644 shmem_desc);
645}
646
647/*
648 * Make sure the coarse pages are allocated. If not allocated, do it.
649 * Locks down the physical memory pages.
650 * Verifies the memory attributes depending on flags.
651 */
652int tf_fill_descriptor_table(
653 struct tf_coarse_page_table_allocation_context *alloc_context,
654 struct tf_shmem_desc *shmem_desc,
655 u32 buffer,
656 struct vm_area_struct **vmas,
657 u32 descriptors[TF_MAX_COARSE_PAGES],
658 u32 buffer_size,
659 u32 *buffer_start_offset,
660 bool in_user_space,
661 u32 flags,
662 u32 *descriptor_count)
663{
664 u32 coarse_page_index;
665 u32 coarse_page_count;
666 u32 page_count;
667 u32 page_shift = 0;
668 int ret = 0;
669 unsigned int info = read_cpuid(CPUID_CACHETYPE);
670
671 dprintk(KERN_INFO "tf_fill_descriptor_table"
672 "(%p, buffer=0x%08X, size=0x%08X, user=%01x "
673 "flags = 0x%08x)\n",
674 shmem_desc,
675 buffer,
676 buffer_size,
677 in_user_space,
678 flags);
679
680 /*
681 * Compute the number of pages
682 * Compute the number of coarse pages
683 * Compute the page offset
684 */
685 page_count = ((buffer & ~PAGE_MASK) +
686 buffer_size + ~PAGE_MASK) >> PAGE_SHIFT;
687
688 /* check whether the 16k alignment restriction applies */
689 if (CACHE_S(info) && (CACHE_DSIZE(info) & (1 << 11)))
690 /*
691 * The 16k alignment restriction applies.
692 * Shift data to get them 16k aligned
693 */
694 page_shift = DESCRIPTOR_V13_12_GET(buffer);
695 page_count += page_shift;
696
697
698 /*
699 * Check the number of pages fit in the coarse pages
700 */
701 if (page_count > (TF_DESCRIPTOR_TABLE_CAPACITY *
702 TF_MAX_COARSE_PAGES)) {
703 dprintk(KERN_ERR "tf_fill_descriptor_table(%p): "
704 "%u pages required to map shared memory!\n",
705 shmem_desc, page_count);
706 ret = -ENOMEM;
707 goto error;
708 }
709
710 /* each coarse page table describes 256 pages */
711 coarse_page_count = ((page_count +
712 TF_DESCRIPTOR_TABLE_CAPACITY_MASK) >>
713 TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT);
714
715 /*
716 * Compute the buffer offset
717 */
718 *buffer_start_offset = (buffer & ~PAGE_MASK) |
719 (page_shift << PAGE_SHIFT);
720
721 /* map each coarse page */
722 for (coarse_page_index = 0;
723 coarse_page_index < coarse_page_count;
724 coarse_page_index++) {
725 u32 j;
726 struct tf_coarse_page_table *coarse_pg_table;
727
728 /* compute a virtual address with appropriate offset */
729 u32 buffer_offset_vaddr = buffer +
730 (coarse_page_index * TF_MAX_COARSE_PAGE_MAPPED_SIZE);
731 u32 pages_to_get;
732
733 /*
734 * Compute the number of pages left for this coarse page.
735 * Decrement page_count each time
736 */
737 pages_to_get = (page_count >>
738 TF_DESCRIPTOR_TABLE_CAPACITY_BIT_SHIFT) ?
739 TF_DESCRIPTOR_TABLE_CAPACITY : page_count;
740 page_count -= pages_to_get;
741
742 /*
743 * Check if the coarse page has already been allocated
744 * If not, do it now
745 */
746 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM)
747 || (shmem_desc->type ==
748 TF_SHMEM_TYPE_PM_HIBERNATE)) {
749 coarse_pg_table = tf_alloc_coarse_page_table(
750 alloc_context,
751 TF_PAGE_DESCRIPTOR_TYPE_NORMAL);
752
753 if (coarse_pg_table == NULL) {
754 dprintk(KERN_ERR
755 "tf_fill_descriptor_table(%p): "
756 "tf_alloc_coarse_page_table "
757 "failed for coarse page %d\n",
758 shmem_desc, coarse_page_index);
759 ret = -ENOMEM;
760 goto error;
761 }
762
763 shmem_desc->coarse_pg_table[coarse_page_index] =
764 coarse_pg_table;
765 } else {
766 coarse_pg_table =
767 shmem_desc->coarse_pg_table[coarse_page_index];
768 }
769
770 /*
771 * The page is not necessarily filled with zeroes.
772 * Set the fault descriptors (each descriptor is 4 bytes long)
773 */
774 memset(coarse_pg_table->descriptors, 0x00,
775 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
776
777 if (in_user_space) {
778 int pages;
779
780 /*
781 * TRICK: use pCoarsePageDescriptor->descriptors to
782 * hold the (struct page*) items before getting their
783 * physical address
784 */
785 down_read(&(current->mm->mmap_sem));
786 pages = internal_get_user_pages(
787 current,
788 current->mm,
789 buffer_offset_vaddr,
790 /*
791 * page_shift is cleared after retrieving first
792 * coarse page
793 */
794 (pages_to_get - page_shift),
795 (flags & TF_SHMEM_TYPE_WRITE) ? 1 : 0,
796 0,
797 (struct page **) (coarse_pg_table->descriptors
798 + page_shift),
799 vmas);
800 up_read(&(current->mm->mmap_sem));
801
802 if ((pages <= 0) ||
803 (pages != (pages_to_get - page_shift))) {
804 dprintk(KERN_ERR "tf_fill_descriptor_table:"
805 " get_user_pages got %d pages while "
806 "trying to get %d pages!\n",
807 pages, pages_to_get - page_shift);
808 ret = -EFAULT;
809 goto error;
810 }
811
812 for (j = page_shift;
813 j < page_shift + pages;
814 j++) {
815 /* Get the actual L2 descriptors */
816 tf_get_l2_page_descriptor(
817 &coarse_pg_table->descriptors[j],
818 flags,
819 current->mm);
820 /*
821 * Reject Strongly-Ordered or Device Memory
822 */
823#define IS_STRONGLY_ORDERED_OR_DEVICE_MEM(x) \
824 ((((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_STRONGLY_ORDERED) || \
825 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_SHARED_DEVICE) || \
826 (((x) & L2_TEX_C_B_MASK) == L2_TEX_C_B_NON_SHARED_DEVICE))
827
828 if (IS_STRONGLY_ORDERED_OR_DEVICE_MEM(
829 coarse_pg_table->
830 descriptors[j])) {
831 dprintk(KERN_ERR
832 "tf_fill_descriptor_table:"
833 " descriptor 0x%08X use "
834 "strongly-ordered or device "
835 "memory. Rejecting!\n",
836 coarse_pg_table->
837 descriptors[j]);
838 ret = -EFAULT;
839 goto error;
840 }
841 }
842 } else if (is_vmalloc_addr((void *)buffer_offset_vaddr)) {
843 /* Kernel-space memory obtained through vmalloc */
844 dprintk(KERN_INFO
845 "tf_fill_descriptor_table: "
846 "vmalloc'ed buffer starting at %p\n",
847 (void *)buffer_offset_vaddr);
848 for (j = page_shift; j < pages_to_get; j++) {
849 struct page *page;
850 void *addr =
851 (void *)(buffer_offset_vaddr +
852 (j - page_shift) * PAGE_SIZE);
853 page = vmalloc_to_page(addr);
854 if (page == NULL) {
855 dprintk(KERN_ERR
856 "tf_fill_descriptor_table: "
857 "cannot map %p (vmalloc) "
858 "to page\n",
859 addr);
860 ret = -EFAULT;
861 goto error;
862 }
863 coarse_pg_table->descriptors[j] = (u32)page;
864 get_page(page);
865
866 /* change coarse page "page address" */
867 tf_get_l2_page_descriptor(
868 &coarse_pg_table->descriptors[j],
869 flags,
870 &init_mm);
871 }
872 } else {
873 /* Kernel-space memory given by a virtual address */
874 dprintk(KERN_INFO
875 "tf_fill_descriptor_table: "
876 "buffer starting at virtual address %p\n",
877 (void *)buffer_offset_vaddr);
878 for (j = page_shift; j < pages_to_get; j++) {
879 struct page *page;
880 void *addr =
881 (void *)(buffer_offset_vaddr +
882 (j - page_shift) * PAGE_SIZE);
883 page = virt_to_page(addr);
884 if (page == NULL) {
885 dprintk(KERN_ERR
886 "tf_fill_descriptor_table: "
887 "cannot map %p (virtual) "
888 "to page\n",
889 addr);
890 ret = -EFAULT;
891 goto error;
892 }
893 coarse_pg_table->descriptors[j] = (u32)page;
894 get_page(page);
895
896 /* change coarse page "page address" */
897 tf_get_l2_page_descriptor(
898 &coarse_pg_table->descriptors[j],
899 flags,
900 &init_mm);
901 }
902 }
903
904 dmac_flush_range((void *)coarse_pg_table->descriptors,
905 (void *)(((u32)(coarse_pg_table->descriptors)) +
906 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32)));
907
908 outer_clean_range(
909 __pa(coarse_pg_table->descriptors),
910 __pa(coarse_pg_table->descriptors) +
911 TF_DESCRIPTOR_TABLE_CAPACITY * sizeof(u32));
912 wmb();
913
914 /* Update the coarse page table address */
915 descriptors[coarse_page_index] =
916 tf_get_l1_coarse_descriptor(
917 coarse_pg_table->descriptors);
918
919 /*
920 * The next coarse page has no page shift, reset the
921 * page_shift
922 */
923 page_shift = 0;
924 }
925
926 *descriptor_count = coarse_page_count;
927 shmem_desc->coarse_pg_table_count = coarse_page_count;
928
929#ifdef DEBUG_COARSE_TABLES
930 printk(KERN_DEBUG "ntf_fill_descriptor_table - size=0x%08X "
931 "numberOfCoarsePages=%d\n", buffer_size,
932 shmem_desc->coarse_pg_table_count);
933 for (coarse_page_index = 0;
934 coarse_page_index < shmem_desc->coarse_pg_table_count;
935 coarse_page_index++) {
936 u32 j;
937 struct tf_coarse_page_table *coarse_page_table =
938 shmem_desc->coarse_pg_table[coarse_page_index];
939
940 printk(KERN_DEBUG " Descriptor=%p address=%p index=%d\n",
941 coarse_page_table,
942 coarse_page_table->descriptors,
943 coarse_page_index);
944 for (j = 0;
945 j < TF_DESCRIPTOR_TABLE_CAPACITY;
946 j += 8) {
947 int k;
948 printk(KERN_DEBUG " ");
949 for (k = j; k < j + 8; k++)
950 printk(KERN_DEBUG "0x%08X ",
951 coarse_page_table->descriptors[k]);
952 printk(KERN_DEBUG "\n");
953 }
954 }
955 printk(KERN_DEBUG "ntf_fill_descriptor_table() - done\n\n");
956#endif
957
958 return 0;
959
960error:
961 tf_cleanup_shared_memory(
962 alloc_context,
963 shmem_desc,
964 0);
965
966 return ret;
967}
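/*
 * Editor's note -- minimal usage sketch of tf_fill_descriptor_table()
 * paired with tf_cleanup_shared_memory() (illustration only; assumes
 * shmem_desc->type is TF_SHMEM_TYPE_REGISTERED_SHMEM so coarse tables are
 * allocated on demand, and simplifies the real registration path).
 */
#if 0	/* illustration only */
static int describe_user_buffer_example(
	struct tf_coarse_page_table_allocation_context *ctx,
	struct tf_shmem_desc *shmem_desc,
	u32 user_vaddr, u32 size)
{
	u32 descriptors[TF_MAX_COARSE_PAGES];
	u32 offset;
	u32 count;
	int error;

	/* Pin the user pages and build the L1/L2 descriptors */
	error = tf_fill_descriptor_table(ctx, shmem_desc,
		user_vaddr, NULL, descriptors, size, &offset,
		true,			/* in_user_space */
		TF_SHMEM_TYPE_WRITE,	/* read/write sharing */
		&count);
	if (error != 0)
		return error;	/* the error path has already cleaned up */

	/* ... hand descriptors[], offset and count over to the Secure World ... */

	/* Unpin the pages and release the non-preallocated coarse tables */
	tf_cleanup_shared_memory(ctx, shmem_desc, 0);
	return 0;
}
#endif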
968
969
970/*----------------------------------------------------------------------------
971 * Standard communication operations
972 *----------------------------------------------------------------------------*/
973
974u8 *tf_get_description(struct tf_comm *comm)
975{
976 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
977 return comm->l1_buffer->version_description;
978
979 return NULL;
980}
981
982/*
983 * Returns a non-zero value if the specified S-timeout has expired, zero
984 * otherwise.
985 *
986 * The placeholder referenced by relative_timeout_jiffies gives the relative
987 * timeout from now in jiffies. It is set to zero if the S-timeout has expired,
988 * or to MAX_SCHEDULE_TIMEOUT if the S-timeout is infinite.
989 */
990static int tf_test_s_timeout(
991 u64 timeout,
992 signed long *relative_timeout_jiffies)
993{
994 struct timeval now;
995 u64 time64;
996
997 *relative_timeout_jiffies = 0;
998
999 /* immediate timeout */
1000 if (timeout == TIME_IMMEDIATE)
1001 return 1;
1002
1003 /* infinite timeout */
1004 if (timeout == TIME_INFINITE) {
1005 dprintk(KERN_DEBUG "tf_test_s_timeout: "
1006 "timeout is infinite\n");
1007 *relative_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
1008 return 0;
1009 }
1010
1011 do_gettimeofday(&now);
1012 time64 = now.tv_sec;
1013 /* will not overflow as operations are done on 64bit values */
1014 time64 = (time64 * 1000) + (now.tv_usec / 1000);
1015
1016 /* timeout expired */
1017 if (time64 >= timeout) {
1018 dprintk(KERN_DEBUG "tf_test_s_timeout: timeout expired\n");
1019 return 1;
1020 }
1021
1022 /*
1023 * finite timeout, compute relative_timeout_jiffies
1024 */
1025 /* will not overflow as time64 < timeout */
1026 timeout -= time64;
1027
1028 /* guarantee *relative_timeout_jiffies is a valid timeout */
1029 if ((timeout >> 32) != 0)
1030 *relative_timeout_jiffies = MAX_JIFFY_OFFSET;
1031 else
1032 *relative_timeout_jiffies =
1033 msecs_to_jiffies((unsigned int) timeout);
1034
1035 dprintk(KERN_DEBUG "tf_test_s_timeout: timeout is 0x%lx\n",
1036 *relative_timeout_jiffies);
1037 return 0;
1038}
1039
1040static void tf_copy_answers(struct tf_comm *comm)
1041{
1042 u32 first_answer;
1043 u32 first_free_answer;
1044 struct tf_answer_struct *answerStructureTemp;
1045
1046 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1047 spin_lock(&comm->lock);
1048 first_free_answer = tf_read_reg32(
1049 &comm->l1_buffer->first_free_answer);
1050 first_answer = tf_read_reg32(
1051 &comm->l1_buffer->first_answer);
1052
1053 while (first_answer != first_free_answer) {
1054 /* answer queue not empty */
1055 union tf_answer sComAnswer;
1056 struct tf_answer_header header;
1057
1058 /*
1059 * the size of the command in words of 32bit, not in
1060 * bytes
1061 */
1062 u32 command_size;
1063 u32 i;
1064 u32 *temp = (uint32_t *) &header;
1065
1066 dprintk(KERN_INFO
1067 "[pid=%d] tf_copy_answers(%p): "
1068 "Read answers from L1\n",
1069 current->pid, comm);
1070
1071 /* Read the answer header */
1072 for (i = 0;
1073 i < sizeof(struct tf_answer_header)/sizeof(u32);
1074 i++)
1075 temp[i] = comm->l1_buffer->answer_queue[
1076 (first_answer + i) %
1077 TF_S_ANSWER_QUEUE_CAPACITY];
1078
1079 /* Read the answer from the L1_Buffer*/
1080 command_size = header.message_size +
1081 sizeof(struct tf_answer_header)/sizeof(u32);
1082 temp = (uint32_t *) &sComAnswer;
1083 for (i = 0; i < command_size; i++)
1084 temp[i] = comm->l1_buffer->answer_queue[
1085 (first_answer + i) %
1086 TF_S_ANSWER_QUEUE_CAPACITY];
1087
1088 answerStructureTemp = (struct tf_answer_struct *)
1089 sComAnswer.header.operation_id;
1090
1091 tf_dump_answer(&sComAnswer);
1092
1093 memcpy(answerStructureTemp->answer, &sComAnswer,
1094 command_size * sizeof(u32));
1095 answerStructureTemp->answer_copied = true;
1096
1097 first_answer += command_size;
1098 tf_write_reg32(&comm->l1_buffer->first_answer,
1099 first_answer);
1100 }
1101 spin_unlock(&(comm->lock));
1102 }
1103}
1104
1105static void tf_copy_command(
1106 struct tf_comm *comm,
1107 union tf_command *command,
1108 struct tf_connection *connection,
1109 enum TF_COMMAND_STATE *command_status)
1110{
1111 if ((test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
1112 && (command != NULL)) {
1113 /*
1114 * Write the message in the message queue.
1115 */
1116
1117 if (*command_status == TF_COMMAND_STATE_PENDING) {
1118 u32 command_size;
1119 u32 queue_words_count;
1120 u32 i;
1121 u32 first_free_command;
1122 u32 first_command;
1123
1124 spin_lock(&comm->lock);
1125
1126 first_command = tf_read_reg32(
1127 &comm->l1_buffer->first_command);
1128 first_free_command = tf_read_reg32(
1129 &comm->l1_buffer->first_free_command);
1130
1131 queue_words_count = first_free_command - first_command;
1132 command_size = command->header.message_size +
1133 sizeof(struct tf_command_header)/sizeof(u32);
1134 if ((queue_words_count + command_size) <
1135 TF_N_MESSAGE_QUEUE_CAPACITY) {
1136 /*
1137 * Command queue is not full.
1138 * If the Command queue is full,
1139 * the command will be copied at
1140 * another iteration
1141 * of the current function.
1142 */
1143
1144 /*
1145 * Change the conn state
1146 */
1147 if (connection == NULL)
1148 goto copy;
1149
1150 spin_lock(&(connection->state_lock));
1151
1152 if ((connection->state ==
1153 TF_CONN_STATE_NO_DEVICE_CONTEXT)
1154 &&
1155 (command->header.message_type ==
1156 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1157
1158 dprintk(KERN_INFO
1159 "tf_copy_command(%p):"
1160 "Conn state is DEVICE_CONTEXT_SENT\n",
1161 connection);
1162 connection->state =
1163 TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT;
1164 } else if ((connection->state !=
1165 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1166 &&
1167 (command->header.message_type !=
1168 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT)) {
1169 /* The connection
1170 * is no longer valid.
1171 * We may not send any command on it,
1172 * not even another
1173 * DESTROY_DEVICE_CONTEXT.
1174 */
1175 dprintk(KERN_INFO
1176 "[pid=%d] tf_copy_command(%p): "
1177 "Connection no longer valid."
1178 "ABORT\n",
1179 current->pid, connection);
1180 *command_status =
1181 TF_COMMAND_STATE_ABORTED;
1182 spin_unlock(
1183 &(connection->state_lock));
1184 spin_unlock(
1185 &comm->lock);
1186 return;
1187 } else if (
1188 (command->header.message_type ==
1189 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) &&
1190 (connection->state ==
1191 TF_CONN_STATE_VALID_DEVICE_CONTEXT)
1192 ) {
1193 dprintk(KERN_INFO
1194 "[pid=%d] tf_copy_command(%p): "
1195 "Conn state is "
1196 "DESTROY_DEVICE_CONTEXT_SENT\n",
1197 current->pid, connection);
1198 connection->state =
1199 TF_CONN_STATE_DESTROY_DEVICE_CONTEXT_SENT;
1200 }
1201 spin_unlock(&(connection->state_lock));
1202copy:
1203 /*
1204 * Copy the command to L1 Buffer
1205 */
1206 dprintk(KERN_INFO
1207 "[pid=%d] tf_copy_command(%p): "
1208 "Write Message in the queue\n",
1209 current->pid, command);
1210 tf_dump_command(command);
1211
1212 for (i = 0; i < command_size; i++)
1213 comm->l1_buffer->command_queue[
1214 (first_free_command + i) %
1215 TF_N_MESSAGE_QUEUE_CAPACITY] =
1216 ((uint32_t *) command)[i];
1217
1218 *command_status =
1219 TF_COMMAND_STATE_SENT;
1220 first_free_command += command_size;
1221
1222 tf_write_reg32(
1223 &comm->
1224 l1_buffer->first_free_command,
1225 first_free_command);
1226 }
1227 spin_unlock(&comm->lock);
1228 }
1229 }
1230}
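/*
 * Editor's note -- summary of the command/answer life cycle implemented by
 * tf_copy_command(), tf_copy_answers() and tf_send_recv() below:
 *
 *   PENDING -> SENT     the command is written into the L1 command queue;
 *                       header.operation_id carries a pointer to the
 *                       caller's struct tf_answer_struct
 *   SENT -> answered    tf_copy_answers() finds the answer through that
 *                       operation_id, copies it and sets answer_copied
 *   PENDING -> ABORTED  the connection became invalid before the command
 *                       could be sent; tf_send_recv() returns -EINTR
 */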
1231
1232/*
1233 * Sends the specified message through the specified communication channel.
1234 *
1235 * This function sends the command and waits for the answer
1236 *
1237 * Returns zero upon successful completion, or an appropriate error code upon
1238 * failure.
1239 */
1240static int tf_send_recv(struct tf_comm *comm,
1241 union tf_command *command,
1242 struct tf_answer_struct *answerStruct,
1243 struct tf_connection *connection,
1244 int bKillable
1245 )
1246{
1247 int result;
1248 u64 timeout;
1249 signed long nRelativeTimeoutJiffies;
1250 bool wait_prepared = false;
1251 enum TF_COMMAND_STATE command_status = TF_COMMAND_STATE_PENDING;
1252 DEFINE_WAIT(wait);
1253#ifdef CONFIG_FREEZER
1254 unsigned long saved_flags;
1255#endif
1256 dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p)\n",
1257 current->pid, command);
1258
1259#ifdef CONFIG_TF_ZEBRA
1260 tf_clock_timer_start();
1261#endif
1262
1263#ifdef CONFIG_FREEZER
1264 saved_flags = current->flags;
1265 current->flags |= PF_FREEZER_NOSIG;
1266#endif
1267
1268 /*
1269 * Read all answers from the answer queue
1270 */
1271copy_answers:
1272 tf_copy_answers(comm);
1273
1274 tf_copy_command(comm, command, connection, &command_status);
1275
1276 /*
1277 * Notify all waiting threads
1278 */
1279 wake_up(&(comm->wait_queue));
1280
1281#ifdef CONFIG_FREEZER
1282 if (unlikely(freezing(current))) {
1283
1284 dprintk(KERN_INFO
1285 "Entering refrigerator.\n");
1286 refrigerator();
1287 dprintk(KERN_INFO
1288 "Left refrigerator.\n");
1289 goto copy_answers;
1290 }
1291#endif
1292
1293#ifndef CONFIG_PREEMPT
1294 if (need_resched())
1295 schedule();
1296#endif
1297
1298#ifdef CONFIG_TF_ZEBRA
1299 /*
1300 * Handle RPC (if any)
1301 */
1302 if (tf_rpc_execute(comm) == RPC_NON_YIELD)
1303 goto schedule_secure_world;
1304#endif
1305
1306 /*
1307 * Join wait queue
1308 */
1309 /*dprintk(KERN_INFO "[pid=%d] tf_send_recv(%p): Prepare to wait\n",
1310 current->pid, command);*/
1311 prepare_to_wait(&comm->wait_queue, &wait,
1312 bKillable ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
1313 wait_prepared = true;
1314
1315 /*
1316 * Check if our answer is available
1317 */
1318 if (command_status == TF_COMMAND_STATE_ABORTED) {
1319 /* Not waiting for an answer, return error code */
1320 result = -EINTR;
1321 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1322 "Command status is ABORTED."
1323 "Exit with 0x%x\n",
1324 current->pid, result);
1325 goto exit;
1326 }
1327 if (answerStruct->answer_copied) {
1328 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1329 "Received answer (type 0x%02X)\n",
1330 current->pid,
1331 answerStruct->answer->header.message_type);
1332 result = 0;
1333 goto exit;
1334 }
1335
1336 /*
1337 * Check if a signal is pending
1338 */
1339 if (bKillable && (sigkill_pending())) {
1340 if (command_status == TF_COMMAND_STATE_PENDING)
1341 /*Command was not sent. */
1342 result = -EINTR;
1343 else
1344 /* Command was sent but no answer was received yet. */
1345 result = -EIO;
1346
1347 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1348 "Signal Pending. Return error %d\n",
1349 current->pid, result);
1350 goto exit;
1351 }
1352
1353 /*
1354 * Check if secure world is schedulable. It is schedulable if at
1355 * least one of the following conditions holds:
1356 * + it is still initializing (TF_COMM_FLAG_L1_SHARED_ALLOCATED
1357 * is not set);
1358 * + there is a command in the queue;
1359 * + the secure world timeout is zero.
1360 */
1361 if (test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags))) {
1362 u32 first_free_command;
1363 u32 first_command;
1364 spin_lock(&comm->lock);
1365 first_command = tf_read_reg32(
1366 &comm->l1_buffer->first_command);
1367 first_free_command = tf_read_reg32(
1368 &comm->l1_buffer->first_free_command);
1369 spin_unlock(&comm->lock);
1370 tf_read_timeout(comm, &timeout);
1371 if ((first_free_command == first_command) &&
1372 (tf_test_s_timeout(timeout,
1373 &nRelativeTimeoutJiffies) == 0))
1374 /*
1375 * If command queue is empty and if timeout has not
1376 * expired secure world is not schedulable
1377 */
1378 goto wait;
1379 }
1380
1381 finish_wait(&comm->wait_queue, &wait);
1382 wait_prepared = false;
1383
1384 /*
1385 * Yield to the Secure World
1386 */
1387#ifdef CONFIG_TF_ZEBRA
1388schedule_secure_world:
1389#endif
1390
1391 result = tf_schedule_secure_world(comm);
1392 if (result < 0)
1393 goto exit;
1394 goto copy_answers;
1395
1396wait:
1397 if (bKillable && (sigkill_pending())) {
1398 if (command_status == TF_COMMAND_STATE_PENDING)
1399 result = -EINTR; /* Command was not sent. */
1400 else
1401 /* Command was sent but no answer was received yet. */
1402 result = -EIO;
1403
1404 dprintk(KERN_ERR "[pid=%d] tf_send_recv: "
1405 "Signal Pending while waiting. Return error %d\n",
1406 current->pid, result);
1407 goto exit;
1408 }
1409
1410 if (nRelativeTimeoutJiffies == MAX_SCHEDULE_TIMEOUT)
1411 dprintk(KERN_INFO "[pid=%d] tf_send_recv: "
1412 "prepare to sleep infinitely\n", current->pid);
1413 else
1414 dprintk(KERN_INFO "tf_send_recv: "
1415 "prepare to sleep 0x%lx jiffies\n",
1416 nRelativeTimeoutJiffies);
1417
1418 /* go to sleep */
1419 if (schedule_timeout(nRelativeTimeoutJiffies) == 0)
1420 dprintk(KERN_INFO
1421 "tf_send_recv: timeout expired\n");
1422 else
1423 dprintk(KERN_INFO
1424 "tf_send_recv: signal delivered\n");
1425
1426 finish_wait(&comm->wait_queue, &wait);
1427 wait_prepared = false;
1428 goto copy_answers;
1429
1430exit:
1431 if (wait_prepared) {
1432 finish_wait(&comm->wait_queue, &wait);
1433 wait_prepared = false;
1434 }
1435
1436#ifdef CONFIG_FREEZER
1437 current->flags &= ~(PF_FREEZER_NOSIG);
1438 current->flags |= (saved_flags & PF_FREEZER_NOSIG);
1439#endif
1440
1441 return result;
1442}
1443
1444/*
1445 * Sends the specified message through the specified communication channel.
1446 *
1447 * This function sends the message and waits for the corresponding answer
1448 * It may return if a signal needs to be delivered.
1449 *
1450 * Returns zero upon successful completion, or an appropriate error code upon
1451 * failure.
1452 */
1453int tf_send_receive(struct tf_comm *comm,
1454 union tf_command *command,
1455 union tf_answer *answer,
1456 struct tf_connection *connection,
1457 bool bKillable)
1458{
1459 int error;
1460 struct tf_answer_struct answerStructure;
1461#ifdef CONFIG_SMP
1462 long ret_affinity;
1463 cpumask_t saved_cpu_mask;
1464 cpumask_t local_cpu_mask = CPU_MASK_NONE;
1465#endif
1466
1467 answerStructure.answer = answer;
1468 answerStructure.answer_copied = false;
1469
1470 if (command != NULL)
1471 command->header.operation_id = (u32) &answerStructure;
1472
1473 dprintk(KERN_INFO "tf_send_receive\n");
1474
1475#ifdef CONFIG_TF_ZEBRA
1476 if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1477 dprintk(KERN_ERR "tf_send_receive(%p): "
1478 "Secure world not started\n", comm);
1479
1480 return -EFAULT;
1481 }
1482#endif
1483
1484 if (test_bit(TF_COMM_FLAG_TERMINATING, &(comm->flags)) != 0) {
1485 dprintk(KERN_DEBUG
1486 "tf_send_receive: Flag Terminating is set\n");
1487 return 0;
1488 }
1489
1490#ifdef CONFIG_SMP
1491 cpu_set(0, local_cpu_mask);
1492 sched_getaffinity(0, &saved_cpu_mask);
1493 ret_affinity = sched_setaffinity(0, &local_cpu_mask);
1494 if (ret_affinity != 0)
1495 dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX", ret_affinity);
1496#endif
1497
1498
1499 /*
1500 * Send the command
1501 */
1502 error = tf_send_recv(comm,
1503 command, &answerStructure, connection, bKillable);
1504
1505 if (!bKillable && sigkill_pending()) {
1506 if ((command->header.message_type ==
1507 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) &&
1508 (answer->create_device_context.error_code ==
1509 S_SUCCESS)) {
1510
1511 /*
1512 * CREATE_DEVICE_CONTEXT was interrupted.
1513 */
1514 dprintk(KERN_INFO "tf_send_receive: "
1515 "sending DESTROY_DEVICE_CONTEXT\n");
1516 answerStructure.answer = answer;
1517 answerStructure.answer_copied = false;
1518
1519 command->header.message_type =
1520 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1521 command->header.message_size =
1522 (sizeof(struct
1523 tf_command_destroy_device_context) -
1524 sizeof(struct tf_command_header))/sizeof(u32);
1525 command->header.operation_id =
1526 (u32) &answerStructure;
1527 command->destroy_device_context.device_context =
1528 answer->create_device_context.
1529 device_context;
1530
1531 goto destroy_context;
1532 }
1533 }
1534
1535 if (error == 0) {
1536 /*
1537 * tf_send_recv returned Success.
1538 */
1539 if (command->header.message_type ==
1540 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT) {
1541 spin_lock(&(connection->state_lock));
1542 connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
1543 spin_unlock(&(connection->state_lock));
1544 } else if (command->header.message_type ==
1545 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1546 spin_lock(&(connection->state_lock));
1547 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1548 spin_unlock(&(connection->state_lock));
1549 }
1550 } else if (error == -EINTR) {
1551 /*
1552 * No command was sent, return failure.
1553 */
1554 dprintk(KERN_ERR
1555 "tf_send_receive: "
1556 "tf_send_recv failed (error %d) !\n",
1557 error);
1558 } else if (error == -EIO) {
1559 /*
1560 * A command was sent but its answer is still pending.
1561 */
1562
1563 /* means bKillable is true */
1564 dprintk(KERN_ERR
1565 "tf_send_receive: "
1566 "tf_send_recv interrupted (error %d)."
1567 "Send DESTROY_DEVICE_CONTEXT.\n", error);
1568
1569 /* Send the DESTROY_DEVICE_CONTEXT. */
1570 answerStructure.answer = answer;
1571 answerStructure.answer_copied = false;
1572
1573 command->header.message_type =
1574 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1575 command->header.message_size =
1576 (sizeof(struct tf_command_destroy_device_context) -
1577 sizeof(struct tf_command_header))/sizeof(u32);
1578 command->header.operation_id =
1579 (u32) &answerStructure;
1580 command->destroy_device_context.device_context =
1581 connection->device_context;
1582
1583 error = tf_send_recv(comm,
1584 command, &answerStructure, connection, false);
1585 if (error == -EINTR) {
1586 /*
1587 * Another thread already sent
1588 * DESTROY_DEVICE_CONTEXT.
1589 * We must still wait for the answer
1590 * to the original command.
1591 */
1592 command = NULL;
1593 goto destroy_context;
1594 } else {
1595 /* An answer was received.
1596 * Check if it is the answer
1597 * to the DESTROY_DEVICE_CONTEXT.
1598 */
1599 spin_lock(&comm->lock);
1600 if (answer->header.message_type !=
1601 TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT) {
1602 answerStructure.answer_copied = false;
1603 }
1604 spin_unlock(&comm->lock);
1605 if (!answerStructure.answer_copied) {
1606 /* Answer to DESTROY_DEVICE_CONTEXT
1607 * was not yet received.
1608 * Wait for the answer.
1609 */
1610 dprintk(KERN_INFO
1611 "[pid=%d] tf_send_receive:"
1612 "Answer to DESTROY_DEVICE_CONTEXT"
1613 "not yet received.Retry\n",
1614 current->pid);
1615 command = NULL;
1616 goto destroy_context;
1617 }
1618 }
1619 }
1620
1621 dprintk(KERN_INFO "tf_send_receive(): Message answer ready\n");
1622 goto exit;
1623
1624destroy_context:
1625 error = tf_send_recv(comm,
1626 command, &answerStructure, connection, false);
1627
1628 /*
1629 * tf_send_recv cannot return an error because
1630 * it's not killable and not within a connection
1631 */
1632 BUG_ON(error != 0);
1633
1634 /* Reset the state, so a new CREATE DEVICE CONTEXT can be sent */
1635 spin_lock(&(connection->state_lock));
1636 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1637 spin_unlock(&(connection->state_lock));
1638
1639exit:
1640
1641#ifdef CONFIG_SMP
1642 ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
1643 if (ret_affinity != 0)
1644 dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX", ret_affinity);
1645#endif
1646 return error;
1647}
1648
1649/*----------------------------------------------------------------------------
1650 * Power management
1651 *----------------------------------------------------------------------------*/
1652
1653
1654/*
1655 * Handles all the power management calls.
1656 * The operation is the type of power management
1657 * operation to be performed.
1658 *
1659 * This routine will only return if a failure occurred or if
1660 * the required power management is of type "resume".
1661 * "Hibernate" and "Shutdown" should lock when doing the
1662 * corresponding SMC to the Secure World
1663 */
1664int tf_power_management(struct tf_comm *comm,
1665 enum TF_POWER_OPERATION operation)
1666{
1667 u32 status;
1668 int error = 0;
1669
1670 dprintk(KERN_INFO "tf_power_management(%d)\n", operation);
1671
1672#ifdef CONFIG_TF_ZEBRA
1673 if (!test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
1674 dprintk(KERN_INFO "tf_power_management(%p): "
1675 "succeeded (not started)\n", comm);
1676
1677 return 0;
1678 }
1679#endif
1680
1681 status = ((tf_read_reg32(&(comm->l1_buffer->status_s))
1682 & TF_STATUS_POWER_STATE_MASK)
1683 >> TF_STATUS_POWER_STATE_SHIFT);
1684
1685 switch (operation) {
1686 case TF_POWER_OPERATION_SHUTDOWN:
1687 switch (status) {
1688 case TF_POWER_MODE_ACTIVE:
1689 error = tf_pm_shutdown(comm);
1690
1691 if (error) {
1692 dprintk(KERN_ERR "tf_power_management(): "
1693 "Failed with error code 0x%08x\n",
1694 error);
1695 goto error;
1696 }
1697 break;
1698
1699 default:
1700 goto not_allowed;
1701 }
1702 break;
1703
1704 case TF_POWER_OPERATION_HIBERNATE:
1705 switch (status) {
1706 case TF_POWER_MODE_ACTIVE:
1707 error = tf_pm_hibernate(comm);
1708
1709 if (error) {
1710 dprintk(KERN_ERR "tf_power_management(): "
1711 "Failed with error code 0x%08x\n",
1712 error);
1713 goto error;
1714 }
1715 break;
1716
1717 default:
1718 goto not_allowed;
1719 }
1720 break;
1721
1722 case TF_POWER_OPERATION_RESUME:
1723 error = tf_pm_resume(comm);
1724
1725 if (error != 0) {
1726 dprintk(KERN_ERR "tf_power_management(): "
1727 "Failed with error code 0x%08x\n",
1728 error);
1729 goto error;
1730 }
1731 break;
1732 }
1733
1734 dprintk(KERN_INFO "tf_power_management(): succeeded\n");
1735 return 0;
1736
1737not_allowed:
1738 dprintk(KERN_ERR "tf_power_management(): "
1739 "Power command not allowed in current "
1740 "Secure World state %d\n", status);
1741 error = -ENOTTY;
1742error:
1743 return error;
1744}
1745
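/*
 * Editor's note -- illustrative sketch of how suspend/resume hooks are
 * expected to forward to tf_power_management(); the hook names below are
 * hypothetical, the real callbacks live elsewhere in the driver.
 */
#if 0	/* illustration only */
static int tf_suspend_example(struct tf_comm *comm)
{
	/* Returns only once the Secure World has acknowledged hibernation */
	return tf_power_management(comm, TF_POWER_OPERATION_HIBERNATE);
}

static int tf_resume_example(struct tf_comm *comm)
{
	return tf_power_management(comm, TF_POWER_OPERATION_RESUME);
}
#endif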