Diffstat (limited to 'security/tf_driver/tf_conn.c')
 security/tf_driver/tf_conn.c | 1574 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1574 insertions(+), 0 deletions(-)
diff --git a/security/tf_driver/tf_conn.c b/security/tf_driver/tf_conn.c
new file mode 100644
index 00000000000..3148fec4635
--- /dev/null
+++ b/security/tf_driver/tf_conn.c
@@ -0,0 +1,1574 @@
1/**
2 * Copyright (c) 2011 Trusted Logic S.A.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
17 * MA 02111-1307 USA
18 */
19
20#include <linux/atomic.h>
21#include <linux/uaccess.h>
22#include <linux/delay.h>
23#include <linux/errno.h>
24#include <linux/list.h>
25#include <linux/mm.h>
26#include <linux/pagemap.h>
27#include <linux/stddef.h>
28#include <linux/types.h>
29
30#include "s_version.h"
31
32#include "tf_protocol.h"
33#include "tf_defs.h"
34#include "tf_util.h"
35#include "tf_comm.h"
36#include "tf_conn.h"
37
38#ifdef CONFIG_TF_ZEBRA
39#include "tf_crypto.h"
40#endif
41
42#ifdef CONFIG_ANDROID
43#define TF_PRIVILEGED_UID_GID 1000 /* Android system AID */
44#else
45#define TF_PRIVILEGED_UID_GID 0
46#endif
47
48/*----------------------------------------------------------------------------
49 * Management of the shared memory blocks.
50 *
51 * Shared memory blocks are the blocks registered through
52 * the commands REGISTER_SHARED_MEMORY and POWER_MANAGEMENT
53 *----------------------------------------------------------------------------*/
54
55/**
 56 * Unmaps a shared memory block
57 **/
58void tf_unmap_shmem(
59 struct tf_connection *connection,
60 struct tf_shmem_desc *shmem_desc,
61 u32 full_cleanup)
62{
63 /* check shmem_desc contains a descriptor */
64 if (shmem_desc == NULL)
65 return;
66
67 dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);
68
69retry:
70 mutex_lock(&(connection->shmem_mutex));
71 if (atomic_read(&shmem_desc->ref_count) > 1) {
72 /*
 73		 * Shared mem still in use, wait for other operations to
 74		 * complete before actually unmapping it.
75 */
76 dprintk(KERN_INFO "Descriptor in use\n");
77 mutex_unlock(&(connection->shmem_mutex));
78 schedule();
79 goto retry;
80 }
81
82 tf_cleanup_shared_memory(
83 &(connection->cpt_alloc_context),
84 shmem_desc,
85 full_cleanup);
86
87 list_del(&(shmem_desc->list));
88
89 if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
90 (full_cleanup != 0)) {
91 internal_kfree(shmem_desc);
92
93 atomic_dec(&(connection->shmem_count));
94 } else {
 95		/*
 96		 * This is a preallocated shared memory block; add it back
 97		 * to the free list. Since the device context is unmapped
 98		 * last, it is always the first element of the free list
 99		 * if no device context has been created.
 100		 */
101 shmem_desc->block_identifier = 0;
102 list_add(&(shmem_desc->list), &(connection->free_shmem_list));
103 }
104
105 mutex_unlock(&(connection->shmem_mutex));
106}
107
108
 109/**
 110 * Finds the first available slot for a new block of shared memory
 111 * and maps the user buffer:
 112 * - fills 'descriptors' with L1 descriptors,
 113 * - updates the buffer_start_offset and buffer_size fields,
 114 * - sets *shmem_desc to the mapped shared memory descriptor.
 115 **/
116int tf_map_shmem(
117 struct tf_connection *connection,
118 u32 buffer,
119 /* flags for read-write access rights on the memory */
120 u32 flags,
121 bool in_user_space,
122 u32 descriptors[TF_MAX_COARSE_PAGES],
123 u32 *buffer_start_offset,
124 u32 buffer_size,
125 struct tf_shmem_desc **shmem_desc,
126 u32 *descriptor_count)
127{
128 struct tf_shmem_desc *desc = NULL;
129 int error;
130
131 dprintk(KERN_INFO "tf_map_shmem(%p, %p, flags = 0x%08x)\n",
132 connection,
133 (void *) buffer,
134 flags);
135
136 mutex_lock(&(connection->shmem_mutex));
137
138 /*
139 * Check the list of free shared memory
140 * is not empty
141 */
142 if (list_empty(&(connection->free_shmem_list))) {
143 if (atomic_read(&(connection->shmem_count)) ==
144 TF_SHMEM_MAX_COUNT) {
145 printk(KERN_ERR "tf_map_shmem(%p):"
146 " maximum shared memories already registered\n",
147 connection);
148 error = -ENOMEM;
149 goto error;
150 }
151
152 /* no descriptor available, allocate a new one */
153
154 desc = (struct tf_shmem_desc *) internal_kmalloc(
155 sizeof(*desc), GFP_KERNEL);
156 if (desc == NULL) {
157 printk(KERN_ERR "tf_map_shmem(%p):"
158 " failed to allocate descriptor\n",
159 connection);
160 error = -ENOMEM;
161 goto error;
162 }
163
164 /* Initialize the structure */
165 desc->type = TF_SHMEM_TYPE_REGISTERED_SHMEM;
166 atomic_set(&desc->ref_count, 1);
167 INIT_LIST_HEAD(&(desc->list));
168
169 atomic_inc(&(connection->shmem_count));
170 } else {
171 /* take the first free shared memory descriptor */
172 desc = list_first_entry(&(connection->free_shmem_list),
173 struct tf_shmem_desc, list);
174 list_del(&(desc->list));
175 }
176
177 /* Add the descriptor to the used list */
178 list_add(&(desc->list), &(connection->used_shmem_list));
179
180 error = tf_fill_descriptor_table(
181 &(connection->cpt_alloc_context),
182 desc,
183 buffer,
184 connection->vmas,
185 descriptors,
186 buffer_size,
187 buffer_start_offset,
188 in_user_space,
189 flags,
190 descriptor_count);
191
192 if (error != 0) {
193 dprintk(KERN_ERR "tf_map_shmem(%p):"
194 " tf_fill_descriptor_table failed with error "
195 "code %d!\n",
196 connection,
197 error);
198 goto error;
199 }
200 desc->client_buffer = (u8 *) buffer;
201
202 /*
203 * Successful completion.
204 */
205 *shmem_desc = desc;
206 mutex_unlock(&(connection->shmem_mutex));
207 dprintk(KERN_DEBUG "tf_map_shmem: success\n");
208 return 0;
209
210
211 /*
212 * Error handling.
213 */
214error:
215 mutex_unlock(&(connection->shmem_mutex));
216 dprintk(KERN_ERR "tf_map_shmem: failure with error code %d\n",
217 error);
218
219 tf_unmap_shmem(
220 connection,
221 desc,
222 0);
223
224 return error;
225}
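/*
 * Illustrative sketch only (hence #if 0, not part of the driver): the
 * intended lifecycle of a block handled by the two functions above,
 * assuming a connection that has already passed tf_init_shared_memory().
 * Names and flags are the ones used elsewhere in this file.
 */
#if 0
static int tf_shmem_lifecycle_example(struct tf_connection *connection,
	u32 buffer, u32 size)
{
	u32 descriptors[TF_MAX_COARSE_PAGES];
	u32 offset;
	u32 descriptor_count;
	struct tf_shmem_desc *desc;
	int error;

	/* Map: reuses a preallocated descriptor or allocates a new one */
	error = tf_map_shmem(connection, buffer,
		TF_SHMEM_TYPE_READ | TF_SHMEM_TYPE_WRITE,
		false, /* kernel-space buffer */
		descriptors, &offset, size, &desc, &descriptor_count);
	if (error != 0)
		return error;

	/* ... exchange messages referencing 'descriptors' ... */

	/* Unmap: a preallocated descriptor goes back to the free list */
	tf_unmap_shmem(connection, desc, 0);
	return 0;
}
#endif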
226
227
228
 229/* This function is a copy of the find_vma() function
 230from Linux kernel 2.6.15, with some fixes:
 231 - the memory block may end on vm_end
 232 - check that the full memory block is inside the memory area
 233 - guarantee that NULL is returned if no memory area is found */
234struct vm_area_struct *tf_find_vma(struct mm_struct *mm,
235 unsigned long addr, unsigned long size)
236{
237 struct vm_area_struct *vma = NULL;
238
239 dprintk(KERN_INFO
240 "tf_find_vma addr=0x%lX size=0x%lX\n", addr, size);
241
242 if (mm) {
243 /* Check the cache first. */
244 /* (Cache hit rate is typically around 35%.) */
245 vma = mm->mmap_cache;
246 if (!(vma && vma->vm_end >= (addr+size) &&
247 vma->vm_start <= addr)) {
248 struct rb_node *rb_node;
249
250 rb_node = mm->mm_rb.rb_node;
251 vma = NULL;
252
253 while (rb_node) {
254 struct vm_area_struct *vma_tmp;
255
256 vma_tmp = rb_entry(rb_node,
257 struct vm_area_struct, vm_rb);
258
259 dprintk(KERN_INFO
 260				"vma_tmp->vm_start=0x%lX "
261 "vma_tmp->vm_end=0x%lX\n",
262 vma_tmp->vm_start,
263 vma_tmp->vm_end);
264
265 if (vma_tmp->vm_end >= (addr+size)) {
266 vma = vma_tmp;
267 if (vma_tmp->vm_start <= addr)
268 break;
269
270 rb_node = rb_node->rb_left;
271 } else {
272 rb_node = rb_node->rb_right;
273 }
274 }
275
276 if (vma)
277 mm->mmap_cache = vma;
278 if (rb_node == NULL)
279 vma = NULL;
280 }
281 }
282 return vma;
283}
284
285int tf_validate_shmem_and_flags(
286 u32 shmem,
287 u32 shmem_size,
288 u32 flags)
289{
290 struct vm_area_struct *vma;
291 u32 chunk;
292
293 if (shmem_size == 0)
294 /* This is always valid */
295 return 0;
296
297 if ((shmem + shmem_size) < shmem)
298 /* Overflow */
299 return -EINVAL;
300
301 down_read(&current->mm->mmap_sem);
302
303 /*
304 * When looking for a memory address, split buffer into chunks of
305 * size=PAGE_SIZE.
306 */
307 chunk = PAGE_SIZE - (shmem & (PAGE_SIZE-1));
308 if (chunk > shmem_size)
309 chunk = shmem_size;
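	/*
	 * e.g. with PAGE_SIZE 0x1000, shmem 0x100f0 and shmem_size 0x2000:
	 * the first chunk is 0xf10 bytes (up to the next page boundary),
	 * then whole pages follow, so each page of the range gets exactly
	 * one tf_find_vma() lookup below.
	 */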
310
311 do {
312 vma = tf_find_vma(current->mm, shmem, chunk);
313
314 if (vma == NULL) {
315 dprintk(KERN_ERR "%s: area not found\n", __func__);
316 goto error;
317 }
318
319 if (flags & TF_SHMEM_TYPE_READ)
320 if (!(vma->vm_flags & VM_READ)) {
321 dprintk(KERN_ERR "%s: no read permission\n",
322 __func__);
323 goto error;
324 }
325 if (flags & TF_SHMEM_TYPE_WRITE)
326 if (!(vma->vm_flags & VM_WRITE)) {
327 dprintk(KERN_ERR "%s: no write permission\n",
328 __func__);
329 goto error;
330 }
331
332 shmem_size -= chunk;
333 shmem += chunk;
334 chunk = (shmem_size <= PAGE_SIZE ?
335 shmem_size : PAGE_SIZE);
336 } while (shmem_size != 0);
337
338 up_read(&current->mm->mmap_sem);
339 return 0;
340
341error:
342 up_read(&current->mm->mmap_sem);
343 return -EFAULT;
344}
345
346
347static int tf_map_temp_shmem(struct tf_connection *connection,
348 struct tf_command_param_temp_memref *temp_memref,
349 u32 param_type,
350 struct tf_shmem_desc **shmem_desc)
351{
352 u32 flags;
 353	int error = 0;
354 bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
355
356 dprintk(KERN_INFO "tf_map_temp_shmem(%p, "
357 "0x%08x[size=0x%08x], offset=0x%08x)\n",
358 connection,
359 temp_memref->descriptor,
360 temp_memref->size,
361 temp_memref->offset);
362
363 switch (param_type) {
364 case TF_PARAM_TYPE_MEMREF_TEMP_INPUT:
365 flags = TF_SHMEM_TYPE_READ;
366 break;
367 case TF_PARAM_TYPE_MEMREF_TEMP_OUTPUT:
368 flags = TF_SHMEM_TYPE_WRITE;
369 break;
370 case TF_PARAM_TYPE_MEMREF_TEMP_INOUT:
371 flags = TF_SHMEM_TYPE_WRITE | TF_SHMEM_TYPE_READ;
372 break;
373 default:
374 error = -EINVAL;
375 goto error;
376 }
377
378 if (temp_memref->descriptor == 0) {
379 /* NULL tmpref */
380 temp_memref->offset = 0;
381 *shmem_desc = NULL;
 382	} else if (temp_memref->size == 0) {
384 /* Empty tmpref */
385 temp_memref->offset = temp_memref->descriptor;
386 temp_memref->descriptor = 0;
387 temp_memref->size = 0;
388 *shmem_desc = NULL;
389 } else {
390 /* Map the temp shmem block */
391
392 u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
393 u32 descriptor_count;
394
395 if (in_user_space) {
396 error = tf_validate_shmem_and_flags(
397 temp_memref->descriptor,
398 temp_memref->size,
399 flags);
400 if (error != 0)
401 goto error;
402 }
403
404 error = tf_map_shmem(
405 connection,
406 temp_memref->descriptor,
407 flags,
408 in_user_space,
409 shared_mem_descriptors,
410 &(temp_memref->offset),
411 temp_memref->size,
412 shmem_desc,
413 &descriptor_count);
414 temp_memref->descriptor = shared_mem_descriptors[0];
415 }
416
417error:
418 return error;
419}
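/*
 * Summary of the three tmpref cases handled above: descriptor == 0 is a
 * NULL tmpref and nothing is mapped; descriptor != 0 with size == 0 is an
 * empty tmpref, whose user address is passed through 'offset' without
 * pinning any pages; otherwise the buffer is validated (for user-space
 * callers) and mapped through tf_map_shmem().
 */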
420
421/*
422 * Clean up a list of shared memory descriptors.
423 */
424static void tf_shared_memory_cleanup_list(
425 struct tf_connection *connection,
426 struct list_head *shmem_desc_list)
427{
428 while (!list_empty(shmem_desc_list)) {
429 struct tf_shmem_desc *shmem_desc;
430
431 shmem_desc = list_first_entry(shmem_desc_list,
432 struct tf_shmem_desc, list);
433
434 tf_unmap_shmem(connection, shmem_desc, 1);
435 }
436}
437
438
439/*
440 * Clean up the shared memory information in the connection.
441 * Releases all allocated pages.
442 */
443static void tf_cleanup_shared_memories(struct tf_connection *connection)
444{
 445	/* Clean up the lists of used and free descriptors.
 446	 * This is done outside the mutex, because
 447	 * tf_unmap_shmem() takes the mutex itself.
 448	 */
449 tf_shared_memory_cleanup_list(connection,
450 &connection->used_shmem_list);
451 tf_shared_memory_cleanup_list(connection,
452 &connection->free_shmem_list);
453
454 mutex_lock(&(connection->shmem_mutex));
455
456 /* Free the Vmas page */
457 if (connection->vmas) {
458 internal_free_page((unsigned long) connection->vmas);
459 connection->vmas = NULL;
460 }
461
462 tf_release_coarse_page_table_allocator(
463 &(connection->cpt_alloc_context));
464
465 mutex_unlock(&(connection->shmem_mutex));
466}
467
468
469/*
470 * Initialize the shared memory in a connection.
471 * Allocates the minimum memory to be provided
472 * for shared memory management
473 */
474int tf_init_shared_memory(struct tf_connection *connection)
475{
476 int error;
477 int i;
478 int coarse_page_index;
479
480 /*
481 * We only need to initialize special elements and attempt to allocate
482 * the minimum shared memory descriptors we want to support
483 */
484
485 mutex_init(&(connection->shmem_mutex));
486 INIT_LIST_HEAD(&(connection->free_shmem_list));
487 INIT_LIST_HEAD(&(connection->used_shmem_list));
488 atomic_set(&(connection->shmem_count), 0);
489
490 tf_init_coarse_page_table_allocator(
491 &(connection->cpt_alloc_context));
492
493
 494	/*
 495	 * Preallocate 3 shared memory descriptors to increase the
 496	 * chances that a connection succeeds in allocating shared mem
 497	 */
 498	for (i = 0; i < 3; i++) {
501 struct tf_shmem_desc *shmem_desc =
502 (struct tf_shmem_desc *) internal_kmalloc(
503 sizeof(*shmem_desc), GFP_KERNEL);
504
505 if (shmem_desc == NULL) {
506 printk(KERN_ERR "tf_init_shared_memory(%p):"
 507				" failed to preallocate descriptor %d\n",
508 connection,
509 i);
510 error = -ENOMEM;
511 goto error;
512 }
513
514 for (coarse_page_index = 0;
515 coarse_page_index < TF_MAX_COARSE_PAGES;
516 coarse_page_index++) {
517 struct tf_coarse_page_table *coarse_pg_table;
518
519 coarse_pg_table = tf_alloc_coarse_page_table(
520 &(connection->cpt_alloc_context),
521 TF_PAGE_DESCRIPTOR_TYPE_PREALLOCATED);
522
523 if (coarse_pg_table == NULL) {
524 printk(KERN_ERR "tf_init_shared_memory(%p)"
525 ": descriptor %d coarse page %d - "
526 "tf_alloc_coarse_page_table() "
527 "failed\n",
528 connection,
529 i,
530 coarse_page_index);
531 error = -ENOMEM;
532 goto error;
533 }
534
535 shmem_desc->coarse_pg_table[coarse_page_index] =
536 coarse_pg_table;
537 }
538 shmem_desc->coarse_pg_table_count = 0;
539
540 shmem_desc->type = TF_SHMEM_TYPE_PREALLOC_REGISTERED_SHMEM;
541 atomic_set(&shmem_desc->ref_count, 1);
542
 543		/*
 544		 * Add this preallocated descriptor to the list of free
 545		 * descriptors. Keep the device context specific one at the
 546		 * beginning of the list.
 547		 */
548 INIT_LIST_HEAD(&(shmem_desc->list));
549 list_add_tail(&(shmem_desc->list),
550 &(connection->free_shmem_list));
551 }
552
553 /* allocate memory for the vmas structure */
554 connection->vmas =
555 (struct vm_area_struct **) internal_get_zeroed_page(GFP_KERNEL);
556 if (connection->vmas == NULL) {
557 printk(KERN_ERR "tf_init_shared_memory(%p):"
558 " vmas - failed to get_zeroed_page\n",
559 connection);
560 error = -ENOMEM;
561 goto error;
562 }
563
564 return 0;
565
566error:
567 tf_cleanup_shared_memories(connection);
568 return error;
569}
570
571/*----------------------------------------------------------------------------
572 * Connection operations to the Secure World
573 *----------------------------------------------------------------------------*/
574
575int tf_create_device_context(
576 struct tf_connection *connection)
577{
578 union tf_command command;
579 union tf_answer answer;
580 int error = 0;
581
582 dprintk(KERN_INFO "tf_create_device_context(%p)\n",
583 connection);
584
585 command.create_device_context.message_type =
586 TF_MESSAGE_TYPE_CREATE_DEVICE_CONTEXT;
587 command.create_device_context.message_size =
588 (sizeof(struct tf_command_create_device_context)
589 - sizeof(struct tf_command_header))/sizeof(u32);
590 command.create_device_context.operation_id = (u32) &answer;
591 command.create_device_context.device_context_id = (u32) connection;
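	/*
	 * Note: the operation id is the address of the on-stack answer
	 * structure; presumably tf_send_receive() uses it to route the
	 * Secure World's answer back to this caller (see tf_comm.c).
	 */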
592
593 error = tf_send_receive(
594 &connection->dev->sm,
595 &command,
596 &answer,
597 connection,
598 true);
599
600 if ((error != 0) ||
601 (answer.create_device_context.error_code != S_SUCCESS))
602 goto error;
603
 604	/*
 605	 * CREATE_DEVICE_CONTEXT succeeded:
 606	 * store the device context handle and update the connection state
 607	 */
608 connection->device_context =
609 answer.create_device_context.device_context;
610 spin_lock(&(connection->state_lock));
611 connection->state = TF_CONN_STATE_VALID_DEVICE_CONTEXT;
612 spin_unlock(&(connection->state_lock));
613
614 /* successful completion */
615 dprintk(KERN_INFO "tf_create_device_context(%p):"
616 " device_context=0x%08x\n",
617 connection,
618 answer.create_device_context.device_context);
619 return 0;
620
621error:
622 if (error != 0) {
623 dprintk(KERN_ERR "tf_create_device_context failed with "
624 "error %d\n", error);
625 } else {
 626		/*
 627		 * We sent a CREATE_DEVICE_CONTEXT message. The state is now
 628		 * TF_CONN_STATE_CREATE_DEVICE_CONTEXT_SENT; it has to be
 629		 * reset if we ever want to send CREATE_DEVICE_CONTEXT again.
 630		 */
631 spin_lock(&(connection->state_lock));
632 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
633 spin_unlock(&(connection->state_lock));
634 dprintk(KERN_ERR "tf_create_device_context failed with "
635 "error_code 0x%08X\n",
636 answer.create_device_context.error_code);
637 if (answer.create_device_context.error_code ==
638 S_ERROR_OUT_OF_MEMORY)
639 error = -ENOMEM;
640 else
641 error = -EFAULT;
642 }
643
644 return error;
645}
646
647/* Check that the current application belongs to the
648 * requested GID */
 649static bool tf_check_gid(gid_t requested_gid)
 650{
 651	bool matched = false;
 652	if (requested_gid == current_egid()) {
 653		matched = true;
 654	} else {
 655		u32 i;
 656		/* Look in the supplementary GIDs */
 657		get_group_info(GROUP_INFO);
 658		for (i = 0; i < GROUP_INFO->ngroups; i++)
 659			if (requested_gid == GROUP_AT(GROUP_INFO, i))
 660				matched = true;
 661		put_group_info(GROUP_INFO);
 662	}
 663	return matched;
 664}
665
666/*
667 * Opens a client session to the Secure World
668 */
669int tf_open_client_session(
670 struct tf_connection *connection,
671 union tf_command *command,
672 union tf_answer *answer)
673{
674 int error = 0;
675 struct tf_shmem_desc *shmem_desc[4] = {NULL};
676 u32 i;
677
678 dprintk(KERN_INFO "tf_open_client_session(%p)\n", connection);
679
 680	/*
 681	 * Initialize the message size with no login data. This will be
 682	 * adjusted later in the cases below
 683	 */
684 command->open_client_session.message_size =
685 (sizeof(struct tf_command_open_client_session) - 20
686 - sizeof(struct tf_command_header))/4;
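	/*
	 * message_size is counted in 32-bit words and excludes the command
	 * header; the 20 bytes subtracted here correspond to the login_data
	 * field, so each case below adds back only the words it actually
	 * sends (1 word per UID/GID, 4 words for a truncated 16-byte hash,
	 * 5 words for a full SHA-1 hash).
	 */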
687
688 switch (command->open_client_session.login_type) {
689 case TF_LOGIN_PUBLIC:
690 /* Nothing to do */
691 break;
692
693 case TF_LOGIN_USER:
694 /*
695 * Send the EUID of the calling application in the login data.
696 * Update message size.
697 */
698 *(u32 *) &command->open_client_session.login_data =
699 current_euid();
700#ifndef CONFIG_ANDROID
701 command->open_client_session.login_type =
702 (u32) TF_LOGIN_USER_LINUX_EUID;
703#else
704 command->open_client_session.login_type =
705 (u32) TF_LOGIN_USER_ANDROID_EUID;
706#endif
707
708 /* Added one word */
709 command->open_client_session.message_size += 1;
710 break;
711
712 case TF_LOGIN_GROUP: {
713 /* Check requested GID */
714 gid_t requested_gid =
715 *(u32 *) command->open_client_session.login_data;
716
717 if (!tf_check_gid(requested_gid)) {
718 dprintk(KERN_ERR "tf_open_client_session(%p) "
719 "TF_LOGIN_GROUP: requested GID (0x%x) does "
 720				"not match the real eGID (0x%x) "
721 "or any of the supplementary GIDs\n",
722 connection, requested_gid, current_egid());
723 error = -EACCES;
724 goto error;
725 }
726#ifndef CONFIG_ANDROID
727 command->open_client_session.login_type =
728 TF_LOGIN_GROUP_LINUX_GID;
729#else
730 command->open_client_session.login_type =
731 TF_LOGIN_GROUP_ANDROID_GID;
732#endif
733
734 command->open_client_session.message_size += 1; /* GID */
735 break;
736 }
737
738#ifndef CONFIG_ANDROID
739 case TF_LOGIN_APPLICATION: {
740 /*
741 * Compute SHA-1 hash of the application fully-qualified path
742 * name. Truncate the hash to 16 bytes and send it as login
743 * data. Update message size.
744 */
745 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
746
747 error = tf_hash_application_path_and_data(pSHA1Hash,
748 NULL, 0);
749 if (error != 0) {
750 dprintk(KERN_ERR "tf_open_client_session: "
751 "error in tf_hash_application_path_and_data\n");
752 goto error;
753 }
754 memcpy(&command->open_client_session.login_data,
755 pSHA1Hash, 16);
756 command->open_client_session.login_type =
757 TF_LOGIN_APPLICATION_LINUX_PATH_SHA1_HASH;
758 /* 16 bytes */
759 command->open_client_session.message_size += 4;
760 break;
761 }
762#else
763 case TF_LOGIN_APPLICATION:
764 /*
765 * Send the real UID of the calling application in the login
766 * data. Update message size.
767 */
768 *(u32 *) &command->open_client_session.login_data =
769 current_uid();
770
771 command->open_client_session.login_type =
772 (u32) TF_LOGIN_APPLICATION_ANDROID_UID;
773
774 /* Added one word */
775 command->open_client_session.message_size += 1;
776 break;
777#endif
778
779#ifndef CONFIG_ANDROID
780 case TF_LOGIN_APPLICATION_USER: {
781 /*
782 * Compute SHA-1 hash of the concatenation of the application
783 * fully-qualified path name and the EUID of the calling
784 * application. Truncate the hash to 16 bytes and send it as
785 * login data. Update message size.
786 */
787 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
788
789 error = tf_hash_application_path_and_data(pSHA1Hash,
790 (u8 *) &(current_euid()), sizeof(current_euid()));
791 if (error != 0) {
792 dprintk(KERN_ERR "tf_open_client_session: "
793 "error in tf_hash_application_path_and_data\n");
794 goto error;
795 }
796 memcpy(&command->open_client_session.login_data,
797 pSHA1Hash, 16);
798 command->open_client_session.login_type =
799 TF_LOGIN_APPLICATION_USER_LINUX_PATH_EUID_SHA1_HASH;
800
801 /* 16 bytes */
802 command->open_client_session.message_size += 4;
803
804 break;
805 }
806#else
807 case TF_LOGIN_APPLICATION_USER:
808 /*
809 * Send the real UID and the EUID of the calling application in
810 * the login data. Update message size.
811 */
812 *(u32 *) &command->open_client_session.login_data =
813 current_uid();
814 *(u32 *) &command->open_client_session.login_data[4] =
815 current_euid();
816
817 command->open_client_session.login_type =
818 TF_LOGIN_APPLICATION_USER_ANDROID_UID_EUID;
819
820 /* Added two words */
821 command->open_client_session.message_size += 2;
822 break;
823#endif
824
825#ifndef CONFIG_ANDROID
826 case TF_LOGIN_APPLICATION_GROUP: {
827 /*
828 * Check requested GID. Compute SHA-1 hash of the concatenation
829 * of the application fully-qualified path name and the
830 * requested GID. Update message size
831 */
832 gid_t requested_gid;
833 u8 pSHA1Hash[SHA1_DIGEST_SIZE];
834
835 requested_gid = *(u32 *) &command->open_client_session.
836 login_data;
837
838 if (!tf_check_gid(requested_gid)) {
839 dprintk(KERN_ERR "tf_open_client_session(%p) "
840 "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
 841			"does not match the real eGID (0x%x) "
842 "or any of the supplementary GIDs\n",
843 connection, requested_gid, current_egid());
844 error = -EACCES;
845 goto error;
846 }
847
848 error = tf_hash_application_path_and_data(pSHA1Hash,
849 &requested_gid, sizeof(u32));
850 if (error != 0) {
851 dprintk(KERN_ERR "tf_open_client_session: "
852 "error in tf_hash_application_path_and_data\n");
853 goto error;
854 }
855
856 memcpy(&command->open_client_session.login_data,
857 pSHA1Hash, 16);
858 command->open_client_session.login_type =
859 TF_LOGIN_APPLICATION_GROUP_LINUX_PATH_GID_SHA1_HASH;
860
861 /* 16 bytes */
862 command->open_client_session.message_size += 4;
863 break;
864 }
865#else
866 case TF_LOGIN_APPLICATION_GROUP: {
867 /*
868 * Check requested GID. Send the real UID and the requested GID
869 * in the login data. Update message size.
870 */
871 gid_t requested_gid;
872
873 requested_gid = *(u32 *) &command->open_client_session.
874 login_data;
875
876 if (!tf_check_gid(requested_gid)) {
877 dprintk(KERN_ERR "tf_open_client_session(%p) "
878 "TF_LOGIN_APPLICATION_GROUP: requested GID (0x%x) "
 879			"does not match the real eGID (0x%x) "
880 "or any of the supplementary GIDs\n",
881 connection, requested_gid, current_egid());
882 error = -EACCES;
883 goto error;
884 }
885
886 *(u32 *) &command->open_client_session.login_data =
887 current_uid();
888 *(u32 *) &command->open_client_session.login_data[4] =
889 requested_gid;
890
891 command->open_client_session.login_type =
892 TF_LOGIN_APPLICATION_GROUP_ANDROID_UID_GID;
893
894 /* Added two words */
895 command->open_client_session.message_size += 2;
896
897 break;
898 }
899#endif
900
901 case TF_LOGIN_PRIVILEGED:
902 /* A privileged login may be performed only on behalf of the
903 kernel itself or on behalf of a process with euid=0 or
904 egid=0 or euid=system or egid=system. */
905 if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
906 dprintk(KERN_DEBUG "tf_open_client_session: "
907 "TF_LOGIN_PRIVILEGED for kernel API\n");
908 } else if ((current_euid() != TF_PRIVILEGED_UID_GID) &&
909 (current_egid() != TF_PRIVILEGED_UID_GID) &&
910 (current_euid() != 0) && (current_egid() != 0)) {
911 dprintk(KERN_ERR "tf_open_client_session: "
 912				"user %d, group %d not allowed to open "
913 "session with TF_LOGIN_PRIVILEGED\n",
914 current_euid(), current_egid());
915 error = -EACCES;
916 goto error;
917 } else {
918 dprintk(KERN_DEBUG "tf_open_client_session: "
919 "TF_LOGIN_PRIVILEGED for %u:%u\n",
920 current_euid(), current_egid());
921 }
922 command->open_client_session.login_type =
923 TF_LOGIN_PRIVILEGED;
924 break;
925
926 case TF_LOGIN_AUTHENTICATION: {
927 /*
928 * Compute SHA-1 hash of the application binary
929 * Send this hash as the login data (20 bytes)
930 */
931
932 u8 *hash;
933 hash = &(command->open_client_session.login_data[0]);
934
935 error = tf_get_current_process_hash(hash);
936 if (error != 0) {
937 dprintk(KERN_ERR "tf_open_client_session: "
938 "error in tf_get_current_process_hash\n");
939 goto error;
940 }
941 command->open_client_session.login_type =
942 TF_LOGIN_AUTHENTICATION_BINARY_SHA1_HASH;
943
944 /* 20 bytes */
945 command->open_client_session.message_size += 5;
946 break;
947 }
948
 949	case TF_LOGIN_PRIVILEGED_KERNEL:
 950		/* A kernel login may be performed only on behalf of the
 951		   kernel itself. */
 952		if (connection->owner == TF_CONNECTION_OWNER_KERNEL) {
 953			dprintk(KERN_DEBUG "tf_open_client_session: "
 954				"TF_LOGIN_PRIVILEGED_KERNEL for kernel API\n");
 957		} else {
 958			dprintk(KERN_ERR "tf_open_client_session: "
 959				"user %d, group %d not allowed to open "
 960				"session with TF_LOGIN_PRIVILEGED_KERNEL\n",
 961				current_euid(), current_egid());
 962			error = -EACCES;
 963			goto error;
 964		}
 965		command->open_client_session.login_type =
 966			TF_LOGIN_PRIVILEGED_KERNEL;
 967		break;
968
969 default:
970 dprintk(KERN_ERR "tf_open_client_session: "
971 "unknown login_type(%08X)\n",
972 command->open_client_session.login_type);
973 error = -EOPNOTSUPP;
974 goto error;
975 }
976
977 /* Map the temporary memory references */
978 for (i = 0; i < 4; i++) {
979 int param_type;
980 param_type = TF_GET_PARAM_TYPE(
981 command->open_client_session.param_types, i);
982 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
983 TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
984 == TF_PARAM_TYPE_MEMREF_FLAG) {
985 /* Map temp mem ref */
986 error = tf_map_temp_shmem(connection,
987 &command->open_client_session.
988 params[i].temp_memref,
989 param_type,
990 &shmem_desc[i]);
991 if (error != 0) {
992 dprintk(KERN_ERR "tf_open_client_session: "
993 "unable to map temporary memory block "
994 "(%08X)\n", error);
995 goto error;
996 }
997 }
998 }
999
1000 /* Fill the handle of the Device Context */
1001 command->open_client_session.device_context =
1002 connection->device_context;
1003
1004 error = tf_send_receive(
1005 &connection->dev->sm,
1006 command,
1007 answer,
1008 connection,
1009 true);
1010
1011error:
1012 /* Unmap the temporary memory references */
1013 for (i = 0; i < 4; i++)
1014 if (shmem_desc[i] != NULL)
1015 tf_unmap_shmem(connection, shmem_desc[i], 0);
1016
1017 if (error != 0)
1018 dprintk(KERN_ERR "tf_open_client_session returns %d\n",
1019 error);
1020 else
1021 dprintk(KERN_ERR "tf_open_client_session returns "
1022 "error_code 0x%08X\n",
1023 answer->open_client_session.error_code);
1024
1025 return error;
1026}
1027
1028
1029/*
1030 * Closes a client session from the Secure World
1031 */
1032int tf_close_client_session(
1033 struct tf_connection *connection,
1034 union tf_command *command,
1035 union tf_answer *answer)
1036{
1037 int error = 0;
1038
1039 dprintk(KERN_DEBUG "tf_close_client_session(%p)\n", connection);
1040
1041 command->close_client_session.message_size =
1042 (sizeof(struct tf_command_close_client_session) -
1043 sizeof(struct tf_command_header)) / 4;
1044 command->close_client_session.device_context =
1045 connection->device_context;
1046
1047 error = tf_send_receive(
1048 &connection->dev->sm,
1049 command,
1050 answer,
1051 connection,
1052 true);
1053
1054 if (error != 0)
1055 dprintk(KERN_ERR "tf_close_client_session returns %d\n",
1056 error);
1057 else
1058 dprintk(KERN_ERR "tf_close_client_session returns "
1059 "error 0x%08X\n",
1060 answer->close_client_session.error_code);
1061
1062 return error;
1063}
1064
1065
1066/*
 1067 * Registers a shared memory block with the Secure World
1068 */
1069int tf_register_shared_memory(
1070 struct tf_connection *connection,
1071 union tf_command *command,
1072 union tf_answer *answer)
1073{
1074 int error = 0;
1075 struct tf_shmem_desc *shmem_desc = NULL;
1076 bool in_user_space = connection->owner != TF_CONNECTION_OWNER_KERNEL;
1077 struct tf_command_register_shared_memory *msg =
1078 &command->register_shared_memory;
1079
1080 dprintk(KERN_INFO "tf_register_shared_memory(%p) "
1081 "%p[0x%08X][0x%08x]\n",
1082 connection,
1083 (void *)msg->shared_mem_descriptors[0],
1084 msg->shared_mem_size,
1085 (u32)msg->memory_flags);
1086
1087 if (in_user_space) {
1088 error = tf_validate_shmem_and_flags(
1089 msg->shared_mem_descriptors[0],
1090 msg->shared_mem_size,
1091 (u32)msg->memory_flags);
1092 if (error != 0)
1093 goto error;
1094 }
1095
1096 /* Initialize message_size with no descriptors */
1097 msg->message_size
1098 = (offsetof(struct tf_command_register_shared_memory,
1099 shared_mem_descriptors) -
1100 sizeof(struct tf_command_header)) / 4;
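	/*
	 * i.e. the fixed words of the message up to, but excluding, the
	 * descriptor array; the number of coarse page descriptors actually
	 * filled in by tf_map_shmem() is added back below.
	 */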
1101
1102 /* Map the shmem block and update the message */
1103 if (msg->shared_mem_size == 0) {
1104 /* Empty shared mem */
1105 msg->shared_mem_start_offset = msg->shared_mem_descriptors[0];
1106 } else {
1107 u32 descriptor_count;
1108 error = tf_map_shmem(
1109 connection,
1110 msg->shared_mem_descriptors[0],
1111 msg->memory_flags,
1112 in_user_space,
1113 msg->shared_mem_descriptors,
1114 &(msg->shared_mem_start_offset),
1115 msg->shared_mem_size,
1116 &shmem_desc,
1117 &descriptor_count);
1118 if (error != 0) {
1119 dprintk(KERN_ERR "tf_register_shared_memory: "
1120 "unable to map shared memory block\n");
1121 goto error;
1122 }
1123 msg->message_size += descriptor_count;
1124 }
1125
 1126	/*
 1127	 * Write the device context handle and the address of the shared
 1128	 * memory descriptor into the message
 1129	 */
1130 msg->device_context = connection->device_context;
1131 msg->block_id = (u32)shmem_desc;
1132
1133 /* Send the updated message */
1134 error = tf_send_receive(
1135 &connection->dev->sm,
1136 command,
1137 answer,
1138 connection,
1139 true);
1140
1141 if ((error != 0) ||
1142 (answer->register_shared_memory.error_code
1143 != S_SUCCESS)) {
1144 dprintk(KERN_ERR "tf_register_shared_memory: "
1145 "operation failed. Unmap block\n");
1146 goto error;
1147 }
1148
1149 /* Saves the block handle returned by the secure world */
1150 if (shmem_desc != NULL)
1151 shmem_desc->block_identifier =
1152 answer->register_shared_memory.block;
1153
1154 /* successful completion */
1155 dprintk(KERN_INFO "tf_register_shared_memory(%p):"
1156 " block_id=0x%08x block=0x%08x\n",
1157 connection, msg->block_id,
1158 answer->register_shared_memory.block);
1159 return 0;
1160
1161 /* error completion */
1162error:
1163 tf_unmap_shmem(
1164 connection,
1165 shmem_desc,
1166 0);
1167
1168 if (error != 0)
1169 dprintk(KERN_ERR "tf_register_shared_memory returns %d\n",
1170 error);
1171 else
1172 dprintk(KERN_ERR "tf_register_shared_memory returns "
1173 "error_code 0x%08X\n",
1174 answer->register_shared_memory.error_code);
1175
1176 return error;
1177}
1178
1179
1180/*
 1181 * Releases a shared memory block from the Secure World
1182 */
1183int tf_release_shared_memory(
1184 struct tf_connection *connection,
1185 union tf_command *command,
1186 union tf_answer *answer)
1187{
1188 int error = 0;
1189
1190 dprintk(KERN_DEBUG "tf_release_shared_memory(%p)\n", connection);
1191
1192 command->release_shared_memory.message_size =
1193 (sizeof(struct tf_command_release_shared_memory) -
1194 sizeof(struct tf_command_header)) / 4;
1195 command->release_shared_memory.device_context =
1196 connection->device_context;
1197
1198 error = tf_send_receive(
1199 &connection->dev->sm,
1200 command,
1201 answer,
1202 connection,
1203 true);
1204
1205 if ((error != 0) ||
1206 (answer->release_shared_memory.error_code != S_SUCCESS))
1207 goto error;
1208
1209 /* Use block_id to get back the pointer to shmem_desc */
1210 tf_unmap_shmem(
1211 connection,
1212 (struct tf_shmem_desc *)
1213 answer->release_shared_memory.block_id,
1214 0);
1215
1216 /* successful completion */
1217 dprintk(KERN_INFO "tf_release_shared_memory(%p):"
1218 " block_id=0x%08x block=0x%08x\n",
1219 connection, answer->release_shared_memory.block_id,
1220 command->release_shared_memory.block);
1221 return 0;
1222
1223
1224error:
1225 if (error != 0)
1226 dprintk(KERN_ERR "tf_release_shared_memory returns %d\n",
1227 error);
1228 else
1229 dprintk(KERN_ERR "tf_release_shared_memory returns "
 1230			"error_code 0x%08X\n",
1231 answer->release_shared_memory.error_code);
1232
1233 return error;
1234
1235}
1236
1237
1238/*
1239 * Invokes a client command to the Secure World
1240 */
1241int tf_invoke_client_command(
1242 struct tf_connection *connection,
1243 union tf_command *command,
1244 union tf_answer *answer)
1245{
1246 int error = 0;
1247 struct tf_shmem_desc *shmem_desc[4] = {NULL};
1248 int i;
1249
1250 dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);
1251
 1252	command->invoke_client_command.message_size =
1253 (sizeof(struct tf_command_invoke_client_command) -
1254 sizeof(struct tf_command_header)) / 4;
1255
1256#ifdef CONFIG_TF_ZEBRA
1257 error = tf_crypto_try_shortcuted_update(connection,
1258 (struct tf_command_invoke_client_command *) command,
1259 (struct tf_answer_invoke_client_command *) answer);
1260 if (error == 0)
1261 return error;
1262#endif
1263
1264 /* Map the tmprefs */
1265 for (i = 0; i < 4; i++) {
1266 int param_type = TF_GET_PARAM_TYPE(
1267 command->invoke_client_command.param_types, i);
1268 if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
1269 TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
1270 == TF_PARAM_TYPE_MEMREF_FLAG) {
1271 /* A temporary memref: map it */
1272 error = tf_map_temp_shmem(connection,
1273 &command->invoke_client_command.
1274 params[i].temp_memref,
1275 param_type, &shmem_desc[i]);
1276 if (error != 0) {
1277 dprintk(KERN_ERR
1278 "tf_invoke_client_command: "
1279 "unable to map temporary memory "
 1280					"block (%08X)\n", error);
1281 goto error;
1282 }
1283 }
1284 }
1285
1286 command->invoke_client_command.device_context =
1287 connection->device_context;
1288
1289 error = tf_send_receive(&connection->dev->sm, command,
1290 answer, connection, true);
1291
1292error:
 1293	/* Unmap the temp mem refs */
1294 for (i = 0; i < 4; i++) {
1295 if (shmem_desc[i] != NULL) {
1296 dprintk(KERN_INFO "tf_invoke_client_command: "
 1297				"unmapping temp_memref %d\n", i);
1298 tf_unmap_shmem(connection, shmem_desc[i], 0);
1299 }
1300 }
1301
1302 if (error != 0)
1303 dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
1304 error);
1305 else
1306 dprintk(KERN_ERR "tf_invoke_client_command returns "
1307 "error_code 0x%08X\n",
1308 answer->invoke_client_command.error_code);
1309
1310 return error;
1311}
1312
1313
1314/*
1315 * Cancels a client command from the Secure World
1316 */
1317int tf_cancel_client_command(
1318 struct tf_connection *connection,
1319 union tf_command *command,
1320 union tf_answer *answer)
1321{
1322 int error = 0;
1323
1324 dprintk(KERN_DEBUG "tf_cancel_client_command(%p)\n", connection);
1325
1326 command->cancel_client_operation.device_context =
1327 connection->device_context;
1328 command->cancel_client_operation.message_size =
1329 (sizeof(struct tf_command_cancel_client_operation) -
1330 sizeof(struct tf_command_header)) / 4;
1331
1332 error = tf_send_receive(
1333 &connection->dev->sm,
1334 command,
1335 answer,
1336 connection,
1337 true);
1338
1339 if ((error != 0) ||
1340 (answer->cancel_client_operation.error_code != S_SUCCESS))
1341 goto error;
1342
1343
1344 /* successful completion */
1345 return 0;
1346
1347error:
1348 if (error != 0)
1349 dprintk(KERN_ERR "tf_cancel_client_command returns %d\n",
1350 error);
1351 else
1352 dprintk(KERN_ERR "tf_cancel_client_command returns "
 1353			"error_code 0x%08X\n",
1354 answer->cancel_client_operation.error_code);
1355
1356 return error;
1357}
1358
1359
1360
1361/*
1362 * Destroys a device context from the Secure World
1363 */
1364int tf_destroy_device_context(
1365 struct tf_connection *connection)
1366{
1367 int error;
1368 /*
1369 * AFY: better use the specialized tf_command_destroy_device_context
 1370	 * structure: this would save stack space
1371 */
1372 union tf_command command;
1373 union tf_answer answer;
1374
1375 dprintk(KERN_INFO "tf_destroy_device_context(%p)\n", connection);
1376
1377 BUG_ON(connection == NULL);
1378
1379 command.header.message_type = TF_MESSAGE_TYPE_DESTROY_DEVICE_CONTEXT;
1380 command.header.message_size =
1381 (sizeof(struct tf_command_destroy_device_context) -
1382 sizeof(struct tf_command_header))/sizeof(u32);
1383
 1384	/*
 1385	 * Fill in the device context handle.
 1386	 * It is guaranteed that the first shared memory descriptor describes
 1387	 * the device context.
 1388	 */
1389 command.destroy_device_context.device_context =
1390 connection->device_context;
1391
1392 error = tf_send_receive(
1393 &connection->dev->sm,
1394 &command,
1395 &answer,
1396 connection,
1397 false);
1398
1399 if ((error != 0) ||
1400 (answer.destroy_device_context.error_code != S_SUCCESS))
1401 goto error;
1402
1403 spin_lock(&(connection->state_lock));
1404 connection->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1405 spin_unlock(&(connection->state_lock));
1406
1407 /* successful completion */
1408 dprintk(KERN_INFO "tf_destroy_device_context(%p)\n",
1409 connection);
1410 return 0;
1411
1412error:
1413 if (error != 0) {
1414 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1415 "error %d\n", error);
1416 } else {
1417 dprintk(KERN_ERR "tf_destroy_device_context failed with "
1418 "error_code 0x%08X\n",
1419 answer.destroy_device_context.error_code);
1420 if (answer.destroy_device_context.error_code ==
1421 S_ERROR_OUT_OF_MEMORY)
1422 error = -ENOMEM;
1423 else
1424 error = -EFAULT;
1425 }
1426
1427 return error;
1428}
1429
1430
1431/*----------------------------------------------------------------------------
1432 * Connection initialization and cleanup operations
1433 *----------------------------------------------------------------------------*/
1434
1435/*
1436 * Opens a connection to the specified device.
1437 *
1438 * The placeholder referenced by connection is set to the address of the
1439 * new connection; it is set to NULL upon failure.
1440 *
1441 * Returns zero upon successful completion, or an appropriate error code upon
1442 * failure.
1443 */
1444int tf_open(struct tf_device *dev,
1445 struct file *file,
1446 struct tf_connection **connection)
1447{
1448 int error;
1449 struct tf_connection *conn = NULL;
1450
1451 dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);
1452
 1453	/*
 1454	 * Allocate and initialize the conn.
 1455	 * kmalloc allocates only sizeof(*conn) bytes of virtual memory
 1456	 */
1457 conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
1458 GFP_KERNEL);
1459 if (conn == NULL) {
1460 printk(KERN_ERR "tf_open(): "
1461 "Out of memory for conn!\n");
1462 error = -ENOMEM;
1463 goto error;
1464 }
1465
1466 memset(conn, 0, sizeof(*conn));
1467
1468 conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
1469 conn->dev = dev;
1470 spin_lock_init(&(conn->state_lock));
1471 atomic_set(&(conn->pending_op_count), 0);
1472 INIT_LIST_HEAD(&(conn->list));
1473
1474 /*
1475 * Initialize the shared memory
1476 */
1477 error = tf_init_shared_memory(conn);
1478 if (error != 0)
1479 goto error;
1480
1481#ifdef CONFIG_TF_ZEBRA
1482 /*
1483 * Initialize CUS specifics
1484 */
1485 tf_crypto_init_cus(conn);
1486#endif
1487
1488 /*
1489 * Attach the conn to the device.
1490 */
1491 spin_lock(&(dev->connection_list_lock));
1492 list_add(&(conn->list), &(dev->connection_list));
1493 spin_unlock(&(dev->connection_list_lock));
1494
1495 /*
1496 * Successful completion.
1497 */
1498
1499 *connection = conn;
1500
1501 dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
1502 return 0;
1503
1504 /*
1505 * Error handling.
1506 */
1507
1508error:
1509 dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
 1510	/* Shared memory was already cleaned up on failure; free the conn */
1511 internal_kfree(conn);
1512 *connection = NULL;
1513 return error;
1514}
1515
1516
1517/*
1518 * Closes the specified connection.
1519 *
1520 * Upon return, the connection has been destroyed and cannot be used anymore.
1521 *
1522 * This function does nothing if connection is set to NULL.
1523 */
1524void tf_close(struct tf_connection *connection)
1525{
1526 int error;
1527 enum TF_CONN_STATE state;
1528
1529 dprintk(KERN_DEBUG "tf_close(%p)\n", connection);
1530
1531 if (connection == NULL)
1532 return;
1533
1534 /*
1535 * Assumption: Linux guarantees that no other operation is in progress
1536 * and that no other operation will be started when close is called
1537 */
1538 BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);
1539
1540 /*
1541 * Exchange a Destroy Device Context message if needed.
1542 */
1543 spin_lock(&(connection->state_lock));
1544 state = connection->state;
1545 spin_unlock(&(connection->state_lock));
1546 if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
1547 /*
1548 * A DestroyDeviceContext operation was not performed. Do it
1549 * now.
1550 */
1551 error = tf_destroy_device_context(connection);
1552 if (error != 0)
1553 /* avoid cleanup if destroy device context fails */
1554 goto error;
1555 }
1556
1557 /*
1558 * Clean up the shared memory
1559 */
1560 tf_cleanup_shared_memories(connection);
1561
1562 spin_lock(&(connection->dev->connection_list_lock));
1563 list_del(&(connection->list));
1564 spin_unlock(&(connection->dev->connection_list_lock));
1565
1566 internal_kfree(connection);
1567
1568 return;
1569
1570error:
1571 dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
1572 connection, error);
1573}
1574
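/*
 * Illustrative sketch only (hence #if 0, not part of the driver): how an
 * in-kernel client might drive the connection API above, assuming it holds
 * a struct tf_device and that kernel owners tag the connection with
 * TF_CONNECTION_OWNER_KERNEL, as the login checks above expect.
 */
#if 0
static int tf_kernel_client_example(struct tf_device *dev)
{
	struct tf_connection *connection;
	int error;

	error = tf_open(dev, NULL, &connection);
	if (error != 0)
		return error;
	connection->owner = TF_CONNECTION_OWNER_KERNEL;

	/* Exchange CREATE_DEVICE_CONTEXT with the Secure World */
	error = tf_create_device_context(connection);
	if (error != 0) {
		tf_close(connection);
		return error;
	}

	/* ... tf_open_client_session(), tf_invoke_client_command() ... */

	/* Also destroys the device context, since the state is valid */
	tf_close(connection);
	return 0;
}
#endif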