-rw-r--r--  drivers/tee/optee/Makefile          1
-rw-r--r--  drivers/tee/optee/call.c          179
-rw-r--r--  drivers/tee/optee/core.c          152
-rw-r--r--  drivers/tee/optee/optee_msg.h      38
-rw-r--r--  drivers/tee/optee/optee_private.h  27
-rw-r--r--  drivers/tee/optee/optee_smc.h       7
-rw-r--r--  drivers/tee/optee/rpc.c            77
-rw-r--r--  drivers/tee/optee/shm_pool.c       75
-rw-r--r--  drivers/tee/optee/shm_pool.h       23
-rw-r--r--  drivers/tee/tee_core.c             81
-rw-r--r--  drivers/tee/tee_private.h          60
-rw-r--r--  drivers/tee/tee_shm.c             228
-rw-r--r--  drivers/tee/tee_shm_pool.c        165
-rw-r--r--  include/linux/tee_drv.h           183
-rw-r--r--  include/uapi/linux/tee.h           30
15 files changed, 1105 insertions(+), 221 deletions(-)
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index d526fb88d9c5..48d262ae2f04 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,3 +4,4 @@ optee-objs += core.o
 optee-objs += call.o
 optee-objs += rpc.o
 optee-objs += supp.o
+optee-objs += shm_pool.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index f7b7b404c990..e675e82ff095 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -11,6 +11,7 @@
  * GNU General Public License for more details.
  *
  */
+#include <asm/pgtable.h>
 #include <linux/arm-smccc.h>
 #include <linux/device.h>
 #include <linux/err.h>
@@ -135,6 +136,7 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 	struct optee *optee = tee_get_drvdata(ctx->teedev);
 	struct optee_call_waiter w;
 	struct optee_rpc_param param = { };
+	struct optee_call_ctx call_ctx = { };
 	u32 ret;
 
 	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
@@ -159,13 +161,14 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
 			param.a1 = res.a1;
 			param.a2 = res.a2;
 			param.a3 = res.a3;
-			optee_handle_rpc(ctx, &param);
+			optee_handle_rpc(ctx, &param, &call_ctx);
 		} else {
 			ret = res.a0;
 			break;
 		}
 	}
 
+	optee_rpc_finalize_call(&call_ctx);
 	/*
 	 * We're done with our thread in secure world, if there's any
 	 * thread waiters wake up one.
@@ -442,3 +445,177 @@ void optee_disable_shm_cache(struct optee *optee)
 	}
 	optee_cq_wait_final(&optee->call_queue, &w);
 }
+
+#define PAGELIST_ENTRIES_PER_PAGE				\
+	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold list of user page addresses and
+ * links to the next pages of buffer
+ */
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset)
+{
+	int n = 0;
+	phys_addr_t optee_page;
+	/*
+	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+	 * for details.
+	 */
+	struct {
+		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+		u64 next_page_data;
+	} *pages_data;
+
+	/*
+	 * Currently OP-TEE uses 4k page size and it does not look
+	 * like this will change in the future. On the other hand, there are
+	 * no known ARM architectures with page size < 4k.
+	 * Thus the next build assert looks redundant. But the following
+	 * code heavily relies on this assumption, so it is better to be
+	 * safe than sorry.
+	 */
+	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	pages_data = (void *)dst;
+	/*
+	 * If a Linux page is bigger than 4k, and the user buffer offset is
+	 * larger than 4k/8k/12k/etc, this will skip the first 4k pages,
+	 * because they bear no valuable data for OP-TEE.
+	 */
+	optee_page = page_to_phys(*pages) +
+		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+	while (true) {
+		pages_data->pages_list[n++] = optee_page;
+
+		if (n == PAGELIST_ENTRIES_PER_PAGE) {
+			pages_data->next_page_data =
+				virt_to_phys(pages_data + 1);
+			pages_data++;
+			n = 0;
+		}
+
+		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+		if (!(optee_page & ~PAGE_MASK)) {
+			if (!--num_pages)
+				break;
+			pages++;
+			optee_page = page_to_phys(*pages);
+		}
+	}
+}
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+u64 *optee_allocate_pages_list(size_t num_entries)
+{
+	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+void optee_free_pages_list(void *list, size_t num_entries)
+{
+	free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages)
+{
+	struct tee_shm *shm_arg = NULL;
+	struct optee_msg_arg *msg_arg;
+	u64 *pages_list;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	if (!num_pages)
+		return -EINVAL;
+
+	pages_list = optee_allocate_pages_list(num_pages);
+	if (!pages_list)
+		return -ENOMEM;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg)) {
+		rc = PTR_ERR(shm_arg);
+		goto out;
+	}
+
+	optee_fill_pages_list(pages_list, pages, num_pages,
+			      tee_shm_get_page_offset(shm));
+
+	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				OPTEE_MSG_ATTR_NONCONTIG;
+	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+	/*
+	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we store
+	 * the buffer offset from the 4k page, as described in the
+	 * OP-TEE ABI.
+	 */
+	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+
+	tee_shm_free(shm_arg);
+out:
+	optee_free_pages_list(pages_list, num_pages);
+	return rc;
+}
+
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+	struct tee_shm *shm_arg;
+	struct optee_msg_arg *msg_arg;
+	phys_addr_t msg_parg;
+	int rc = 0;
+
+	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
+	if (IS_ERR(shm_arg))
+		return PTR_ERR(shm_arg);
+
+	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+	if (optee_do_call_with_arg(ctx, msg_parg) ||
+	    msg_arg->ret != TEEC_SUCCESS)
+		rc = -EINVAL;
+	tee_shm_free(shm_arg);
+	return rc;
+}
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages)
+{
+	/*
+	 * We don't want to register supplicant memory in OP-TEE.
+	 * Instead, information about it will be passed in RPC code.
+	 */
+	return 0;
+}
+
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
+{
+	return 0;
+}
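
A back-of-the-envelope sketch (not part of the patch) of the buf_ptr encoding used by optee_shm_register() above: the physical address of the page list is 4 KB aligned, so its low 12 bits are free to carry the user buffer's offset within its first small page.

	#include <stdint.h>

	#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096

	/* Pack the page-aligned physical address of the page list into the
	 * high bits and the user buffer's offset within its first 4k page
	 * into the low 12 bits, as the registration call does. */
	static uint64_t encode_buf_ptr(uint64_t pages_list_pa,
				       uint64_t page_offset)
	{
		return pages_list_pa |
		       (page_offset & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
	}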
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index d0dd09219795..e9843c53fe31 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include "optee_private.h"
 #include "optee_smc.h"
+#include "shm_pool.h"
 
 #define DRIVER_NAME "optee"
 
@@ -97,6 +98,25 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 				return rc;
 			}
 			break;
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+		case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+				  attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+			p->u.memref.size = mp->u.rmem.size;
+			shm = (struct tee_shm *)(unsigned long)
+				mp->u.rmem.shm_ref;
+
+			if (!shm) {
+				p->u.memref.shm_offs = 0;
+				p->u.memref.shm = NULL;
+				break;
+			}
+			p->u.memref.shm_offs = mp->u.rmem.offs;
+			p->u.memref.shm = shm;
+
+			break;
+
 		default:
 			return -EINVAL;
 		}
@@ -104,6 +124,46 @@ int optee_from_msg_param(struct tee_param *params, size_t num_params,
 	return 0;
 }
 
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	int rc;
+	phys_addr_t pa;
+
+	mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.tmem.size = p->u.memref.size;
+
+	if (!p->u.memref.shm) {
+		mp->u.tmem.buf_ptr = 0;
+		return 0;
+	}
+
+	rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+	if (rc)
+		return rc;
+
+	mp->u.tmem.buf_ptr = pa;
+	mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+		    OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+	return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+				const struct tee_param *p)
+{
+	mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+		   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+	mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+	mp->u.rmem.size = p->u.memref.size;
+	mp->u.rmem.offs = p->u.memref.shm_offs;
+	return 0;
+}
+
 /**
  * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
  * @msg_params:	OPTEE_MSG parameters
@@ -116,7 +176,6 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 {
 	int rc;
 	size_t n;
-	phys_addr_t pa;
 
 	for (n = 0; n < num_params; n++) {
 		const struct tee_param *p = params + n;
@@ -139,22 +198,12 @@ int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-			mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
-				   p->attr -
-				   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
-			mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
-			mp->u.tmem.size = p->u.memref.size;
-			if (!p->u.memref.shm) {
-				mp->u.tmem.buf_ptr = 0;
-				break;
-			}
-			rc = tee_shm_get_pa(p->u.memref.shm,
-					    p->u.memref.shm_offs, &pa);
+			if (tee_shm_is_registered(p->u.memref.shm))
+				rc = to_msg_param_reg_mem(mp, p);
+			else
+				rc = to_msg_param_tmp_mem(mp, p);
 			if (rc)
 				return rc;
-			mp->u.tmem.buf_ptr = pa;
-			mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
-				    OPTEE_MSG_ATTR_CACHE_SHIFT;
 			break;
 		default:
 			return -EINVAL;
@@ -171,6 +220,10 @@ static void optee_get_version(struct tee_device *teedev,
 		.impl_caps = TEE_OPTEE_CAP_TZ,
 		.gen_caps = TEE_GEN_CAP_GP,
 	};
+	struct optee *optee = tee_get_drvdata(teedev);
+
+	if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+		v.gen_caps |= TEE_GEN_CAP_REG_MEM;
 	*vers = v;
 }
 
@@ -264,6 +317,8 @@ static const struct tee_driver_ops optee_ops = {
 	.close_session = optee_close_session,
 	.invoke_func = optee_invoke_func,
 	.cancel_req = optee_cancel_req,
+	.shm_register = optee_shm_register,
+	.shm_unregister = optee_shm_unregister,
 };
 
 static const struct tee_desc optee_desc = {
@@ -278,6 +333,8 @@ static const struct tee_driver_ops optee_supp_ops = {
 	.release = optee_release,
 	.supp_recv = optee_supp_recv,
 	.supp_send = optee_supp_send,
+	.shm_register = optee_shm_register_supp,
+	.shm_unregister = optee_shm_unregister_supp,
 };
 
 static const struct tee_desc optee_supp_desc = {
@@ -342,21 +399,22 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 }
 
 static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm,
+			  u32 sec_caps)
 {
 	union {
 		struct arm_smccc_res smccc;
 		struct optee_smc_get_shm_config_result result;
 	} res;
-	struct tee_shm_pool *pool;
 	unsigned long vaddr;
 	phys_addr_t paddr;
 	size_t size;
 	phys_addr_t begin;
 	phys_addr_t end;
 	void *va;
-	struct tee_shm_pool_mem_info priv_info;
-	struct tee_shm_pool_mem_info dmabuf_info;
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
 	if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -386,22 +444,49 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 	}
 	vaddr = (unsigned long)va;
 
-	priv_info.vaddr = vaddr;
-	priv_info.paddr = paddr;
-	priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-	dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
-	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
-	if (IS_ERR(pool)) {
-		memunmap(va);
-		goto out;
+	/*
+	 * If OP-TEE can work with unregistered SHM, we will use our own
+	 * pool for private shm
+	 */
+	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM) {
+		rc = optee_shm_pool_alloc_pages();
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+	} else {
+		const size_t sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+		rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+						    3 /* 8 bytes aligned */);
+		if (IS_ERR(rc))
+			goto err_memunmap;
+		priv_mgr = rc;
+
+		vaddr += sz;
+		paddr += sz;
+		size -= sz;
 	}
 
+	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
 	*memremaped_shm = va;
-out:
-	return pool;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+	memunmap(va);
+	return rc;
 }
 
 /* Simple wrapper functions to be able to use a function pointer */
@@ -479,7 +564,7 @@ static struct optee *optee_probe(struct device_node *np)
 	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
 		return ERR_PTR(-EINVAL);
 
-	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm, sec_caps);
 	if (IS_ERR(pool))
 		return (void *)pool;
 
@@ -490,6 +575,7 @@ static struct optee *optee_probe(struct device_node *np)
 	}
 
 	optee->invoke_fn = invoke_fn;
+	optee->sec_caps = sec_caps;
 
 	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
 	if (IS_ERR(teedev)) {
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index dd7a06ee0462..30504901be80 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -67,11 +67,32 @@
 #define OPTEE_MSG_ATTR_META		BIT(8)
 
 /*
- * The temporary shared memory object is not physically contigous and this
- * temp memref is followed by another fragment until the last temp memref
- * that doesn't have this bit set.
+ * Pointer to a list of pages used to register user-defined SHM buffer.
+ * Used with OPTEE_MSG_ATTR_TYPE_TMEM_*.
+ * buf_ptr should point to the beginning of the buffer. Buffer will contain
+ * list of page addresses. OP-TEE core can reconstruct contiguous buffer from
+ * that page addresses list. Page addresses are stored as 64 bit values.
+ * Last entry on a page should point to the next page of buffer.
+ * Every entry in buffer should point to a 4k page beginning (12 least
+ * significant bits must be equal to zero).
+ *
+ * 12 least significant bits of optee_msg_param.u.tmem.buf_ptr should hold page
+ * offset of the user buffer.
+ *
+ * So, entries should be placed like members of this structure:
+ *
+ * struct page_data {
+ *	uint64_t pages_array[OPTEE_MSG_NONCONTIG_PAGE_SIZE/sizeof(uint64_t) - 1];
+ *	uint64_t next_page_data;
+ * };
+ *
+ * Structure is designed to exactly fit into the page size
+ * OPTEE_MSG_NONCONTIG_PAGE_SIZE which is a standard 4KB page.
+ *
+ * The size of 4KB is chosen because this is the smallest page size for ARM
+ * architectures. If REE uses larger pages, it should divide them to 4KB ones.
  */
-#define OPTEE_MSG_ATTR_FRAGMENT		BIT(9)
+#define OPTEE_MSG_ATTR_NONCONTIG	BIT(9)
 
 /*
  * Memory attributes for caching passed with temp memrefs. The actual value
@@ -94,6 +115,11 @@
 #define OPTEE_MSG_LOGIN_APPLICATION_USER	0x00000005
 #define OPTEE_MSG_LOGIN_APPLICATION_GROUP	0x00000006
 
+/*
+ * Page size used in non-contiguous buffer entries
+ */
+#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096
+
 /**
  * struct optee_msg_param_tmem - temporary memory reference parameter
  * @buf_ptr:	Address of the buffer
@@ -145,8 +171,8 @@ struct optee_msg_param_value {
  *
  * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
  * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
- * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
+ * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
  * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
  */
 struct optee_msg_param {
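
As a worked example of the layout described in the OPTEE_MSG_ATTR_NONCONTIG comment (a sketch, not part of the patch): each 4096-byte list page holds 4096/8 - 1 = 511 page addresses plus one link entry, so a buffer covering n 4 KB pages needs ceil(n/511) list pages.

	#include <stdint.h>
	#include <stddef.h>

	#define OPTEE_MSG_NONCONTIG_PAGE_SIZE	4096
	#define ENTRIES_PER_LIST_PAGE \
		(OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(uint64_t) - 1) /* 511 */

	/* Number of 4k list pages needed to describe num_pages buffer pages;
	 * e.g. a 4 MB buffer (1024 pages) needs 2 list pages. */
	static size_t list_pages_needed(size_t num_pages)
	{
		return (num_pages + ENTRIES_PER_LIST_PAGE - 1) /
		       ENTRIES_PER_LIST_PAGE;
	}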
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 3e7da187acbe..a85a24725e31 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -84,6 +84,8 @@ struct optee_supp {
  * @supp:		supplicant synchronization struct for RPC to supplicant
  * @pool:		shared memory pool
  * @memremaped_shm	virtual address of memory in shared memory pool
+ * @sec_caps:		secure world capabilities defined by
+ *			OPTEE_SMC_SEC_CAP_* in optee_smc.h
  */
 struct optee {
 	struct tee_device *supp_teedev;
@@ -94,6 +96,7 @@ struct optee {
 	struct optee_supp supp;
 	struct tee_shm_pool *pool;
 	void *memremaped_shm;
+	u32 sec_caps;
 };
 
 struct optee_session {
@@ -118,7 +121,16 @@ struct optee_rpc_param {
 	u32 a7;
 };
 
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
+/* Holds context that is preserved during one STD call */
+struct optee_call_ctx {
+	/* information about pages list used in last allocation */
+	void *pages_list;
+	size_t num_entries;
+};
+
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx);
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
 
 void optee_wait_queue_init(struct optee_wait_queue *wq);
 void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -149,11 +161,24 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 void optee_enable_shm_cache(struct optee *optee);
 void optee_disable_shm_cache(struct optee *optee);
 
+int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+		       struct page **pages, size_t num_pages);
+int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+
+int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages);
+int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
+
 int optee_from_msg_param(struct tee_param *params, size_t num_params,
 			 const struct optee_msg_param *msg_params);
 int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
 		       const struct tee_param *params);
 
+u64 *optee_allocate_pages_list(size_t num_entries);
+void optee_free_pages_list(void *array, size_t num_entries);
+void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+			   size_t page_offset);
+
 /*
  * Small helpers
  */
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
index 069c8e1429de..7cd327243ada 100644
--- a/drivers/tee/optee/optee_smc.h
+++ b/drivers/tee/optee/optee_smc.h
@@ -222,6 +222,13 @@ struct optee_smc_get_shm_config_result {
 #define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM	BIT(0)
 /* Secure world can communicate via previously unregistered shared memory */
 #define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM	BIT(1)
+
+/*
+ * Secure world supports commands "register/unregister shared memory",
+ * secure world accepts command buffers located in any parts of non-secure RAM
+ */
+#define OPTEE_SMC_SEC_CAP_DYNAMIC_SHM		BIT(2)
+
 #define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES	9
 #define OPTEE_SMC_EXCHANGE_CAPABILITIES \
 	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index c6df4317ca9f..41aea12e2bcc 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -200,7 +200,8 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
-					  struct optee_msg_arg *arg)
+					  struct optee_msg_arg *arg,
+					  struct optee_call_ctx *call_ctx)
 {
 	phys_addr_t pa;
 	struct tee_shm *shm;
@@ -245,10 +246,49 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 		goto bad;
 	}
 
-	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
-	arg->params[0].u.tmem.buf_ptr = pa;
-	arg->params[0].u.tmem.size = sz;
-	arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	sz = tee_shm_get_size(shm);
+
+	if (tee_shm_is_registered(shm)) {
+		struct page **pages;
+		u64 *pages_list;
+		size_t page_num;
+
+		pages = tee_shm_get_pages(shm, &page_num);
+		if (!pages || !page_num) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		pages_list = optee_allocate_pages_list(page_num);
+		if (!pages_list) {
+			arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+			goto bad;
+		}
+
+		call_ctx->pages_list = pages_list;
+		call_ctx->num_entries = page_num;
+
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+				      OPTEE_MSG_ATTR_NONCONTIG;
+		/*
+		 * In the least bits of u.tmem.buf_ptr we store the buffer
+		 * offset from the 4k page, as described in the OP-TEE ABI.
+		 */
+		arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+			(tee_shm_get_page_offset(shm) &
+			 (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+		arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+		optee_fill_pages_list(pages_list, pages, page_num,
+				      tee_shm_get_page_offset(shm));
+	} else {
+		arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+		arg->params[0].u.tmem.buf_ptr = pa;
+		arg->params[0].u.tmem.size = sz;
+		arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+	}
+
 	arg->ret = TEEC_SUCCESS;
 	return;
 bad:
@@ -307,8 +347,24 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 	arg->ret = TEEC_SUCCESS;
 }
 
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+	if (call_ctx->pages_list) {
+		optee_free_pages_list(call_ctx->pages_list,
+				      call_ctx->num_entries);
+		call_ctx->pages_list = NULL;
+		call_ctx->num_entries = 0;
+	}
+}
+
+void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+	free_pages_list(call_ctx);
+}
+
 static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
-				struct tee_shm *shm)
+				struct tee_shm *shm,
+				struct optee_call_ctx *call_ctx)
 {
 	struct optee_msg_arg *arg;
 
@@ -329,7 +385,8 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 		handle_rpc_func_cmd_wait(arg);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
-		handle_rpc_func_cmd_shm_alloc(ctx, arg);
+		free_pages_list(call_ctx);
+		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
 		break;
 	case OPTEE_MSG_RPC_CMD_SHM_FREE:
 		handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -343,10 +400,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
  * optee_handle_rpc() - handle RPC from secure world
  * @ctx:	context doing the RPC
  * @param:	value of registers for the RPC
+ * @call_ctx:	call context. Preserved during one OP-TEE invocation
  *
  * Result of RPC is written back into @param.
  */
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
+void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
+		      struct optee_call_ctx *call_ctx)
 {
 	struct tee_device *teedev = ctx->teedev;
 	struct optee *optee = tee_get_drvdata(teedev);
@@ -381,7 +440,7 @@ void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
 		break;
 	case OPTEE_SMC_RPC_FUNC_CMD:
 		shm = reg_pair_to_ptr(param->a1, param->a2);
-		handle_rpc_func_cmd(ctx, optee, shm);
+		handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
 		break;
 	default:
 		pr_warn("Unknown RPC func 0x%x\n",
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
new file mode 100644
index 000000000000..49397813fff1
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2017, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/tee_drv.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "shm_pool.h"
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm, size_t size)
+{
+	unsigned int order = get_order(size);
+	struct page *page;
+
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
+		return -ENOMEM;
+
+	shm->kaddr = page_address(page);
+	shm->paddr = page_to_phys(page);
+	shm->size = PAGE_SIZE << order;
+
+	return 0;
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+			 struct tee_shm *shm)
+{
+	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+	shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+	.alloc = pool_op_alloc,
+	.free = pool_op_free,
+	.destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from kernel's own memory.
+ */
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->ops = &pool_ops;
+
+	return mgr;
+}
diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
new file mode 100644
index 000000000000..4e753c3bf7ec
--- /dev/null
+++ b/drivers/tee/optee/shm_pool.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef SHM_POOL_H
+#define SHM_POOL_H
+
+#include <linux/tee_drv.h>
+
+struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
+
+#endif
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
index 4d0ce606f0fc..6c4b200a4560 100644
--- a/drivers/tee/tee_core.c
+++ b/drivers/tee/tee_core.c
@@ -54,6 +54,7 @@ static int tee_open(struct inode *inode, struct file *filp)
 		goto err;
 	}
 
+	kref_init(&ctx->refcount);
 	ctx->teedev = teedev;
 	INIT_LIST_HEAD(&ctx->list_shm);
 	filp->private_data = ctx;
@@ -68,19 +69,40 @@ err:
 	return rc;
 }
 
-static int tee_release(struct inode *inode, struct file *filp)
+void teedev_ctx_get(struct tee_context *ctx)
 {
-	struct tee_context *ctx = filp->private_data;
-	struct tee_device *teedev = ctx->teedev;
-	struct tee_shm *shm;
+	if (ctx->releasing)
+		return;
 
+	kref_get(&ctx->refcount);
+}
+
+static void teedev_ctx_release(struct kref *ref)
+{
+	struct tee_context *ctx = container_of(ref, struct tee_context,
+					       refcount);
+	ctx->releasing = true;
 	ctx->teedev->desc->ops->release(ctx);
-	mutex_lock(&ctx->teedev->mutex);
-	list_for_each_entry(shm, &ctx->list_shm, link)
-		shm->ctx = NULL;
-	mutex_unlock(&ctx->teedev->mutex);
 	kfree(ctx);
-	tee_device_put(teedev);
+}
+
+void teedev_ctx_put(struct tee_context *ctx)
+{
+	if (ctx->releasing)
+		return;
+
+	kref_put(&ctx->refcount, teedev_ctx_release);
+}
+
+static void teedev_close_context(struct tee_context *ctx)
+{
+	tee_device_put(ctx->teedev);
+	teedev_ctx_put(ctx);
+}
+
+static int tee_release(struct inode *inode, struct file *filp)
+{
+	teedev_close_context(filp->private_data);
 	return 0;
 }
 
@@ -114,8 +136,6 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	if (data.flags)
 		return -EINVAL;
 
-	data.id = -1;
-
 	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 	if (IS_ERR(shm))
 		return PTR_ERR(shm);
@@ -138,6 +158,43 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	return ret;
 }
 
+static int
+tee_ioctl_shm_register(struct tee_context *ctx,
+		       struct tee_ioctl_shm_register_data __user *udata)
+{
+	long ret;
+	struct tee_ioctl_shm_register_data data;
+	struct tee_shm *shm;
+
+	if (copy_from_user(&data, udata, sizeof(data)))
+		return -EFAULT;
+
+	/* Currently no input flags are supported */
+	if (data.flags)
+		return -EINVAL;
+
+	shm = tee_shm_register(ctx, data.addr, data.length,
+			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	if (IS_ERR(shm))
+		return PTR_ERR(shm);
+
+	data.id = shm->id;
+	data.flags = shm->flags;
+	data.length = shm->size;
+
+	if (copy_to_user(udata, &data, sizeof(data)))
+		ret = -EFAULT;
+	else
+		ret = tee_shm_get_fd(shm);
+	/*
+	 * When user space closes the file descriptor the shared memory
+	 * should be freed or if tee_shm_get_fd() failed then it will
+	 * be freed immediately.
+	 */
+	tee_shm_put(shm);
+	return ret;
+}
+
 static int params_from_user(struct tee_context *ctx, struct tee_param *params,
 			    size_t num_params,
 			    struct tee_ioctl_param __user *uparams)
@@ -578,6 +635,8 @@ static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		return tee_ioctl_version(ctx, uarg);
 	case TEE_IOC_SHM_ALLOC:
 		return tee_ioctl_shm_alloc(ctx, uarg);
+	case TEE_IOC_SHM_REGISTER:
+		return tee_ioctl_shm_register(ctx, uarg);
 	case TEE_IOC_OPEN_SESSION:
 		return tee_ioctl_open_session(ctx, uarg);
 	case TEE_IOC_INVOKE:
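
For context, user space reaches tee_ioctl_shm_register() through the new TEE_IOC_SHM_REGISTER ioctl; the uapi structure lives in include/uapi/linux/tee.h, which this series also extends. A minimal caller sketch, with error handling elided and field usage assumed from the handler above:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/tee.h>

	/* Register an existing user buffer with the TEE device; on success
	 * the ioctl returns a dma-buf fd that keeps the registration alive
	 * until the last reference is dropped. */
	static int register_buffer(int tee_fd, void *buf, size_t len)
	{
		struct tee_ioctl_shm_register_data data = {
			.addr = (uintptr_t)buf,
			.length = len,
			.flags = 0,	/* no input flags are defined yet */
		};

		return ioctl(tee_fd, TEE_IOC_SHM_REGISTER, &data);
	}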
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
index 21cb6be8bce9..85d99d621603 100644
--- a/drivers/tee/tee_private.h
+++ b/drivers/tee/tee_private.h
@@ -21,68 +21,15 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
-struct tee_device;
-
-/**
- * struct tee_shm - shared memory object
- * @teedev:	device used to allocate the object
- * @ctx:	context using the object, if NULL the context is gone
- * @link	link element
- * @paddr:	physical address of the shared memory
- * @kaddr:	virtual address of the shared memory
- * @size:	size of shared memory
- * @dmabuf:	dmabuf used to for exporting to user space
- * @flags:	defined by TEE_SHM_* in tee_drv.h
- * @id:		unique id of a shared memory object on this device
- */
-struct tee_shm {
-	struct tee_device *teedev;
-	struct tee_context *ctx;
-	struct list_head link;
-	phys_addr_t paddr;
-	void *kaddr;
-	size_t size;
-	struct dma_buf *dmabuf;
-	u32 flags;
-	int id;
-};
-
-struct tee_shm_pool_mgr;
-
-/**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
- * @alloc:	called when allocating shared memory
- * @free:	called when freeing shared memory
- */
-struct tee_shm_pool_mgr_ops {
-	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
-		     size_t size);
-	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-};
-
-/**
- * struct tee_shm_pool_mgr - shared memory manager
- * @ops:		operations
- * @private_data:	private data for the shared memory manager
- */
-struct tee_shm_pool_mgr {
-	const struct tee_shm_pool_mgr_ops *ops;
-	void *private_data;
-};
-
 /**
  * struct tee_shm_pool - shared memory pool
  * @private_mgr:	pool manager for shared memory only between kernel
  *			and secure world
  * @dma_buf_mgr:	pool manager for shared memory exported to user space
- * @destroy:		called when destroying the pool
- * @private_data:	private data for the pool
  */
 struct tee_shm_pool {
-	struct tee_shm_pool_mgr private_mgr;
-	struct tee_shm_pool_mgr dma_buf_mgr;
-	void (*destroy)(struct tee_shm_pool *pool);
-	void *private_data;
+	struct tee_shm_pool_mgr *private_mgr;
+	struct tee_shm_pool_mgr *dma_buf_mgr;
 };
 
 #define TEE_DEVICE_FLAG_REGISTERED	0x1
@@ -126,4 +73,7 @@ int tee_shm_get_fd(struct tee_shm *shm);
 bool tee_device_get(struct tee_device *teedev);
 void tee_device_put(struct tee_device *teedev);
 
+void teedev_ctx_get(struct tee_context *ctx);
+void teedev_ctx_put(struct tee_context *ctx);
+
 #endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 4bc7956cefc4..04e1b8b37046 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -23,7 +23,6 @@
 static void tee_shm_release(struct tee_shm *shm)
 {
 	struct tee_device *teedev = shm->teedev;
-	struct tee_shm_pool_mgr *poolm;
 
 	mutex_lock(&teedev->mutex);
 	idr_remove(&teedev->idr, shm->id);
@@ -31,12 +30,32 @@ static void tee_shm_release(struct tee_shm *shm)
 	list_del(&shm->link);
 	mutex_unlock(&teedev->mutex);
 
-	if (shm->flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
-	else
-		poolm = &teedev->pool->private_mgr;
+	if (shm->flags & TEE_SHM_POOL) {
+		struct tee_shm_pool_mgr *poolm;
+
+		if (shm->flags & TEE_SHM_DMA_BUF)
+			poolm = teedev->pool->dma_buf_mgr;
+		else
+			poolm = teedev->pool->private_mgr;
+
+		poolm->ops->free(poolm, shm);
+	} else if (shm->flags & TEE_SHM_REGISTER) {
+		size_t n;
+		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
+
+		if (rc)
+			dev_err(teedev->dev.parent,
+				"unregister shm %p failed: %d", shm, rc);
+
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+
+		kfree(shm->pages);
+	}
+
+	if (shm->ctx)
+		teedev_ctx_put(shm->ctx);
 
-	poolm->ops->free(poolm, shm);
 	kfree(shm);
 
 	tee_device_put(teedev);
@@ -76,6 +95,10 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	struct tee_shm *shm = dmabuf->priv;
 	size_t size = vma->vm_end - vma->vm_start;
 
+	/* Refuse sharing shared memory provided by application */
+	if (shm->flags & TEE_SHM_REGISTER)
+		return -EINVAL;
+
 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
 			       size, vma->vm_page_prot);
 }
@@ -89,26 +112,20 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.mmap = tee_shm_op_mmap,
 };
 
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
+				struct tee_device *teedev,
+				size_t size, u32 flags)
 {
-	struct tee_device *teedev = ctx->teedev;
 	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
+	if (ctx && ctx->teedev != teedev) {
+		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (!(flags & TEE_SHM_MAPPED)) {
 		dev_err(teedev->dev.parent,
 			"only mapped allocations supported\n");
@@ -135,13 +152,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 		goto err_dev_put;
 	}
 
-	shm->flags = flags;
+	shm->flags = flags | TEE_SHM_POOL;
 	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
-		poolm = &teedev->pool->dma_buf_mgr;
+		poolm = teedev->pool->dma_buf_mgr;
 	else
-		poolm = &teedev->pool->private_mgr;
+		poolm = teedev->pool->private_mgr;
 
 	rc = poolm->ops->alloc(poolm, shm, size);
 	if (rc) {
@@ -171,9 +188,13 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 			goto err_rem;
 		}
 	}
-	mutex_lock(&teedev->mutex);
-	list_add_tail(&shm->link, &ctx->list_shm);
-	mutex_unlock(&teedev->mutex);
+
+	if (ctx) {
+		teedev_ctx_get(ctx);
+		mutex_lock(&teedev->mutex);
+		list_add_tail(&shm->link, &ctx->list_shm);
+		mutex_unlock(&teedev->mutex);
+	}
 
 	return shm;
 err_rem:
@@ -188,8 +209,143 @@ err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
+ * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory.
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+{
+	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
+}
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
+{
+	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
+}
+EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
+
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags)
+{
+	struct tee_device *teedev = ctx->teedev;
+	const u32 req_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
+	struct tee_shm *shm;
+	void *ret;
+	int rc;
+	int num_pages;
+	unsigned long start;
+
+	if (flags != req_flags)
+		return ERR_PTR(-ENOTSUPP);
+
+	if (!tee_device_get(teedev))
+		return ERR_PTR(-EINVAL);
+
+	if (!teedev->desc->ops->shm_register ||
+	    !teedev->desc->ops->shm_unregister) {
+		tee_device_put(teedev);
+		return ERR_PTR(-ENOTSUPP);
+	}
+
+	teedev_ctx_get(ctx);
+
+	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
+	if (!shm) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->teedev = teedev;
+	shm->ctx = ctx;
+	shm->id = -1;
+	start = rounddown(addr, PAGE_SIZE);
+	shm->offset = addr - start;
+	shm->size = length;
+	num_pages = (roundup(addr + length, PAGE_SIZE) - start) / PAGE_SIZE;
+	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
+	if (!shm->pages) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err;
+	}
+
+	rc = get_user_pages_fast(start, num_pages, 1, shm->pages);
+	if (rc > 0)
+		shm->num_pages = rc;
+	if (rc != num_pages) {
+		if (rc > 0)
+			rc = -ENOMEM;
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	mutex_lock(&teedev->mutex);
+	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+
+	if (shm->id < 0) {
+		ret = ERR_PTR(shm->id);
+		goto err;
+	}
+
+	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
+					     shm->num_pages);
+	if (rc) {
+		ret = ERR_PTR(rc);
+		goto err;
+	}
+
+	if (flags & TEE_SHM_DMA_BUF) {
+		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+		exp_info.ops = &tee_shm_dma_buf_ops;
+		exp_info.size = shm->size;
+		exp_info.flags = O_RDWR;
+		exp_info.priv = shm;
+
+		shm->dmabuf = dma_buf_export(&exp_info);
+		if (IS_ERR(shm->dmabuf)) {
+			ret = ERR_CAST(shm->dmabuf);
+			teedev->desc->ops->shm_unregister(ctx, shm);
+			goto err;
+		}
+	}
+
+	mutex_lock(&teedev->mutex);
+	list_add_tail(&shm->link, &ctx->list_shm);
+	mutex_unlock(&teedev->mutex);
+
+	return shm;
+err:
+	if (shm) {
+		size_t n;
+
+		if (shm->id >= 0) {
+			mutex_lock(&teedev->mutex);
+			idr_remove(&teedev->idr, shm->id);
+			mutex_unlock(&teedev->mutex);
+		}
+		for (n = 0; n < shm->num_pages; n++)
+			put_page(shm->pages[n]);
+		kfree(shm->pages);
+	}
+	kfree(shm);
+	teedev_ctx_put(ctx);
+	tee_device_put(teedev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tee_shm_register);
+
 /**
  * tee_shm_get_fd() - Increase reference count and return file descriptor
  * @shm:	Shared memory handle
@@ -197,10 +353,9 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
  */
 int tee_shm_get_fd(struct tee_shm *shm)
 {
-	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
 	int fd;
 
-	if ((shm->flags & req_flags) != req_flags)
+	if (!(shm->flags & TEE_SHM_DMA_BUF))
 		return -EINVAL;
 
 	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
@@ -238,6 +393,8 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
  */
 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if ((char *)va < (char *)shm->kaddr)
 		return -EINVAL;
@@ -258,6 +415,8 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
  */
 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if (pa < shm->paddr)
 		return -EINVAL;
@@ -284,6 +443,8 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
  */
 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
 {
+	if (!(shm->flags & TEE_SHM_MAPPED))
+		return ERR_PTR(-EINVAL);
 	if (offs >= shm->size)
 		return ERR_PTR(-EINVAL);
 	return (char *)shm->kaddr + offs;
@@ -336,17 +497,6 @@ struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
 EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
 
 /**
- * tee_shm_get_id() - Get id of a shared memory object
- * @shm:	Shared memory handle
- * @returns id
- */
-int tee_shm_get_id(struct tee_shm *shm)
-{
-	return shm->id;
-}
-EXPORT_SYMBOL_GPL(tee_shm_get_id);
-
-/**
  * tee_shm_put() - Decrease reference count on a shared memory handle
  * @shm:	Shared memory handle
  */
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
index fb4f8522a526..e6d4b9e4a864 100644
--- a/drivers/tee/tee_shm_pool.c
+++ b/drivers/tee/tee_shm_pool.c
@@ -44,49 +44,18 @@ static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
 	shm->kaddr = NULL;
 }
 
+static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+	gen_pool_destroy(poolm->private_data);
+	kfree(poolm);
+}
+
 static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
 	.alloc = pool_op_gen_alloc,
 	.free = pool_op_gen_free,
+	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
 };
 
-static void pool_res_mem_destroy(struct tee_shm_pool *pool)
-{
-	gen_pool_destroy(pool->private_mgr.private_data);
-	gen_pool_destroy(pool->dma_buf_mgr.private_data);
-}
-
-static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
-				 struct tee_shm_pool_mem_info *info,
-				 int min_alloc_order)
-{
-	size_t page_mask = PAGE_SIZE - 1;
-	struct gen_pool *genpool = NULL;
-	int rc;
-
-	/*
-	 * Start and end must be page aligned
-	 */
-	if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
-	    (info->size & page_mask))
-		return -EINVAL;
-
-	genpool = gen_pool_create(min_alloc_order, -1);
-	if (!genpool)
-		return -ENOMEM;
-
-	gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
-	rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
-			       -1);
-	if (rc) {
-		gen_pool_destroy(genpool);
-		return rc;
-	}
-
-	mgr->private_data = genpool;
-	mgr->ops = &pool_ops_generic;
-	return 0;
-}
-
 /**
  * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
  * memory range
@@ -104,42 +73,109 @@ struct tee_shm_pool *
 tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
 			   struct tee_shm_pool_mem_info *dmabuf_info)
 {
-	struct tee_shm_pool *pool = NULL;
-	int ret;
-
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool) {
-		ret = -ENOMEM;
-		goto err;
-	}
+	struct tee_shm_pool_mgr *priv_mgr;
+	struct tee_shm_pool_mgr *dmabuf_mgr;
+	void *rc;
 
 	/*
 	 * Create the pool for driver private shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
-				    3 /* 8 byte aligned */);
-	if (ret)
-		goto err;
+	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
+					    priv_info->size,
+					    3 /* 8 byte aligned */);
+	if (IS_ERR(rc))
+		return rc;
+	priv_mgr = rc;
 
 	/*
 	 * Create the pool for dma_buf shared memory
 	 */
-	ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
-				    PAGE_SHIFT);
-	if (ret)
+	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
+					    dmabuf_info->paddr,
+					    dmabuf_info->size, PAGE_SHIFT);
+	if (IS_ERR(rc))
+		goto err_free_priv_mgr;
+	dmabuf_mgr = rc;
+
+	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+	if (IS_ERR(rc))
+		goto err_free_dmabuf_mgr;
+
+	return rc;
+
+err_free_dmabuf_mgr:
+	tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+	tee_shm_pool_mgr_destroy(priv_mgr);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order)
+{
+	const size_t page_mask = PAGE_SIZE - 1;
+	struct tee_shm_pool_mgr *mgr;
+	int rc;
+
+	/* Start and end must be page aligned */
+	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
+		return ERR_PTR(-EINVAL);
+
+	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	if (!mgr)
+		return ERR_PTR(-ENOMEM);
+
+	mgr->private_data = gen_pool_create(min_alloc_order, -1);
+	if (!mgr->private_data) {
+		rc = -ENOMEM;
 		goto err;
+	}
 
-	pool->destroy = pool_res_mem_destroy;
-	return pool;
+	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
+	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
+	if (rc) {
+		gen_pool_destroy(mgr->private_data);
+		goto err;
+	}
+
+	mgr->ops = &pool_ops_generic;
+
+	return mgr;
 err:
-	if (ret == -ENOMEM)
-		pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
-	if (pool && pool->private_mgr.private_data)
-		gen_pool_destroy(pool->private_mgr.private_data);
-	kfree(pool);
-	return ERR_PTR(ret);
+	kfree(mgr);
+
+	return ERR_PTR(rc);
 }
-EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
+EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
+
+static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
+{
+	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
+		mgr->ops->destroy_poolmgr;
+}
+
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr)
+{
+	struct tee_shm_pool *pool;
+
+	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
+		return ERR_PTR(-EINVAL);
+
+	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->private_mgr = priv_mgr;
+	pool->dma_buf_mgr = dmabuf_mgr;
+
+	return pool;
+}
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
 
 /**
  * tee_shm_pool_free() - Free a shared memory pool
@@ -150,7 +186,10 @@ EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
  */
 void tee_shm_pool_free(struct tee_shm_pool *pool)
 {
-	pool->destroy(pool);
+	if (pool->private_mgr)
+		tee_shm_pool_mgr_destroy(pool->private_mgr);
+	if (pool->dma_buf_mgr)
+		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
 	kfree(pool);
 }
 EXPORT_SYMBOL_GPL(tee_shm_pool_free);
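With this rework a driver no longer fills in struct tee_shm_pool itself: it creates one manager per backing range and combines them with tee_shm_pool_alloc(), exactly as the new tee_shm_pool_alloc_res_mem() above does. A minimal sketch of doing the same by hand, assuming an already-mapped reserved region of at least two pages (the split-in-half layout and the function name are illustrative, not part of this patch):

#include <linux/err.h>
#include <linux/tee_drv.h>

/* Sketch: build a pool from two reserved-memory managers. */
static struct tee_shm_pool *example_pool_setup(unsigned long vaddr,
					       phys_addr_t paddr, size_t size)
{
	const size_t half = size / 2;	/* assumed page aligned */
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	/* First half for driver private allocations, 8 byte aligned */
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, half, 3);
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	/* Second half for dma-buf allocations, page aligned */
	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr + half, paddr + half,
					    half, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_priv;
	dmabuf_mgr = rc;

	/* The pool takes ownership of both managers on success */
	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_dmabuf;
	return rc;

err_dmabuf:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_priv:
	tee_shm_pool_mgr_destroy(priv_mgr);
	return rc;
}

On teardown, tee_shm_pool_free() now destroys both managers through their destroy_poolmgr hooks, so the caller never releases them individually once the pool exists.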
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
index f4a0ac05ebb4..41bd4bded28c 100644
--- a/include/linux/tee_drv.h
+++ b/include/linux/tee_drv.h
@@ -17,6 +17,7 @@
 
 #include <linux/types.h>
 #include <linux/idr.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/tee.h>
 
@@ -25,8 +26,12 @@
  * specific TEE driver.
  */
 
-#define TEE_SHM_MAPPED		0x1	/* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF		0x2	/* Memory with dma-buf handle */
+#define TEE_SHM_MAPPED		BIT(0)	/* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF		BIT(1)	/* Memory with dma-buf handle */
+#define TEE_SHM_EXT_DMA_BUF	BIT(2)	/* Memory with dma-buf handle */
+#define TEE_SHM_REGISTER	BIT(3)	/* Memory registered in secure world */
+#define TEE_SHM_USER_MAPPED	BIT(4)	/* Memory mapped in user space */
+#define TEE_SHM_POOL		BIT(5)	/* Memory allocated from pool */
 
 struct device;
 struct tee_device;
@@ -38,11 +43,17 @@ struct tee_shm_pool;
  * @teedev:	pointer to this drivers struct tee_device
  * @list_shm:	List of shared memory object owned by this context
  * @data:	driver specific context data, managed by the driver
+ * @refcount:	reference counter for this structure
+ * @releasing:	flag that indicates if context is being released right now.
+ *		It is needed to break circular dependency on context during
+ *		shared memory release.
  */
 struct tee_context {
 	struct tee_device *teedev;
 	struct list_head list_shm;
 	void *data;
+	struct kref refcount;
+	bool releasing;
 };
 
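A registered buffer can outlive the file descriptor that created it, so the context now has to be reference counted: the last shm put must be able to release the context, while a context tearing down its shm list must not recurse back into itself. A sketch of the get/put pattern these two fields enable; the helper names are illustrative only, this header adds just the fields:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

/* Sketch: hypothetical helpers built on the new refcount/releasing fields. */
static void teedev_ctx_release(struct kref *ref)
{
	struct tee_context *ctx = container_of(ref, struct tee_context,
					       refcount);

	/* Break the ctx <-> shm circular dependency during release */
	ctx->releasing = true;
	/* ... release remaining shared memory, driver data ... */
	kfree(ctx);
}

static void teedev_ctx_get(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;
	kref_get(&ctx->refcount);
}

static void teedev_ctx_put(struct tee_context *ctx)
{
	if (ctx->releasing)
		return;
	kref_put(&ctx->refcount, teedev_ctx_release);
}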
 struct tee_param_memref {
@@ -76,6 +87,8 @@ struct tee_param {
  * @cancel_req:	request cancel of an ongoing invoke or open
  * @supp_revc:		called for supplicant to get a command
  * @supp_send:		called for supplicant to send a response
+ * @shm_register:	register shared memory buffer in TEE
+ * @shm_unregister:	unregister shared memory buffer in TEE
  */
 struct tee_driver_ops {
 	void (*get_version)(struct tee_device *teedev,
@@ -94,6 +107,9 @@ struct tee_driver_ops {
 			  struct tee_param *param);
 	int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
 			 struct tee_param *param);
+	int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm,
+			    struct page **pages, size_t num_pages);
+	int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm);
 };
 
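A driver that supports registration fills in the two new hooks; the core hands them the already-pinned page list. A bare-bones sketch, where the my_*_secure_world() calls are placeholders for the driver's actual transport, not functions from this patch:

#include <linux/mm_types.h>
#include <linux/tee_drv.h>

/* Placeholders for a driver specific transport to the secure OS. */
extern int my_register_to_secure_world(struct tee_context *ctx,
				       struct tee_shm *shm,
				       struct page **pages,
				       size_t num_pages);
extern int my_unregister_from_secure_world(struct tee_context *ctx,
					   struct tee_shm *shm);

static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages)
{
	/* Hand the pinned page list over to the secure OS */
	return my_register_to_secure_world(ctx, shm, pages, num_pages);
}

static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	return my_unregister_from_secure_world(ctx, shm);
}

static const struct tee_driver_ops my_ops = {
	/* ... get_version, open, release, invoke_func, ... */
	.shm_register = my_shm_register,
	.shm_unregister = my_shm_unregister,
};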
 /**
@@ -150,6 +166,97 @@ int tee_device_register(struct tee_device *teedev);
 void tee_device_unregister(struct tee_device *teedev);
 
 /**
+ * struct tee_shm - shared memory object
+ * @teedev:	device used to allocate the object
+ * @ctx:	context using the object, if NULL the context is gone
+ * @link:	link element
+ * @paddr:	physical address of the shared memory
+ * @kaddr:	virtual address of the shared memory
+ * @size:	size of shared memory
+ * @offset:	offset of buffer in user space
+ * @pages:	locked pages from userspace
+ * @num_pages:	number of locked pages
+ * @dmabuf:	dmabuf used for exporting to user space
+ * @flags:	defined by TEE_SHM_* in tee_drv.h
+ * @id:		unique id of a shared memory object on this device
+ *
+ * This structure is only supposed to be accessed directly from the TEE
+ * subsystem and from drivers that implement their own shm pool manager.
+ */
+struct tee_shm {
+	struct tee_device *teedev;
+	struct tee_context *ctx;
+	struct list_head link;
+	phys_addr_t paddr;
+	void *kaddr;
+	size_t size;
+	unsigned int offset;
+	struct page **pages;
+	size_t num_pages;
+	struct dma_buf *dmabuf;
+	u32 flags;
+	int id;
+};
+
+/**
+ * struct tee_shm_pool_mgr - shared memory manager
+ * @ops:		operations
+ * @private_data:	private data for the shared memory manager
+ */
+struct tee_shm_pool_mgr {
+	const struct tee_shm_pool_mgr_ops *ops;
+	void *private_data;
+};
+
+/**
+ * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * @alloc:		called when allocating shared memory
+ * @free:		called when freeing shared memory
+ * @destroy_poolmgr:	called when destroying the pool manager
+ */
+struct tee_shm_pool_mgr_ops {
+	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
+		     size_t size);
+	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
+	void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+};
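check_mgr_ops() in tee_shm_pool.c rejects a manager unless all three hooks are set, so a custom manager must provide each of them. A skeleton built around a hypothetical my_backend_* allocator (the backend calls are placeholders, not APIs from this patch):

#include <linux/slab.h>
#include <linux/tee_drv.h>

/* Placeholders for a driver specific allocator backend. */
extern void *my_backend_alloc(void *priv, size_t size, phys_addr_t *pa);
extern void my_backend_free(void *priv, void *va, size_t size);
extern void my_backend_destroy(void *priv);

static int my_pool_alloc(struct tee_shm_pool_mgr *poolmgr,
			 struct tee_shm *shm, size_t size)
{
	void *va = my_backend_alloc(poolmgr->private_data, size, &shm->paddr);

	if (!va)
		return -ENOMEM;
	/* Fill in the fields the TEE core expects after a pool alloc */
	shm->kaddr = va;
	shm->size = size;
	return 0;
}

static void my_pool_free(struct tee_shm_pool_mgr *poolmgr,
			 struct tee_shm *shm)
{
	my_backend_free(poolmgr->private_data, shm->kaddr, shm->size);
	shm->kaddr = NULL;
}

static void my_pool_destroy_poolmgr(struct tee_shm_pool_mgr *poolmgr)
{
	my_backend_destroy(poolmgr->private_data);
	kfree(poolmgr);
}

static const struct tee_shm_pool_mgr_ops my_pool_ops = {
	.alloc = my_pool_alloc,
	.free = my_pool_free,
	.destroy_poolmgr = my_pool_destroy_poolmgr,
};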
+
+/**
+ * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
+ * @priv_mgr:	manager for driver private shared memory allocations
+ * @dmabuf_mgr:	manager for dma-buf shared memory allocations
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_mgr, others will use the range provided by @priv_mgr.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
+					struct tee_shm_pool_mgr *dmabuf_mgr);
+
+/**
+ * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
+ * memory
+ * @vaddr:	Virtual address of start of pool
+ * @paddr:	Physical address of start of pool
+ * @size:	Size in bytes of the pool
+ * @min_alloc_order:	Log2 of the minimum allocation size
+ *
+ * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
+							phys_addr_t paddr,
+							size_t size,
+							int min_alloc_order);
+
+/**
+ * tee_shm_pool_mgr_destroy() - Free a shared memory manager
+ * @poolm:	Pool manager to free
+ */
+static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
+{
+	poolm->ops->destroy_poolmgr(poolm);
+}
+
+/**
  * struct tee_shm_pool_mem_info - holds information needed to create a shared
  * memory pool
  * @vaddr:	Virtual address of start of pool
@@ -211,6 +318,40 @@ void *tee_get_drvdata(struct tee_device *teedev);
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
 
 /**
+ * tee_shm_priv_alloc() - Allocate shared memory privately
+ * @teedev:	Device that allocates the shared memory
+ * @size:	Requested size of shared memory
+ *
+ * Allocates a shared memory buffer that is not associated with any client
+ * context. Such buffers are owned by the TEE driver and used for internal
+ * calls.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
+
+/**
+ * tee_shm_register() - Register shared memory buffer
+ * @ctx:	Context that registers the shared memory
+ * @addr:	Address in userspace of the shared buffer
+ * @length:	Length of the shared buffer
+ * @flags:	Flags setting properties for the requested shared memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
+				 size_t length, u32 flags);
+
+/**
+ * tee_shm_is_registered() - Check if shared memory object is registered in TEE
+ * @shm:	Shared memory handle
+ * @returns true if object is registered in TEE
+ */
+static inline bool tee_shm_is_registered(struct tee_shm *shm)
+{
+	return shm && (shm->flags & TEE_SHM_REGISTER);
+}
+
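For kernel code the registration entry point is tee_shm_register(); per the comment above, @addr is an address in user space, so the natural caller is a driver acting on behalf of the current process. A sketch, assuming the flag combination matches what the user-space ioctl path passes (the function itself is illustrative):

#include <linux/err.h>
#include <linux/tee_drv.h>

/* Sketch: register a buffer of the current process with the TEE. */
static int example_register(struct tee_context *ctx, unsigned long uaddr,
			    size_t len)
{
	struct tee_shm *shm;

	shm = tee_shm_register(ctx, uaddr, len,
			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* The returned object carries TEE_SHM_REGISTER on success */
	if (!tee_shm_is_registered(shm))
		return -EINVAL;

	/* ... pass shm to the secure world, then drop it ... */
	tee_shm_free(shm);
	return 0;
}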
+/**
  * tee_shm_free() - Free shared memory
  * @shm:	Handle to shared memory to free
  */
@@ -260,11 +401,47 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
 
 /**
+ * tee_shm_get_size() - Get size of shared memory buffer
+ * @shm:	Shared memory handle
+ * @returns size of shared memory
+ */
+static inline size_t tee_shm_get_size(struct tee_shm *shm)
+{
+	return shm->size;
+}
+
+/**
+ * tee_shm_get_pages() - Get list of pages that hold shared buffer
+ * @shm:	Shared memory handle
+ * @num_pages:	Number of pages will be stored there
+ * @returns pointer to pages array
+ */
+static inline struct page **tee_shm_get_pages(struct tee_shm *shm,
+					      size_t *num_pages)
+{
+	*num_pages = shm->num_pages;
+	return shm->pages;
+}
+
+/**
+ * tee_shm_get_page_offset() - Get shared buffer offset from page start
+ * @shm:	Shared memory handle
+ * @returns page offset of shared buffer
+ */
+static inline size_t tee_shm_get_page_offset(struct tee_shm *shm)
+{
+	return shm->offset;
+}
+
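Together with tee_shm_get_size(), these accessors give a driver everything needed to describe a registered buffer page by page without reaching into struct tee_shm. A sketch, where example_describe_page() stands in for building whatever per-page descriptor the secure world expects:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tee_drv.h>

extern void example_describe_page(phys_addr_t pa, size_t len);

/* Sketch: walk the backing pages of a registered buffer. */
static void example_describe_shm(struct tee_shm *shm)
{
	size_t num_pages;
	struct page **pages = tee_shm_get_pages(shm, &num_pages);
	size_t offs = tee_shm_get_page_offset(shm);
	size_t left = tee_shm_get_size(shm);
	size_t n;

	for (n = 0; n < num_pages && left; n++) {
		size_t chunk = min_t(size_t, left, PAGE_SIZE - offs);

		example_describe_page(page_to_phys(pages[n]) + offs, chunk);
		left -= chunk;
		offs = 0;	/* only the first page is offset */
	}
}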
+/**
  * tee_shm_get_id() - Get id of a shared memory object
  * @shm:	Shared memory handle
  * @returns id
  */
-int tee_shm_get_id(struct tee_shm *shm);
+static inline int tee_shm_get_id(struct tee_shm *shm)
+{
+	return shm->id;
+}
 
 /**
  * tee_shm_get_from_id() - Find shared memory object and increase reference
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 267c12e7fd79..4b9eb064d7e7 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -50,6 +50,7 @@
 
 #define TEE_GEN_CAP_GP		(1 << 0)/* GlobalPlatform compliant TEE */
 #define TEE_GEN_CAP_PRIVILEGED	(1 << 1)/* Privileged device (for supplicant) */
+#define TEE_GEN_CAP_REG_MEM	(1 << 2)/* Supports registering shared memory */
 
 /*
  * TEE Implementation ID
@@ -339,6 +340,35 @@ struct tee_iocl_supp_send_arg {
 #define TEE_IOC_SUPPL_SEND	_IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
 				     struct tee_ioctl_buf_data)
 
+/**
+ * struct tee_ioctl_shm_register_data - Shared memory register argument
+ * @addr:	[in] Start address of shared memory to register
+ * @length:	[in/out] Length of shared memory to register
+ * @flags:	[in/out] Flags to/from registration.
+ * @id:		[out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_REGISTER below.
+ */
+struct tee_ioctl_shm_register_data {
+	__u64 addr;
+	__u64 length;
+	__u32 flags;
+	__s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_REGISTER - Register shared memory argument
+ *
+ * Registers shared memory between the user space process and secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The shared memory is unregistered when the descriptor is closed.
+ */
+#define TEE_IOC_SHM_REGISTER	_IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 9, \
+				      struct tee_ioctl_shm_register_data)
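From user space, registration is an ioctl on the TEE device that returns a new file descriptor whose lifetime bounds the registration. A minimal sketch, assuming the usual /dev/tee0 node and omitting the TEE_IOC_VERSION capability check (TEE_GEN_CAP_REG_MEM) that a robust client would do first:

/* Sketch: registering a user buffer with the TEE from user space. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int register_buf(void *buf, size_t len)
{
	struct tee_ioctl_shm_register_data data = {
		.addr = (uintptr_t)buf,
		.length = len,
		/* flags must be zero on input per the UAPI comment */
	};
	int devfd = open("/dev/tee0", O_RDWR);
	int shmfd;

	if (devfd < 0)
		return -1;
	/* On success the kernel fills in data.id and returns an fd */
	shmfd = ioctl(devfd, TEE_IOC_SHM_REGISTER, &data);
	close(devfd);
	/* The registration lives until shmfd is closed */
	return shmfd;
}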
 /*
  * Five syscalls are used when communicating with the TEE driver.
  * open(): opens the device associated with the driver