authorLinus Torvalds <torvalds@linux-foundation.org>2017-05-10 14:20:09 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-10 14:20:09 -0400
commita2d9214c730f54ff72c2940bcd7f22d1fccb26ec (patch)
treea50a1187ebba2c345213f27b4372939a3f39987b
parentde4d195308ad589626571dbe5789cebf9695a204 (diff)
parent414d06ace9cca3725b6c2072e1951e1e03807f63 (diff)
Merge tag 'armsoc-tee' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull TEE driver infrastructure and OP-TEE drivers from Arnd Bergmann:
 "This introduces a generic TEE framework in the kernel, to handle
  trusted environments (security coprocessor or software implementations
  such as OP-TEE/TrustZone). I'm sending it separately from the other
  arm-soc driver changes to give it a little more visibility. Once the
  subsystem is merged, we will likely keep this in the arm-soc drivers
  branch or have the maintainers submit pull requests directly,
  depending on the patch volume.

  I have reviewed earlier versions in the past, and have reviewed the
  latest version in person during Linaro Connect BUD17. Here is my
  overall assessment of the subsystem:

   - There is clearly demand for this, both for the generic
     infrastructure and the specific OP-TEE implementation.

   - The code has gone through a large number of reviews, and the
     review comments have all been addressed, but the reviews were not
     coming up with serious issues any more and nobody volunteered to
     vouch for the quality.

   - The user space ioctl interface is sufficient to work with the
     OP-TEE driver, and it should in principle work with other TEE
     implementations that follow the GlobalPlatform [1] standards, but
     it might need to be extended in minor ways depending on specific
     requirements of future TEE implementations.

   - The main downside of the API to me is how the user space is tied
     to the TEE implementation in hardware or firmware, but uses a
     generic way to communicate with it. This seems to be an inherent
     problem with what it is trying to do, and I could not come up with
     any better solution than what is implemented here.

  For a detailed history of the patch series, see
  https://lkml.org/lkml/2017/3/10/1277"

* tag 'armsoc-tee' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
  arm64: dt: hikey: Add optee node
  Documentation: tee subsystem and op-tee driver
  tee: add OP-TEE driver
  tee: generic TEE subsystem
  dt/bindings: add bindings for optee
-rw-r--r--  Documentation/00-INDEX | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r--  Documentation/ioctl/ioctl-number.txt | 1
-rw-r--r--  Documentation/tee.txt | 118
-rw-r--r--  MAINTAINERS | 13
-rw-r--r--  arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts | 7
-rw-r--r--  drivers/Kconfig | 2
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/tee/Kconfig | 18
-rw-r--r--  drivers/tee/Makefile | 5
-rw-r--r--  drivers/tee/optee/Kconfig | 7
-rw-r--r--  drivers/tee/optee/Makefile | 5
-rw-r--r--  drivers/tee/optee/call.c | 444
-rw-r--r--  drivers/tee/optee/core.c | 622
-rw-r--r--  drivers/tee/optee/optee_msg.h | 418
-rw-r--r--  drivers/tee/optee/optee_private.h | 183
-rw-r--r--  drivers/tee/optee/optee_smc.h | 450
-rw-r--r--  drivers/tee/optee/rpc.c | 396
-rw-r--r--  drivers/tee/optee/supp.c | 273
-rw-r--r--  drivers/tee/tee_core.c | 893
-rw-r--r--  drivers/tee/tee_private.h | 129
-rw-r--r--  drivers/tee/tee_shm.c | 358
-rw-r--r--  drivers/tee/tee_shm_pool.c | 156
-rw-r--r--  include/linux/tee_drv.h | 277
-rw-r--r--  include/uapi/linux/tee.h | 346
26 files changed, 5156 insertions(+), 0 deletions(-)
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 793acf999e9e..ed3e5e949fce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -412,6 +412,8 @@ sysctl/
 	- directory with info on the /proc/sys/* files.
 target/
 	- directory with info on generating TCM v4 fabric .ko modules
+tee.txt
+	- info on the TEE subsystem and drivers
 this_cpu_ops.txt
 	- List rationale behind and the way to use this_cpu operations.
 thermal/
diff --git a/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
new file mode 100644
index 000000000000..d38834c67dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/firmware/linaro,optee-tz.txt
@@ -0,0 +1,31 @@
1OP-TEE Device Tree Bindings
2
3OP-TEE is a piece of software using hardware features to provide a Trusted
4Execution Environment. The security can be provided with ARM TrustZone, but
5also by virtualization or a separate chip.
6
7We're using "linaro" as the first part of the compatible property for
8the reference implementation maintained by Linaro.
9
10* OP-TEE based on ARM TrustZone required properties:
11
12- compatible : should contain "linaro,optee-tz"
13
14- method : The method of calling the OP-TEE Trusted OS. Permitted
15 values are:
16
17 "smc" : SMC #0, with the register assignments specified
18 in drivers/tee/optee/optee_smc.h
19
20 "hvc" : HVC #0, with the register assignments specified
21 in drivers/tee/optee/optee_smc.h
22
23
24
25Example:
26 firmware {
27 optee {
28 compatible = "linaro,optee-tz";
29 method = "smc";
30 };
31 };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index f9fe94535b46..12e27844bb7b 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -173,6 +173,7 @@ lego LEGO Systems A/S
 lenovo	Lenovo Group Ltd.
 lg	LG Corporation
 licheepi	Lichee Pi
+linaro	Linaro Limited
 linux	Linux-specific binding
 lltc	Linear Technology Corporation
 lsi	LSI Corp. (LSI Logic)
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index eccb675a2852..1e9fcb4d0ec8 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -309,6 +309,7 @@ Code Seq#(hex) Include File Comments
 0xA3	80-8F	Port ACL		in development:
 			<mailto:tlewis@mindspring.com>
 0xA3	90-9F	linux/dtlk.h
+0xA4	00-1F	uapi/linux/tee.h	Generic TEE subsystem
 0xAA	00-3F	linux/uapi/linux/userfaultfd.h
 0xAB	00-1F	linux/nbd.h
 0xAC	00-1F	linux/raw.h
diff --git a/Documentation/tee.txt b/Documentation/tee.txt
new file mode 100644
index 000000000000..718599357596
--- /dev/null
+++ b/Documentation/tee.txt
@@ -0,0 +1,118 @@
1TEE subsystem
2This document describes the TEE subsystem in Linux.
3
4A TEE (Trusted Execution Environment) is a trusted OS running in some
5secure environment, for example, TrustZone on ARM CPUs, or a separate
6secure co-processor etc. A TEE driver handles the details needed to
7communicate with the TEE.
8
9This subsystem deals with:
10
11- Registration of TEE drivers
12
13- Managing shared memory between Linux and the TEE
14
15- Providing a generic API to the TEE
16
17The TEE interface
18=================
19
20include/uapi/linux/tee.h defines the generic interface to a TEE.
21
22User space (the client) connects to the driver by opening /dev/tee[0-9]* or
23/dev/teepriv[0-9]*.
24
25- TEE_IOC_SHM_ALLOC allocates shared memory and returns a file descriptor
26 which user space can mmap. When user space doesn't need the file
27 descriptor any more, it should be closed. When shared memory isn't needed
28 any longer it should be unmapped with munmap() to allow the reuse of
29 memory.
30
31- TEE_IOC_VERSION lets user space know which TEE this driver handles and
32  its capabilities.
33
34- TEE_IOC_OPEN_SESSION opens a new session to a Trusted Application.
35
36- TEE_IOC_INVOKE invokes a function in a Trusted Application.
37
38- TEE_IOC_CANCEL may cancel an ongoing TEE_IOC_OPEN_SESSION or TEE_IOC_INVOKE.
39
40- TEE_IOC_CLOSE_SESSION closes a session to a Trusted Application.
41
42There are two classes of clients, normal clients and supplicants. The latter is
43a helper process for the TEE to access resources in Linux, for example file
44system access. A normal client opens /dev/tee[0-9]* and a supplicant opens
45/dev/teepriv[0-9]*.
46
47Much of the communication between clients and the TEE is opaque to the
48driver. The main job for the driver is to receive requests from the
49clients, forward them to the TEE and send back the results. In the case of
50supplicants the communication goes in the other direction, the TEE sends
51requests to the supplicant which then sends back the result.
52
53OP-TEE driver
54=============
55
56The OP-TEE driver handles OP-TEE [1] based TEEs. Currently it is only the ARM
57TrustZone based OP-TEE solution that is supported.
58
59Lowest level of communication with OP-TEE builds on ARM SMC Calling
60Convention (SMCCC) [2], which is the foundation for OP-TEE's SMC interface
61[3] used internally by the driver. Stacked on top of that is OP-TEE Message
62Protocol [4].
63
64The OP-TEE SMC interface provides the basic functions required by SMCCC and
65some additional functions specific to OP-TEE. The most interesting functions are:
66
67- OPTEE_SMC_FUNCID_CALLS_UID (part of SMCCC) returns the version information
68 which is then returned by TEE_IOC_VERSION
69
70- OPTEE_SMC_CALL_GET_OS_UUID returns the particular OP-TEE implementation, used
71 to tell, for instance, a TrustZone OP-TEE apart from an OP-TEE running on a
72 separate secure co-processor.
73
74- OPTEE_SMC_CALL_WITH_ARG drives the OP-TEE message protocol
75
76- OPTEE_SMC_GET_SHM_CONFIG lets the driver and OP-TEE agree on which memory
77  range to use for shared memory between Linux and OP-TEE.
78
79The GlobalPlatform TEE Client API [5] is implemented on top of the generic
80TEE API.
81
82Picture of the relationship between the different components in the
83OP-TEE architecture.
84
85 User space Kernel Secure world
86 ~~~~~~~~~~ ~~~~~~ ~~~~~~~~~~~~
87 +--------+ +-------------+
88 | Client | | Trusted |
89 +--------+ | Application |
90 /\ +-------------+
91 || +----------+ /\
92 || |tee- | ||
93 || |supplicant| \/
94 || +----------+ +-------------+
95 \/ /\ | TEE Internal|
96 +-------+ || | API |
97 + TEE | || +--------+--------+ +-------------+
98 | Client| || | TEE | OP-TEE | | OP-TEE |
99 | API | \/ | subsys | driver | | Trusted OS |
100 +-------+----------------+----+-------+----+-----------+-------------+
101 | Generic TEE API | | OP-TEE MSG |
102 | IOCTL (TEE_IOC_*) | | SMCCC (OPTEE_SMC_CALL_*) |
103 +-----------------------------+ +------------------------------+
104
105RPCs (Remote Procedure Calls) are requests from secure world to the kernel driver
106or tee-supplicant. An RPC is identified by a special range of SMCCC return
107values from OPTEE_SMC_CALL_WITH_ARG. RPC messages which are intended for the
108kernel are handled by the kernel driver. Other RPC messages will be forwarded to
109tee-supplicant without further involvement of the driver, except switching
110shared memory buffer representation.
111
112References:
113[1] https://github.com/OP-TEE/optee_os
114[2] http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
115[3] drivers/tee/optee/optee_smc.h
116[4] drivers/tee/optee/optee_msg.h
117[5] http://www.globalplatform.org/specificationsdevice.asp look for
118 "TEE Client API Specification v1.0" and click download.
diff --git a/MAINTAINERS b/MAINTAINERS
index 0cfa573ecfef..2decf40d28e1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9518,6 +9518,11 @@ F: arch/*/oprofile/
 F:	drivers/oprofile/
 F:	include/linux/oprofile.h
 
+OP-TEE DRIVER
+M:	Jens Wiklander <jens.wiklander@linaro.org>
+S:	Maintained
+F:	drivers/tee/optee/
+
 ORACLE CLUSTER FILESYSTEM 2 (OCFS2)
 M:	Mark Fasheh <mfasheh@versity.com>
 M:	Joel Becker <jlbec@evilplan.org>
@@ -11299,6 +11304,14 @@ F: drivers/hwtracing/stm/
 F:	include/linux/stm.h
 F:	include/uapi/linux/stm.h
 
+TEE SUBSYSTEM
+M:	Jens Wiklander <jens.wiklander@linaro.org>
+S:	Maintained
+F:	include/linux/tee_drv.h
+F:	include/uapi/linux/tee.h
+F:	drivers/tee/
+F:	Documentation/tee.txt
+
 THUNDERBOLT DRIVER
 M:	Andreas Noever <andreas.noever@gmail.com>
 S:	Maintained
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index 9b4ba7169210..75bce2d0b1a8 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -411,6 +411,13 @@
 			};
 		};
 	};
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
 };
 
 &uart2 {
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 117ca14ccf85..ba2901e76769 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -204,4 +204,6 @@ source "drivers/fpga/Kconfig"
 
 source "drivers/fsi/Kconfig"
 
+source "drivers/tee/Kconfig"
+
 endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index edba1edc6654..cfabd141dba2 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -180,3 +180,4 @@ obj-$(CONFIG_ANDROID) += android/
 obj-$(CONFIG_NVMEM)		+= nvmem/
 obj-$(CONFIG_FPGA)		+= fpga/
 obj-$(CONFIG_FSI)		+= fsi/
+obj-$(CONFIG_TEE)		+= tee/
diff --git a/drivers/tee/Kconfig b/drivers/tee/Kconfig
new file mode 100644
index 000000000000..2330a4eb4e8b
--- /dev/null
+++ b/drivers/tee/Kconfig
@@ -0,0 +1,18 @@
1# Generic Trusted Execution Environment Configuration
2config TEE
3 tristate "Trusted Execution Environment support"
4 select DMA_SHARED_BUFFER
5 select GENERIC_ALLOCATOR
6 help
7 This implements a generic interface towards a Trusted Execution
8 Environment (TEE).
9
10if TEE
11
12menu "TEE drivers"
13
14source "drivers/tee/optee/Kconfig"
15
16endmenu
17
18endif
diff --git a/drivers/tee/Makefile b/drivers/tee/Makefile
new file mode 100644
index 000000000000..7a4e4a1ac39c
--- /dev/null
+++ b/drivers/tee/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_TEE) += tee.o
2tee-objs += tee_core.o
3tee-objs += tee_shm.o
4tee-objs += tee_shm_pool.o
5obj-$(CONFIG_OPTEE) += optee/
diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig
new file mode 100644
index 000000000000..0126de898036
--- /dev/null
+++ b/drivers/tee/optee/Kconfig
@@ -0,0 +1,7 @@
1# OP-TEE Trusted Execution Environment Configuration
2config OPTEE
3 tristate "OP-TEE"
4 depends on HAVE_ARM_SMCCC
5 help
6 This implements the OP-TEE Trusted Execution Environment (TEE)
7 driver.
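Usage note, not part of the patch itself: with the Kconfig entries above, the generic subsystem and the OP-TEE back end can be enabled with a config fragment along these lines (CONFIG_OPTEE is only selectable on platforms that provide HAVE_ARM_SMCCC):

	CONFIG_TEE=y
	CONFIG_OPTEE=y

Both symbols are tristate, so "=m" instead builds them as the tee.ko and optee.ko modules assembled by the Makefiles below.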
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
new file mode 100644
index 000000000000..92fe5789bcce
--- /dev/null
+++ b/drivers/tee/optee/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_OPTEE) += optee.o
2optee-objs += core.o
3optee-objs += call.o
4optee-objs += rpc.o
5optee-objs += supp.o
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
new file mode 100644
index 000000000000..f7b7b404c990
--- /dev/null
+++ b/drivers/tee/optee/call.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/arm-smccc.h>
15#include <linux/device.h>
16#include <linux/err.h>
17#include <linux/errno.h>
18#include <linux/slab.h>
19#include <linux/tee_drv.h>
20#include <linux/types.h>
21#include <linux/uaccess.h>
22#include "optee_private.h"
23#include "optee_smc.h"
24
25struct optee_call_waiter {
26 struct list_head list_node;
27 struct completion c;
28};
29
30static void optee_cq_wait_init(struct optee_call_queue *cq,
31 struct optee_call_waiter *w)
32{
33 /*
34 * We're preparing to make a call to secure world. In case we can't
35 * allocate a thread in secure world we'll end up waiting in
36 * optee_cq_wait_for_completion().
37 *
38 * Normally if there's no contention in secure world the call will
39 * complete and we can cleanup directly with optee_cq_wait_final().
40 */
41 mutex_lock(&cq->mutex);
42
43 /*
44 * We add ourselves to the queue, but we don't wait. This
45 * guarantees that we don't lose a completion if secure world
46 * returns busy and another thread just exited and tried to complete
47 * someone.
48 */
49 init_completion(&w->c);
50 list_add_tail(&w->list_node, &cq->waiters);
51
52 mutex_unlock(&cq->mutex);
53}
54
55static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
56 struct optee_call_waiter *w)
57{
58 wait_for_completion(&w->c);
59
60 mutex_lock(&cq->mutex);
61
62 /* Move to end of list to get out of the way for other waiters */
63 list_del(&w->list_node);
64 reinit_completion(&w->c);
65 list_add_tail(&w->list_node, &cq->waiters);
66
67 mutex_unlock(&cq->mutex);
68}
69
70static void optee_cq_complete_one(struct optee_call_queue *cq)
71{
72 struct optee_call_waiter *w;
73
74 list_for_each_entry(w, &cq->waiters, list_node) {
75 if (!completion_done(&w->c)) {
76 complete(&w->c);
77 break;
78 }
79 }
80}
81
82static void optee_cq_wait_final(struct optee_call_queue *cq,
83 struct optee_call_waiter *w)
84{
85 /*
86 * We're done with the call to secure world. The thread in secure
87 * world that was used for this call is now available for some
88 * other task to use.
89 */
90 mutex_lock(&cq->mutex);
91
92 /* Get out of the list */
93 list_del(&w->list_node);
94
95 /* Wake up one eventual waiting task */
96 optee_cq_complete_one(cq);
97
98 /*
99 * If we're completed we've got a completion from another task that
100 * was just done with its call to secure world. Since yet another
101 * thread now is available in secure world wake up another eventual
102 * waiting task.
103 */
104 if (completion_done(&w->c))
105 optee_cq_complete_one(cq);
106
107 mutex_unlock(&cq->mutex);
108}
109
110/* Requires the filpstate mutex to be held */
111static struct optee_session *find_session(struct optee_context_data *ctxdata,
112 u32 session_id)
113{
114 struct optee_session *sess;
115
116 list_for_each_entry(sess, &ctxdata->sess_list, list_node)
117 if (sess->session_id == session_id)
118 return sess;
119
120 return NULL;
121}
122
123/**
124 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
125 * @ctx: calling context
126 * @parg: physical address of message to pass to secure world
127 *
128 * Does an SMC to OP-TEE in secure world and handles eventual resulting
129 * Remote Procedure Calls (RPC) from OP-TEE.
130 *
131 * Returns return code from secure world, 0 is OK
132 */
133u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
134{
135 struct optee *optee = tee_get_drvdata(ctx->teedev);
136 struct optee_call_waiter w;
137 struct optee_rpc_param param = { };
138 u32 ret;
139
140 param.a0 = OPTEE_SMC_CALL_WITH_ARG;
141 reg_pair_from_64(&param.a1, &param.a2, parg);
142 /* Initialize waiter */
143 optee_cq_wait_init(&optee->call_queue, &w);
144 while (true) {
145 struct arm_smccc_res res;
146
147 optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
148 param.a4, param.a5, param.a6, param.a7,
149 &res);
150
151 if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
152 /*
153 * Out of threads in secure world, wait for a thread to
154 * become available.
155 */
156 optee_cq_wait_for_completion(&optee->call_queue, &w);
157 } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
158 param.a0 = res.a0;
159 param.a1 = res.a1;
160 param.a2 = res.a2;
161 param.a3 = res.a3;
162 optee_handle_rpc(ctx, &param);
163 } else {
164 ret = res.a0;
165 break;
166 }
167 }
168
169 /*
170 * We're done with our thread in secure world. If there are any
171 * waiters, wake up one.
172 */
173 optee_cq_wait_final(&optee->call_queue, &w);
174
175 return ret;
176}
177
178static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
179 struct optee_msg_arg **msg_arg,
180 phys_addr_t *msg_parg)
181{
182 int rc;
183 struct tee_shm *shm;
184 struct optee_msg_arg *ma;
185
186 shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
187 TEE_SHM_MAPPED);
188 if (IS_ERR(shm))
189 return shm;
190
191 ma = tee_shm_get_va(shm, 0);
192 if (IS_ERR(ma)) {
193 rc = PTR_ERR(ma);
194 goto out;
195 }
196
197 rc = tee_shm_get_pa(shm, 0, msg_parg);
198 if (rc)
199 goto out;
200
201 memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
202 ma->num_params = num_params;
203 *msg_arg = ma;
204out:
205 if (rc) {
206 tee_shm_free(shm);
207 return ERR_PTR(rc);
208 }
209
210 return shm;
211}
212
213int optee_open_session(struct tee_context *ctx,
214 struct tee_ioctl_open_session_arg *arg,
215 struct tee_param *param)
216{
217 struct optee_context_data *ctxdata = ctx->data;
218 int rc;
219 struct tee_shm *shm;
220 struct optee_msg_arg *msg_arg;
221 phys_addr_t msg_parg;
222 struct optee_session *sess = NULL;
223
224 /* +2 for the meta parameters added below */
225 shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
226 if (IS_ERR(shm))
227 return PTR_ERR(shm);
228
229 msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
230 msg_arg->cancel_id = arg->cancel_id;
231
232 /*
233 * Initialize and add the meta parameters needed when opening a
234 * session.
235 */
236 msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
237 OPTEE_MSG_ATTR_META;
238 msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
239 OPTEE_MSG_ATTR_META;
240 memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
241 memcpy(&msg_arg->params[1].u.value, arg->uuid, sizeof(arg->clnt_uuid));
242 msg_arg->params[1].u.value.c = arg->clnt_login;
243
244 rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
245 if (rc)
246 goto out;
247
248 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
249 if (!sess) {
250 rc = -ENOMEM;
251 goto out;
252 }
253
254 if (optee_do_call_with_arg(ctx, msg_parg)) {
255 msg_arg->ret = TEEC_ERROR_COMMUNICATION;
256 msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
257 }
258
259 if (msg_arg->ret == TEEC_SUCCESS) {
260 /* A new session has been created, add it to the list. */
261 sess->session_id = msg_arg->session;
262 mutex_lock(&ctxdata->mutex);
263 list_add(&sess->list_node, &ctxdata->sess_list);
264 mutex_unlock(&ctxdata->mutex);
265 } else {
266 kfree(sess);
267 }
268
269 if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
270 arg->ret = TEEC_ERROR_COMMUNICATION;
271 arg->ret_origin = TEEC_ORIGIN_COMMS;
272 /* Close session again to avoid leakage */
273 optee_close_session(ctx, msg_arg->session);
274 } else {
275 arg->session = msg_arg->session;
276 arg->ret = msg_arg->ret;
277 arg->ret_origin = msg_arg->ret_origin;
278 }
279out:
280 tee_shm_free(shm);
281
282 return rc;
283}
284
285int optee_close_session(struct tee_context *ctx, u32 session)
286{
287 struct optee_context_data *ctxdata = ctx->data;
288 struct tee_shm *shm;
289 struct optee_msg_arg *msg_arg;
290 phys_addr_t msg_parg;
291 struct optee_session *sess;
292
293 /* Check that the session is valid and remove it from the list */
294 mutex_lock(&ctxdata->mutex);
295 sess = find_session(ctxdata, session);
296 if (sess)
297 list_del(&sess->list_node);
298 mutex_unlock(&ctxdata->mutex);
299 if (!sess)
300 return -EINVAL;
301 kfree(sess);
302
303 shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
304 if (IS_ERR(shm))
305 return PTR_ERR(shm);
306
307 msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
308 msg_arg->session = session;
309 optee_do_call_with_arg(ctx, msg_parg);
310
311 tee_shm_free(shm);
312 return 0;
313}
314
315int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
316 struct tee_param *param)
317{
318 struct optee_context_data *ctxdata = ctx->data;
319 struct tee_shm *shm;
320 struct optee_msg_arg *msg_arg;
321 phys_addr_t msg_parg;
322 struct optee_session *sess;
323 int rc;
324
325 /* Check that the session is valid */
326 mutex_lock(&ctxdata->mutex);
327 sess = find_session(ctxdata, arg->session);
328 mutex_unlock(&ctxdata->mutex);
329 if (!sess)
330 return -EINVAL;
331
332 shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
333 if (IS_ERR(shm))
334 return PTR_ERR(shm);
335 msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
336 msg_arg->func = arg->func;
337 msg_arg->session = arg->session;
338 msg_arg->cancel_id = arg->cancel_id;
339
340 rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
341 if (rc)
342 goto out;
343
344 if (optee_do_call_with_arg(ctx, msg_parg)) {
345 msg_arg->ret = TEEC_ERROR_COMMUNICATION;
346 msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
347 }
348
349 if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
350 msg_arg->ret = TEEC_ERROR_COMMUNICATION;
351 msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
352 }
353
354 arg->ret = msg_arg->ret;
355 arg->ret_origin = msg_arg->ret_origin;
356out:
357 tee_shm_free(shm);
358 return rc;
359}
360
361int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
362{
363 struct optee_context_data *ctxdata = ctx->data;
364 struct tee_shm *shm;
365 struct optee_msg_arg *msg_arg;
366 phys_addr_t msg_parg;
367 struct optee_session *sess;
368
369 /* Check that the session is valid */
370 mutex_lock(&ctxdata->mutex);
371 sess = find_session(ctxdata, session);
372 mutex_unlock(&ctxdata->mutex);
373 if (!sess)
374 return -EINVAL;
375
376 shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
377 if (IS_ERR(shm))
378 return PTR_ERR(shm);
379
380 msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
381 msg_arg->session = session;
382 msg_arg->cancel_id = cancel_id;
383 optee_do_call_with_arg(ctx, msg_parg);
384
385 tee_shm_free(shm);
386 return 0;
387}
388
389/**
390 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
391 * in OP-TEE
392 * @optee: main service struct
393 */
394void optee_enable_shm_cache(struct optee *optee)
395{
396 struct optee_call_waiter w;
397
398 /* We need to retry until secure world isn't busy. */
399 optee_cq_wait_init(&optee->call_queue, &w);
400 while (true) {
401 struct arm_smccc_res res;
402
403 optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
404 0, &res);
405 if (res.a0 == OPTEE_SMC_RETURN_OK)
406 break;
407 optee_cq_wait_for_completion(&optee->call_queue, &w);
408 }
409 optee_cq_wait_final(&optee->call_queue, &w);
410}
411
412/**
413 * optee_disable_shm_cache() - Disables caching of some shared memory allocation
414 * in OP-TEE
415 * @optee: main service struct
416 */
417void optee_disable_shm_cache(struct optee *optee)
418{
419 struct optee_call_waiter w;
420
421 /* We need to retry until secure world isn't busy. */
422 optee_cq_wait_init(&optee->call_queue, &w);
423 while (true) {
424 union {
425 struct arm_smccc_res smccc;
426 struct optee_smc_disable_shm_cache_result result;
427 } res;
428
429 optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
430 0, &res.smccc);
431 if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
432 break; /* All shm's freed */
433 if (res.result.status == OPTEE_SMC_RETURN_OK) {
434 struct tee_shm *shm;
435
436 shm = reg_pair_to_ptr(res.result.shm_upper32,
437 res.result.shm_lower32);
438 tee_shm_free(shm);
439 } else {
440 optee_cq_wait_for_completion(&optee->call_queue, &w);
441 }
442 }
443 optee_cq_wait_final(&optee->call_queue, &w);
444}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
new file mode 100644
index 000000000000..58169e519422
--- /dev/null
+++ b/drivers/tee/optee/core.c
@@ -0,0 +1,622 @@
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/arm-smccc.h>
18#include <linux/errno.h>
19#include <linux/io.h>
20#include <linux/module.h>
21#include <linux/of.h>
22#include <linux/of_platform.h>
23#include <linux/platform_device.h>
24#include <linux/slab.h>
25#include <linux/string.h>
26#include <linux/tee_drv.h>
27#include <linux/types.h>
28#include <linux/uaccess.h>
29#include "optee_private.h"
30#include "optee_smc.h"
31
32#define DRIVER_NAME "optee"
33
34#define OPTEE_SHM_NUM_PRIV_PAGES 1
35
36/**
37 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
38 * struct tee_param
39 * @params: subsystem internal parameter representation
40 * @num_params: number of elements in the parameter arrays
41 * @msg_params: OPTEE_MSG parameters
42 * Returns 0 on success or <0 on failure
43 */
44int optee_from_msg_param(struct tee_param *params, size_t num_params,
45 const struct optee_msg_param *msg_params)
46{
47 int rc;
48 size_t n;
49 struct tee_shm *shm;
50 phys_addr_t pa;
51
52 for (n = 0; n < num_params; n++) {
53 struct tee_param *p = params + n;
54 const struct optee_msg_param *mp = msg_params + n;
55 u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
56
57 switch (attr) {
58 case OPTEE_MSG_ATTR_TYPE_NONE:
59 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
60 memset(&p->u, 0, sizeof(p->u));
61 break;
62 case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
63 case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
64 case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
65 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
66 attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
67 p->u.value.a = mp->u.value.a;
68 p->u.value.b = mp->u.value.b;
69 p->u.value.c = mp->u.value.c;
70 break;
71 case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
72 case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
73 case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
74 p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
75 attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
76 p->u.memref.size = mp->u.tmem.size;
77 shm = (struct tee_shm *)(unsigned long)
78 mp->u.tmem.shm_ref;
79 if (!shm) {
80 p->u.memref.shm_offs = 0;
81 p->u.memref.shm = NULL;
82 break;
83 }
84 rc = tee_shm_get_pa(shm, 0, &pa);
85 if (rc)
86 return rc;
87 p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
88 p->u.memref.shm = shm;
89
90 /* Check that the memref is covered by the shm object */
91 if (p->u.memref.size) {
92 size_t o = p->u.memref.shm_offs +
93 p->u.memref.size - 1;
94
95 rc = tee_shm_get_pa(shm, o, NULL);
96 if (rc)
97 return rc;
98 }
99 break;
100 default:
101 return -EINVAL;
102 }
103 }
104 return 0;
105}
106
107/**
108 * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
109 * @msg_params: OPTEE_MSG parameters
110 * @num_params: number of elements in the parameter arrays
111 * @params: subsystem internal parameter representation
112 * Returns 0 on success or <0 on failure
113 */
114int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
115 const struct tee_param *params)
116{
117 int rc;
118 size_t n;
119 phys_addr_t pa;
120
121 for (n = 0; n < num_params; n++) {
122 const struct tee_param *p = params + n;
123 struct optee_msg_param *mp = msg_params + n;
124
125 switch (p->attr) {
126 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
127 mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
128 memset(&mp->u, 0, sizeof(mp->u));
129 break;
130 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
131 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
132 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
133 mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
134 TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
135 mp->u.value.a = p->u.value.a;
136 mp->u.value.b = p->u.value.b;
137 mp->u.value.c = p->u.value.c;
138 break;
139 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
140 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
141 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
142 mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT +
143 p->attr -
144 TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
145 mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
146 mp->u.tmem.size = p->u.memref.size;
147 if (!p->u.memref.shm) {
148 mp->u.tmem.buf_ptr = 0;
149 break;
150 }
151 rc = tee_shm_get_pa(p->u.memref.shm,
152 p->u.memref.shm_offs, &pa);
153 if (rc)
154 return rc;
155 mp->u.tmem.buf_ptr = pa;
156 mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
157 OPTEE_MSG_ATTR_CACHE_SHIFT;
158 break;
159 default:
160 return -EINVAL;
161 }
162 }
163 return 0;
164}
165
166static void optee_get_version(struct tee_device *teedev,
167 struct tee_ioctl_version_data *vers)
168{
169 struct tee_ioctl_version_data v = {
170 .impl_id = TEE_IMPL_ID_OPTEE,
171 .impl_caps = TEE_OPTEE_CAP_TZ,
172 .gen_caps = TEE_GEN_CAP_GP,
173 };
174 *vers = v;
175}
176
177static int optee_open(struct tee_context *ctx)
178{
179 struct optee_context_data *ctxdata;
180 struct tee_device *teedev = ctx->teedev;
181 struct optee *optee = tee_get_drvdata(teedev);
182
183 ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
184 if (!ctxdata)
185 return -ENOMEM;
186
187 if (teedev == optee->supp_teedev) {
188 bool busy = true;
189
190 mutex_lock(&optee->supp.ctx_mutex);
191 if (!optee->supp.ctx) {
192 busy = false;
193 optee->supp.ctx = ctx;
194 }
195 mutex_unlock(&optee->supp.ctx_mutex);
196 if (busy) {
197 kfree(ctxdata);
198 return -EBUSY;
199 }
200 }
201
202 mutex_init(&ctxdata->mutex);
203 INIT_LIST_HEAD(&ctxdata->sess_list);
204
205 ctx->data = ctxdata;
206 return 0;
207}
208
209static void optee_release(struct tee_context *ctx)
210{
211 struct optee_context_data *ctxdata = ctx->data;
212 struct tee_device *teedev = ctx->teedev;
213 struct optee *optee = tee_get_drvdata(teedev);
214 struct tee_shm *shm;
215 struct optee_msg_arg *arg = NULL;
216 phys_addr_t parg;
217 struct optee_session *sess;
218 struct optee_session *sess_tmp;
219
220 if (!ctxdata)
221 return;
222
223 shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
224 if (!IS_ERR(shm)) {
225 arg = tee_shm_get_va(shm, 0);
226 /*
227 * If va2pa fails for some reason, we can't call
228 * optee_close_session(), only free the memory. Secure OS
229 * will leak sessions and finally refuse more sessions, but
230 * we will at least let normal world reclaim its memory.
231 */
232 if (!IS_ERR(arg))
233 tee_shm_va2pa(shm, arg, &parg);
234 }
235
236 list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
237 list_node) {
238 list_del(&sess->list_node);
239 if (!IS_ERR_OR_NULL(arg)) {
240 memset(arg, 0, sizeof(*arg));
241 arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
242 arg->session = sess->session_id;
243 optee_do_call_with_arg(ctx, parg);
244 }
245 kfree(sess);
246 }
247 kfree(ctxdata);
248
249 if (!IS_ERR(shm))
250 tee_shm_free(shm);
251
252 ctx->data = NULL;
253
254 if (teedev == optee->supp_teedev) {
255 mutex_lock(&optee->supp.ctx_mutex);
256 optee->supp.ctx = NULL;
257 mutex_unlock(&optee->supp.ctx_mutex);
258 }
259}
260
261static struct tee_driver_ops optee_ops = {
262 .get_version = optee_get_version,
263 .open = optee_open,
264 .release = optee_release,
265 .open_session = optee_open_session,
266 .close_session = optee_close_session,
267 .invoke_func = optee_invoke_func,
268 .cancel_req = optee_cancel_req,
269};
270
271static struct tee_desc optee_desc = {
272 .name = DRIVER_NAME "-clnt",
273 .ops = &optee_ops,
274 .owner = THIS_MODULE,
275};
276
277static struct tee_driver_ops optee_supp_ops = {
278 .get_version = optee_get_version,
279 .open = optee_open,
280 .release = optee_release,
281 .supp_recv = optee_supp_recv,
282 .supp_send = optee_supp_send,
283};
284
285static struct tee_desc optee_supp_desc = {
286 .name = DRIVER_NAME "-supp",
287 .ops = &optee_supp_ops,
288 .owner = THIS_MODULE,
289 .flags = TEE_DESC_PRIVILEGED,
290};
291
292static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
293{
294 struct arm_smccc_res res;
295
296 invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
297
298 if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
299 res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
300 return true;
301 return false;
302}
303
304static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
305{
306 union {
307 struct arm_smccc_res smccc;
308 struct optee_smc_calls_revision_result result;
309 } res;
310
311 invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
312
313 if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
314 (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
315 return true;
316 return false;
317}
318
319static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
320 u32 *sec_caps)
321{
322 union {
323 struct arm_smccc_res smccc;
324 struct optee_smc_exchange_capabilities_result result;
325 } res;
326 u32 a1 = 0;
327
328 /*
329 * TODO This isn't enough to tell if it's a UP system (from the kernel
330 * point of view) or not, is_smp() returns the information
331 * needed, but can't be called directly from here.
332 */
333 if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
334 a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
335
336 invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
337 &res.smccc);
338
339 if (res.result.status != OPTEE_SMC_RETURN_OK)
340 return false;
341
342 *sec_caps = res.result.capabilities;
343 return true;
344}
345
346static struct tee_shm_pool *
347optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
348{
349 union {
350 struct arm_smccc_res smccc;
351 struct optee_smc_get_shm_config_result result;
352 } res;
353 struct tee_shm_pool *pool;
354 unsigned long vaddr;
355 phys_addr_t paddr;
356 size_t size;
357 phys_addr_t begin;
358 phys_addr_t end;
359 void *va;
360 struct tee_shm_pool_mem_info priv_info;
361 struct tee_shm_pool_mem_info dmabuf_info;
362
363 invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
364 if (res.result.status != OPTEE_SMC_RETURN_OK) {
365 pr_info("shm service not available\n");
366 return ERR_PTR(-ENOENT);
367 }
368
369 if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
370 pr_err("only normal cached shared memory supported\n");
371 return ERR_PTR(-EINVAL);
372 }
373
374 begin = roundup(res.result.start, PAGE_SIZE);
375 end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
376 paddr = begin;
377 size = end - begin;
378
379 if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
380 pr_err("too small shared memory area\n");
381 return ERR_PTR(-EINVAL);
382 }
383
384 va = memremap(paddr, size, MEMREMAP_WB);
385 if (!va) {
386 pr_err("shared memory ioremap failed\n");
387 return ERR_PTR(-EINVAL);
388 }
389 vaddr = (unsigned long)va;
390
391 priv_info.vaddr = vaddr;
392 priv_info.paddr = paddr;
393 priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
394 dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
395 dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
396 dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
397
398 pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
399 if (IS_ERR(pool)) {
400 memunmap(va);
401 goto out;
402 }
403
404 *memremaped_shm = va;
405out:
406 return pool;
407}
408
409/* Simple wrapper functions to be able to use a function pointer */
410static void optee_smccc_smc(unsigned long a0, unsigned long a1,
411 unsigned long a2, unsigned long a3,
412 unsigned long a4, unsigned long a5,
413 unsigned long a6, unsigned long a7,
414 struct arm_smccc_res *res)
415{
416 arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
417}
418
419static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
420 unsigned long a2, unsigned long a3,
421 unsigned long a4, unsigned long a5,
422 unsigned long a6, unsigned long a7,
423 struct arm_smccc_res *res)
424{
425 arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
426}
427
428static optee_invoke_fn *get_invoke_func(struct device_node *np)
429{
430 const char *method;
431
432 pr_info("probing for conduit method from DT.\n");
433
434 if (of_property_read_string(np, "method", &method)) {
435 pr_warn("missing \"method\" property\n");
436 return ERR_PTR(-ENXIO);
437 }
438
439 if (!strcmp("hvc", method))
440 return optee_smccc_hvc;
441 else if (!strcmp("smc", method))
442 return optee_smccc_smc;
443
444 pr_warn("invalid \"method\" property: %s\n", method);
445 return ERR_PTR(-EINVAL);
446}
447
448static struct optee *optee_probe(struct device_node *np)
449{
450 optee_invoke_fn *invoke_fn;
451 struct tee_shm_pool *pool;
452 struct optee *optee = NULL;
453 void *memremaped_shm = NULL;
454 struct tee_device *teedev;
455 u32 sec_caps;
456 int rc;
457
458 invoke_fn = get_invoke_func(np);
459 if (IS_ERR(invoke_fn))
460 return (void *)invoke_fn;
461
462 if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
463 pr_warn("api uid mismatch\n");
464 return ERR_PTR(-EINVAL);
465 }
466
467 if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
468 pr_warn("api revision mismatch\n");
469 return ERR_PTR(-EINVAL);
470 }
471
472 if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
473 pr_warn("capabilities mismatch\n");
474 return ERR_PTR(-EINVAL);
475 }
476
477 /*
478 * We have no other option for shared memory, if secure world
479 * doesn't have any reserved memory we can use we can't continue.
480 */
481 if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
482 return ERR_PTR(-EINVAL);
483
484 pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
485 if (IS_ERR(pool))
486 return (void *)pool;
487
488 optee = kzalloc(sizeof(*optee), GFP_KERNEL);
489 if (!optee) {
490 rc = -ENOMEM;
491 goto err;
492 }
493
494 optee->invoke_fn = invoke_fn;
495
496 teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
497 if (IS_ERR(teedev)) {
498 rc = PTR_ERR(teedev);
499 goto err;
500 }
501 optee->teedev = teedev;
502
503 teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
504 if (IS_ERR(teedev)) {
505 rc = PTR_ERR(teedev);
506 goto err;
507 }
508 optee->supp_teedev = teedev;
509
510 rc = tee_device_register(optee->teedev);
511 if (rc)
512 goto err;
513
514 rc = tee_device_register(optee->supp_teedev);
515 if (rc)
516 goto err;
517
518 mutex_init(&optee->call_queue.mutex);
519 INIT_LIST_HEAD(&optee->call_queue.waiters);
520 optee_wait_queue_init(&optee->wait_queue);
521 optee_supp_init(&optee->supp);
522 optee->memremaped_shm = memremaped_shm;
523 optee->pool = pool;
524
525 optee_enable_shm_cache(optee);
526
527 pr_info("initialized driver\n");
528 return optee;
529err:
530 if (optee) {
531 /*
532 * tee_device_unregister() is safe to call even if the
533 * devices haven't been registered with
534 * tee_device_register() yet.
535 */
536 tee_device_unregister(optee->supp_teedev);
537 tee_device_unregister(optee->teedev);
538 kfree(optee);
539 }
540 if (pool)
541 tee_shm_pool_free(pool);
542 if (memremaped_shm)
543 memunmap(memremaped_shm);
544 return ERR_PTR(rc);
545}
546
547static void optee_remove(struct optee *optee)
548{
549 /*
550 * Ask OP-TEE to free all cached shared memory objects to decrease
551 * reference counters and also avoid wild pointers in secure world
552 * into the old shared memory range.
553 */
554 optee_disable_shm_cache(optee);
555
556 /*
557 * The two devices have to be unregistered before we can free the
558 * other resources.
559 */
560 tee_device_unregister(optee->supp_teedev);
561 tee_device_unregister(optee->teedev);
562
563 tee_shm_pool_free(optee->pool);
564 if (optee->memremaped_shm)
565 memunmap(optee->memremaped_shm);
566 optee_wait_queue_exit(&optee->wait_queue);
567 optee_supp_uninit(&optee->supp);
568 mutex_destroy(&optee->call_queue.mutex);
569
570 kfree(optee);
571}
572
573static const struct of_device_id optee_match[] = {
574 { .compatible = "linaro,optee-tz" },
575 {},
576};
577
578static struct optee *optee_svc;
579
580static int __init optee_driver_init(void)
581{
582 struct device_node *fw_np;
583 struct device_node *np;
584 struct optee *optee;
585
586 /* Node is supposed to be below /firmware */
587 fw_np = of_find_node_by_name(NULL, "firmware");
588 if (!fw_np)
589 return -ENODEV;
590
591 np = of_find_matching_node(fw_np, optee_match);
592 of_node_put(fw_np);
593 if (!np)
594 return -ENODEV;
595
596 optee = optee_probe(np);
597 of_node_put(np);
598
599 if (IS_ERR(optee))
600 return PTR_ERR(optee);
601
602 optee_svc = optee;
603
604 return 0;
605}
606module_init(optee_driver_init);
607
608static void __exit optee_driver_exit(void)
609{
610 struct optee *optee = optee_svc;
611
612 optee_svc = NULL;
613 if (optee)
614 optee_remove(optee);
615}
616module_exit(optee_driver_exit);
617
618MODULE_AUTHOR("Linaro");
619MODULE_DESCRIPTION("OP-TEE driver");
620MODULE_SUPPORTED_DEVICE("");
621MODULE_VERSION("1.0");
622MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
new file mode 100644
index 000000000000..dd7a06ee0462
--- /dev/null
+++ b/drivers/tee/optee/optee_msg.h
@@ -0,0 +1,418 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27#ifndef _OPTEE_MSG_H
28#define _OPTEE_MSG_H
29
30#include <linux/bitops.h>
31#include <linux/types.h>
32
33/*
34 * This file defines the OP-TEE message protocol used to communicate
35 * with an instance of OP-TEE running in secure world.
36 *
37 * This file is divided into three sections.
38 * 1. Formatting of messages.
39 * 2. Requests from normal world
40 * 3. Requests from secure world, Remote Procedure Call (RPC), handled by
41 * tee-supplicant.
42 */
43
44/*****************************************************************************
45 * Part 1 - formatting of messages
46 *****************************************************************************/
47
48#define OPTEE_MSG_ATTR_TYPE_NONE 0x0
49#define OPTEE_MSG_ATTR_TYPE_VALUE_INPUT 0x1
50#define OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT 0x2
51#define OPTEE_MSG_ATTR_TYPE_VALUE_INOUT 0x3
52#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
53#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
54#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
55#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
56#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
57#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
58
59#define OPTEE_MSG_ATTR_TYPE_MASK GENMASK(7, 0)
60
61/*
62 * Meta parameter to be absorbed by the Secure OS and not passed
63 * to the Trusted Application.
64 *
65 * Currently only used with OPTEE_MSG_CMD_OPEN_SESSION.
66 */
67#define OPTEE_MSG_ATTR_META BIT(8)
68
69/*
70 * The temporary shared memory object is not physically contiguous and this
71 * temp memref is followed by another fragment until the last temp memref
72 * that doesn't have this bit set.
73 */
74#define OPTEE_MSG_ATTR_FRAGMENT BIT(9)
75
76/*
77 * Memory attributes for caching passed with temp memrefs. The actual value
78 * used is defined outside the message protocol with the exception of
79 * OPTEE_MSG_ATTR_CACHE_PREDEFINED which means the attributes already
80 * defined for the memory range should be used. If optee_smc.h is used as
81 * bearer of this protocol OPTEE_SMC_SHM_* is used for values.
82 */
83#define OPTEE_MSG_ATTR_CACHE_SHIFT 16
84#define OPTEE_MSG_ATTR_CACHE_MASK GENMASK(2, 0)
85#define OPTEE_MSG_ATTR_CACHE_PREDEFINED 0
86
87/*
88 * Same values as TEE_LOGIN_* from TEE Internal API
89 */
90#define OPTEE_MSG_LOGIN_PUBLIC 0x00000000
91#define OPTEE_MSG_LOGIN_USER 0x00000001
92#define OPTEE_MSG_LOGIN_GROUP 0x00000002
93#define OPTEE_MSG_LOGIN_APPLICATION 0x00000004
94#define OPTEE_MSG_LOGIN_APPLICATION_USER 0x00000005
95#define OPTEE_MSG_LOGIN_APPLICATION_GROUP 0x00000006
96
97/**
98 * struct optee_msg_param_tmem - temporary memory reference parameter
99 * @buf_ptr: Address of the buffer
100 * @size: Size of the buffer
101 * @shm_ref: Temporary shared memory reference, pointer to a struct tee_shm
102 *
103 * Secure and normal world communicate pointers as physical addresses
104 * instead of virtual addresses. This is because secure and normal world
105 * have completely independent memory mappings. Normal world can even have a
106 * hypervisor which needs to translate the guest physical address (AKA IPA
107 * in ARM documentation) to a real physical address before passing the
108 * structure to secure world.
109 */
110struct optee_msg_param_tmem {
111 u64 buf_ptr;
112 u64 size;
113 u64 shm_ref;
114};
115
116/**
117 * struct optee_msg_param_rmem - registered memory reference parameter
118 * @offs: Offset into shared memory reference
119 * @size: Size of the buffer
120 * @shm_ref: Shared memory reference, pointer to a struct tee_shm
121 */
122struct optee_msg_param_rmem {
123 u64 offs;
124 u64 size;
125 u64 shm_ref;
126};
127
128/**
129 * struct optee_msg_param_value - opaque value parameter
130 *
131 * Value parameters are passed unchecked between normal and secure world.
132 */
133struct optee_msg_param_value {
134 u64 a;
135 u64 b;
136 u64 c;
137};
138
139/**
140 * struct optee_msg_param - parameter used together with struct optee_msg_arg
141 * @attr: attributes
142 * @tmem: parameter by temporary memory reference
143 * @rmem: parameter by registered memory reference
144 * @value: parameter by opaque value
145 *
146 * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
147 * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value,
148 * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates tmem and
149 * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates rmem.
150 * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
151 */
152struct optee_msg_param {
153 u64 attr;
154 union {
155 struct optee_msg_param_tmem tmem;
156 struct optee_msg_param_rmem rmem;
157 struct optee_msg_param_value value;
158 } u;
159};
160
161/**
162 * struct optee_msg_arg - call argument
163 * @cmd: Command, one of OPTEE_MSG_CMD_* or OPTEE_MSG_RPC_CMD_*
164 * @func: Trusted Application function, specific to the Trusted Application,
165 * used if cmd == OPTEE_MSG_CMD_INVOKE_COMMAND
166 * @session: In parameter for all OPTEE_MSG_CMD_* except
167 * OPTEE_MSG_CMD_OPEN_SESSION where it's an output parameter instead
168 * @cancel_id: Cancellation id, a unique value to identify this request
169 * @ret: return value
170 * @ret_origin: origin of the return value
171 * @num_params: number of parameters supplied to the OS Command
172 * @params: the parameters supplied to the OS Command
173 *
174 * All normal calls to the Trusted OS use this struct. If cmd requires more
175 * information than these fields hold, it can be passed as a parameter
176 * tagged as meta (setting the OPTEE_MSG_ATTR_META bit in the corresponding
177 * attrs field). All parameters tagged as meta have to come first.
178 *
179 * Temp memref parameters can be fragmented if supported by the Trusted OS
180 * (when optee_smc.h is bearer of this protocol this is indicated with
181 * OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM). If a logical memref parameter is
182 * fragmented then all but the last fragment have the
183 * OPTEE_MSG_ATTR_FRAGMENT bit set in attrs. Even if a memref is fragmented
184 * it will still be presented as a single logical memref to the Trusted
185 * Application.
186 */
187struct optee_msg_arg {
188 u32 cmd;
189 u32 func;
190 u32 session;
191 u32 cancel_id;
192 u32 pad;
193 u32 ret;
194 u32 ret_origin;
195 u32 num_params;
196
197 /* num_params tells the actual number of element in params */
198 struct optee_msg_param params[0];
199};
200
201/**
202 * OPTEE_MSG_GET_ARG_SIZE - return size of struct optee_msg_arg
203 *
204 * @num_params: Number of parameters embedded in the struct optee_msg_arg
205 *
206 * Returns the size of the struct optee_msg_arg together with the number
207 * of embedded parameters.
208 */
209#define OPTEE_MSG_GET_ARG_SIZE(num_params) \
210 (sizeof(struct optee_msg_arg) + \
211 sizeof(struct optee_msg_param) * (num_params))
212
213/*****************************************************************************
214 * Part 2 - requests from normal world
215 *****************************************************************************/
216
217/*
218 * Return the following UID if using API specified in this file without
219 * further extensions:
220 * 384fb3e0-e7f8-11e3-af63-0002a5d5c51b.
221 * Represented in 4 32-bit words in OPTEE_MSG_UID_0, OPTEE_MSG_UID_1,
222 * OPTEE_MSG_UID_2, OPTEE_MSG_UID_3.
223 */
224#define OPTEE_MSG_UID_0 0x384fb3e0
225#define OPTEE_MSG_UID_1 0xe7f811e3
226#define OPTEE_MSG_UID_2 0xaf630002
227#define OPTEE_MSG_UID_3 0xa5d5c51b
228#define OPTEE_MSG_FUNCID_CALLS_UID 0xFF01
229
230/*
231 * Returns 2.0 if using API specified in this file without further
232 * extensions. Represented in 2 32-bit words in OPTEE_MSG_REVISION_MAJOR
233 * and OPTEE_MSG_REVISION_MINOR
234 */
235#define OPTEE_MSG_REVISION_MAJOR 2
236#define OPTEE_MSG_REVISION_MINOR 0
237#define OPTEE_MSG_FUNCID_CALLS_REVISION 0xFF03
238
239/*
240 * Get UUID of Trusted OS.
241 *
242 * Used by non-secure world to figure out which Trusted OS is installed.
243 * Note that returned UUID is the UUID of the Trusted OS, not of the API.
244 *
245 * Returns UUID in 4 32-bit words in the same way as
246 * OPTEE_MSG_FUNCID_CALLS_UID described above.
247 */
248#define OPTEE_MSG_OS_OPTEE_UUID_0 0x486178e0
249#define OPTEE_MSG_OS_OPTEE_UUID_1 0xe7f811e3
250#define OPTEE_MSG_OS_OPTEE_UUID_2 0xbc5e0002
251#define OPTEE_MSG_OS_OPTEE_UUID_3 0xa5d5c51b
252#define OPTEE_MSG_FUNCID_GET_OS_UUID 0x0000
253
254/*
255 * Get revision of Trusted OS.
256 *
257 * Used by non-secure world to figure out which version of the Trusted OS
258 * is installed. Note that the returned revision is the revision of the
259 * Trusted OS, not of the API.
260 *
261 * Returns revision in 2 32-bit words in the same way as
262 * OPTEE_MSG_CALLS_REVISION described above.
263 */
264#define OPTEE_MSG_FUNCID_GET_OS_REVISION 0x0001
265
266/*
267 * Do a secure call with struct optee_msg_arg as argument
268 * The OPTEE_MSG_CMD_* below defines what goes in struct optee_msg_arg::cmd
269 *
270 * OPTEE_MSG_CMD_OPEN_SESSION opens a session to a Trusted Application.
271 * The first two parameters are tagged as meta, holding two value
272 * parameters to pass the following information:
273 * param[0].u.value.a-b uuid of Trusted Application
274 * param[1].u.value.a-b uuid of Client
275 * param[1].u.value.c Login class of client OPTEE_MSG_LOGIN_*
276 *
277 * OPTEE_MSG_CMD_INVOKE_COMMAND invokes a command in a previously opened
278 * session to a Trusted Application. struct optee_msg_arg::func is Trusted
279 * Application function, specific to the Trusted Application.
280 *
281 * OPTEE_MSG_CMD_CLOSE_SESSION closes a previously opened session to
282 * Trusted Application.
283 *
284 * OPTEE_MSG_CMD_CANCEL cancels a currently invoked command.
285 *
286 * OPTEE_MSG_CMD_REGISTER_SHM registers a shared memory reference. The
287 * information is passed as:
288 * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_TMEM_INPUT
289 * [| OPTEE_MSG_ATTR_FRAGMENT]
290 * [in] param[0].u.tmem.buf_ptr physical address (of first fragment)
291 * [in] param[0].u.tmem.size size (of first fragment)
292 * [in] param[0].u.tmem.shm_ref holds shared memory reference
293 * ...
 294 * The shared memory can optionally be fragmented; temp memrefs can follow
 295 * each other, with all but the last having the OPTEE_MSG_ATTR_FRAGMENT bit set.
296 *
 297 * OPTEE_MSG_CMD_UNREGISTER_SHM unregisters a previously registered shared
298 * memory reference. The information is passed as:
299 * [in] param[0].attr OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
300 * [in] param[0].u.rmem.shm_ref holds shared memory reference
301 * [in] param[0].u.rmem.offs 0
302 * [in] param[0].u.rmem.size 0
303 */
304#define OPTEE_MSG_CMD_OPEN_SESSION 0
305#define OPTEE_MSG_CMD_INVOKE_COMMAND 1
306#define OPTEE_MSG_CMD_CLOSE_SESSION 2
307#define OPTEE_MSG_CMD_CANCEL 3
308#define OPTEE_MSG_CMD_REGISTER_SHM 4
309#define OPTEE_MSG_CMD_UNREGISTER_SHM 5
310#define OPTEE_MSG_FUNCID_CALL_WITH_ARG 0x0004
311
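A minimal sketch (not from the patch) of how the normal world lays out an open-session request, assuming the OPTEE_MSG_ATTR_META bit defined in Part 1 of this header; arg points to an argument structure in shared memory and num_ta_params is a hypothetical count of the parameters passed on to the Trusted Application:

	arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	arg->num_params = 2 + num_ta_params;	/* the two meta parameters come first */

	/* params[0].u.value.a-b carry the UUID of the Trusted Application */
	arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	/* params[1].u.value.a-b carry the client UUID, .c an OPTEE_MSG_LOGIN_* value */
	arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT | OPTEE_MSG_ATTR_META;
	/* params[2..] hold the Trusted Application parameters themselves */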
312/*****************************************************************************
313 * Part 3 - Requests from secure world, RPC
314 *****************************************************************************/
315
316/*
317 * All RPC is done with a struct optee_msg_arg as bearer of information,
 318 * struct optee_msg_arg::cmd holds values defined by OPTEE_MSG_RPC_CMD_* below
319 *
320 * RPC communication with tee-supplicant is reversed compared to normal
 321 * client communication described above. The supplicant receives requests
322 * and sends responses.
323 */
324
325/*
326 * Load a TA into memory, defined in tee-supplicant
327 */
328#define OPTEE_MSG_RPC_CMD_LOAD_TA 0
329
330/*
331 * Reserved
332 */
333#define OPTEE_MSG_RPC_CMD_RPMB 1
334
335/*
336 * File system access, defined in tee-supplicant
337 */
338#define OPTEE_MSG_RPC_CMD_FS 2
339
340/*
341 * Get time
342 *
 343 * Returns number of seconds and nanoseconds since the Epoch,
344 * 1970-01-01 00:00:00 +0000 (UTC).
345 *
346 * [out] param[0].u.value.a Number of seconds
 347 * [out] param[0].u.value.b Number of nanoseconds.
348 */
349#define OPTEE_MSG_RPC_CMD_GET_TIME 3
350
351/*
352 * Wait queue primitive, helper for secure world to implement a wait queue.
353 *
 354 * If secure world needs to wait for a secure world mutex it issues a sleep
 355 * request instead of spinning in secure world. Conversely, a wakeup
 356 * request is issued when a secure world mutex with a waiting thread is
 357 * unlocked.
358 *
359 * Waiting on a key
360 * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP
361 * [in] param[0].u.value.b wait key
362 *
363 * Waking up a key
364 * [in] param[0].u.value.a OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP
365 * [in] param[0].u.value.b wakeup key
366 */
367#define OPTEE_MSG_RPC_CMD_WAIT_QUEUE 4
368#define OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP 0
369#define OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP 1
370
371/*
372 * Suspend execution
373 *
 374 * [in] param[0].u.value.a number of milliseconds to suspend
375 */
376#define OPTEE_MSG_RPC_CMD_SUSPEND 5
377
378/*
379 * Allocate a piece of shared memory
380 *
 381 * Shared memory can optionally be fragmented; to support that, additional
 382 * spare param entries are allocated to make room for any fragments.
 383 * The spare param entries have .attr = OPTEE_MSG_ATTR_TYPE_NONE when
384 * unused. All returned temp memrefs except the last should have the
385 * OPTEE_MSG_ATTR_FRAGMENT bit set in the attr field.
386 *
387 * [in] param[0].u.value.a type of memory one of
388 * OPTEE_MSG_RPC_SHM_TYPE_* below
389 * [in] param[0].u.value.b requested size
390 * [in] param[0].u.value.c required alignment
391 *
392 * [out] param[0].u.tmem.buf_ptr physical address (of first fragment)
393 * [out] param[0].u.tmem.size size (of first fragment)
394 * [out] param[0].u.tmem.shm_ref shared memory reference
395 * ...
396 * [out] param[n].u.tmem.buf_ptr physical address
397 * [out] param[n].u.tmem.size size
398 * [out] param[n].u.tmem.shm_ref shared memory reference (same value
399 * as in param[n-1].u.tmem.shm_ref)
400 */
401#define OPTEE_MSG_RPC_CMD_SHM_ALLOC 6
402/* Memory that can be shared with a non-secure user space application */
403#define OPTEE_MSG_RPC_SHM_TYPE_APPL 0
404/* Memory only shared with non-secure kernel */
405#define OPTEE_MSG_RPC_SHM_TYPE_KERNEL 1
406
407/*
408 * Free shared memory previously allocated with OPTEE_MSG_RPC_CMD_SHM_ALLOC
409 *
410 * [in] param[0].u.value.a type of memory one of
411 * OPTEE_MSG_RPC_SHM_TYPE_* above
412 * [in] param[0].u.value.b value of shared memory reference
413 * returned in param[0].u.tmem.shm_ref
414 * above
415 */
416#define OPTEE_MSG_RPC_CMD_SHM_FREE 7
417
418#endif /* _OPTEE_MSG_H */
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
new file mode 100644
index 000000000000..c374cd594314
--- /dev/null
+++ b/drivers/tee/optee/optee_private.h
@@ -0,0 +1,183 @@
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef OPTEE_PRIVATE_H
16#define OPTEE_PRIVATE_H
17
18#include <linux/arm-smccc.h>
19#include <linux/semaphore.h>
20#include <linux/tee_drv.h>
21#include <linux/types.h>
22#include "optee_msg.h"
23
24#define OPTEE_MAX_ARG_SIZE 1024
25
26/* Some Global Platform error codes used in this driver */
27#define TEEC_SUCCESS 0x00000000
28#define TEEC_ERROR_BAD_PARAMETERS 0xFFFF0006
29#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
30#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
31
32#define TEEC_ORIGIN_COMMS 0x00000002
33
34typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
35 unsigned long, unsigned long, unsigned long,
36 unsigned long, unsigned long,
37 struct arm_smccc_res *);
38
39struct optee_call_queue {
40 /* Serializes access to this struct */
41 struct mutex mutex;
42 struct list_head waiters;
43};
44
45struct optee_wait_queue {
46 /* Serializes access to this struct */
47 struct mutex mu;
48 struct list_head db;
49};
50
51/**
52 * struct optee_supp - supplicant synchronization struct
 53 * @ctx: the context of the currently connected supplicant;
 54 * if !NULL the supplicant device is available for use,
 55 * else no supplicant is connected
56 * @ctx_mutex: held while accessing @ctx
57 * @func: supplicant function id to call
58 * @ret: call return value
59 * @num_params: number of elements in @param
60 * @param: parameters for @func
61 * @req_posted: if true, a request has been posted to the supplicant
62 * @supp_next_send: if true, next step is for supplicant to send response
63 * @thrd_mutex: held by the thread doing a request to supplicant
64 * @supp_mutex: held by supplicant while operating on this struct
65 * @data_to_supp: supplicant is waiting on this for next request
66 * @data_from_supp: requesting thread is waiting on this to get the result
67 */
68struct optee_supp {
69 struct tee_context *ctx;
70 /* Serializes access of ctx */
71 struct mutex ctx_mutex;
72
73 u32 func;
74 u32 ret;
75 size_t num_params;
76 struct tee_param *param;
77
78 bool req_posted;
79 bool supp_next_send;
80 /* Serializes access to this struct for requesting thread */
81 struct mutex thrd_mutex;
82 /* Serializes access to this struct for supplicant threads */
83 struct mutex supp_mutex;
84 struct completion data_to_supp;
85 struct completion data_from_supp;
86};
87
88/**
89 * struct optee - main service struct
90 * @supp_teedev: supplicant device
91 * @teedev: client device
92 * @invoke_fn: function to issue smc or hvc
93 * @call_queue: queue of threads waiting to call @invoke_fn
94 * @wait_queue: queue of threads from secure world waiting for a
95 * secure world sync object
96 * @supp: supplicant synchronization struct for RPC to supplicant
97 * @pool: shared memory pool
 98 * @memremaped_shm: virtual address of memory in shared memory pool
99 */
100struct optee {
101 struct tee_device *supp_teedev;
102 struct tee_device *teedev;
103 optee_invoke_fn *invoke_fn;
104 struct optee_call_queue call_queue;
105 struct optee_wait_queue wait_queue;
106 struct optee_supp supp;
107 struct tee_shm_pool *pool;
108 void *memremaped_shm;
109};
110
111struct optee_session {
112 struct list_head list_node;
113 u32 session_id;
114};
115
116struct optee_context_data {
117 /* Serializes access to this struct */
118 struct mutex mutex;
119 struct list_head sess_list;
120};
121
122struct optee_rpc_param {
123 u32 a0;
124 u32 a1;
125 u32 a2;
126 u32 a3;
127 u32 a4;
128 u32 a5;
129 u32 a6;
130 u32 a7;
131};
132
133void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param);
134
135void optee_wait_queue_init(struct optee_wait_queue *wq);
136void optee_wait_queue_exit(struct optee_wait_queue *wq);
137
138u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
139 struct tee_param *param);
140
141int optee_supp_read(struct tee_context *ctx, void __user *buf, size_t len);
142int optee_supp_write(struct tee_context *ctx, void __user *buf, size_t len);
143void optee_supp_init(struct optee_supp *supp);
144void optee_supp_uninit(struct optee_supp *supp);
145
146int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
147 struct tee_param *param);
148int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
149 struct tee_param *param);
150
151u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
152int optee_open_session(struct tee_context *ctx,
153 struct tee_ioctl_open_session_arg *arg,
154 struct tee_param *param);
155int optee_close_session(struct tee_context *ctx, u32 session);
156int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
157 struct tee_param *param);
158int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
159
160void optee_enable_shm_cache(struct optee *optee);
161void optee_disable_shm_cache(struct optee *optee);
162
163int optee_from_msg_param(struct tee_param *params, size_t num_params,
164 const struct optee_msg_param *msg_params);
165int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
166 const struct tee_param *params);
167
168/*
169 * Small helpers
170 */
171
172static inline void *reg_pair_to_ptr(u32 reg0, u32 reg1)
173{
174 return (void *)(unsigned long)(((u64)reg0 << 32) | reg1);
175}
176
177static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
178{
179 *reg0 = val >> 32;
180 *reg1 = val;
181}
182
183#endif /*OPTEE_PRIVATE_H*/
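An illustrative round trip through the two helpers above (not from the patch); shm stands for any kernel pointer that is handed to secure world as a 64-bit cookie split over two 32-bit registers:

	u32 hi, lo;

	reg_pair_from_64(&hi, &lo, (unsigned long)shm);	/* split for e.g. a1/a2 */
	WARN_ON(reg_pair_to_ptr(hi, lo) != shm);	/* recombined on the way back */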
diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h
new file mode 100644
index 000000000000..13b7c98cdf25
--- /dev/null
+++ b/drivers/tee/optee/optee_smc.h
@@ -0,0 +1,450 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27#ifndef OPTEE_SMC_H
28#define OPTEE_SMC_H
29
30#include <linux/arm-smccc.h>
31#include <linux/bitops.h>
32
33#define OPTEE_SMC_STD_CALL_VAL(func_num) \
34 ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
35 ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
36#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
37 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
38 ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
39
40/*
41 * Function specified by SMC Calling convention.
42 */
43#define OPTEE_SMC_FUNCID_CALLS_COUNT 0xFF00
44#define OPTEE_SMC_CALLS_COUNT \
 45 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
 46 ARM_SMCCC_OWNER_TRUSTED_OS_END, \
47 OPTEE_SMC_FUNCID_CALLS_COUNT)
48
49/*
50 * Normal cached memory (write-back), shareable for SMP systems and not
51 * shareable for UP systems.
52 */
53#define OPTEE_SMC_SHM_CACHED 1
54
55/*
 56 * a0..a7 are used as register names in the descriptions below; on arm32
 57 * that translates to r0..r7 and on arm64 to w0..w7. In both cases they
 58 * are 32-bit registers.
59 */
60
61/*
62 * Function specified by SMC Calling convention
63 *
64 * Return one of the following UIDs if using API specified in this file
 65 * without further extensions:
66 * 65cb6b93-af0c-4617-8ed6-644a8d1140f8
 67 * see also OPTEE_MSG_UID_* in optee_msg.h
68 */
69#define OPTEE_SMC_FUNCID_CALLS_UID OPTEE_MSG_FUNCID_CALLS_UID
70#define OPTEE_SMC_CALLS_UID \
71 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
72 ARM_SMCCC_OWNER_TRUSTED_OS_END, \
73 OPTEE_SMC_FUNCID_CALLS_UID)
74
75/*
76 * Function specified by SMC Calling convention
77 *
 78 * Returns 2.0 if using API specified in this file without further extensions.
79 * see also OPTEE_MSG_REVISION_* in optee_msg.h
80 */
81#define OPTEE_SMC_FUNCID_CALLS_REVISION OPTEE_MSG_FUNCID_CALLS_REVISION
82#define OPTEE_SMC_CALLS_REVISION \
83 ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
84 ARM_SMCCC_OWNER_TRUSTED_OS_END, \
85 OPTEE_SMC_FUNCID_CALLS_REVISION)
86
87struct optee_smc_calls_revision_result {
88 unsigned long major;
89 unsigned long minor;
90 unsigned long reserved0;
91 unsigned long reserved1;
92};
93
94/*
95 * Get UUID of Trusted OS.
96 *
97 * Used by non-secure world to figure out which Trusted OS is installed.
98 * Note that returned UUID is the UUID of the Trusted OS, not of the API.
99 *
 100 * Returns UUID in a0-3 in the same way as OPTEE_SMC_CALLS_UID
101 * described above.
102 */
103#define OPTEE_SMC_FUNCID_GET_OS_UUID OPTEE_MSG_FUNCID_GET_OS_UUID
104#define OPTEE_SMC_CALL_GET_OS_UUID \
105 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_UUID)
106
107/*
108 * Get revision of Trusted OS.
109 *
110 * Used by non-secure world to figure out which version of the Trusted OS
111 * is installed. Note that the returned revision is the revision of the
112 * Trusted OS, not of the API.
113 *
114 * Returns revision in a0-1 in the same way as OPTEE_SMC_CALLS_REVISION
115 * described above.
116 */
117#define OPTEE_SMC_FUNCID_GET_OS_REVISION OPTEE_MSG_FUNCID_GET_OS_REVISION
118#define OPTEE_SMC_CALL_GET_OS_REVISION \
119 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_OS_REVISION)
120
121/*
122 * Call with struct optee_msg_arg as argument
123 *
124 * Call register usage:
125 * a0 SMC Function ID, OPTEE_SMC*CALL_WITH_ARG
126 * a1 Upper 32bit of a 64bit physical pointer to a struct optee_msg_arg
127 * a2 Lower 32bit of a 64bit physical pointer to a struct optee_msg_arg
128 * a3 Cache settings, not used if physical pointer is in a predefined shared
129 * memory area else per OPTEE_SMC_SHM_*
130 * a4-6 Not used
131 * a7 Hypervisor Client ID register
132 *
133 * Normal return register usage:
134 * a0 Return value, OPTEE_SMC_RETURN_*
135 * a1-3 Not used
136 * a4-7 Preserved
137 *
138 * OPTEE_SMC_RETURN_ETHREAD_LIMIT return register usage:
139 * a0 Return value, OPTEE_SMC_RETURN_ETHREAD_LIMIT
140 * a1-3 Preserved
141 * a4-7 Preserved
142 *
143 * RPC return register usage:
144 * a0 Return value, OPTEE_SMC_RETURN_IS_RPC(val)
145 * a1-2 RPC parameters
146 * a3-7 Resume information, must be preserved
147 *
148 * Possible return values:
149 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
150 * function.
151 * OPTEE_SMC_RETURN_OK Call completed, result updated in
152 * the previously supplied struct
153 * optee_msg_arg.
154 * OPTEE_SMC_RETURN_ETHREAD_LIMIT Number of Trusted OS threads exceeded,
155 * try again later.
 156 * OPTEE_SMC_RETURN_EBADADDR Bad physical pointer to struct
157 * optee_msg_arg.
158 * OPTEE_SMC_RETURN_EBADCMD Bad/unknown cmd in struct optee_msg_arg
159 * OPTEE_SMC_RETURN_IS_RPC() Call suspended by RPC call to normal
160 * world.
161 */
162#define OPTEE_SMC_FUNCID_CALL_WITH_ARG OPTEE_MSG_FUNCID_CALL_WITH_ARG
163#define OPTEE_SMC_CALL_WITH_ARG \
164 OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_CALL_WITH_ARG)
165
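A condensed sketch (not from the patch) of the call loop this contract implies, along the lines of what optee_do_call_with_arg() in call.c does; ctx, invoke_fn and parg (the physical address of the struct optee_msg_arg) are assumed to be in scope:

	struct optee_rpc_param param = { .a0 = OPTEE_SMC_CALL_WITH_ARG };
	struct arm_smccc_res res;

	reg_pair_from_64(&param.a1, &param.a2, parg);
	while (true) {
		invoke_fn(param.a0, param.a1, param.a2, param.a3,
			  param.a4, param.a5, param.a6, param.a7, &res);

		if (!OPTEE_SMC_RETURN_IS_RPC(res.a0))
			break;	/* OPTEE_SMC_RETURN_OK or an error code in a0 */

		/* Secure world wants a service; serve it and resume the call */
		param.a0 = res.a0;
		param.a1 = res.a1;
		param.a2 = res.a2;
		param.a3 = res.a3;
		optee_handle_rpc(ctx, &param);	/* sets a0 to OPTEE_SMC_CALL_RETURN_FROM_RPC */
	}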
166/*
167 * Get Shared Memory Config
168 *
169 * Returns the Secure/Non-secure shared memory config.
170 *
171 * Call register usage:
172 * a0 SMC Function ID, OPTEE_SMC_GET_SHM_CONFIG
173 * a1-6 Not used
174 * a7 Hypervisor Client ID register
175 *
176 * Have config return register usage:
177 * a0 OPTEE_SMC_RETURN_OK
178 * a1 Physical address of start of SHM
 179 * a2 Size of SHM
180 * a3 Cache settings of memory, as defined by the
181 * OPTEE_SMC_SHM_* values above
182 * a4-7 Preserved
183 *
184 * Not available register usage:
185 * a0 OPTEE_SMC_RETURN_ENOTAVAIL
186 * a1-3 Not used
187 * a4-7 Preserved
188 */
189#define OPTEE_SMC_FUNCID_GET_SHM_CONFIG 7
190#define OPTEE_SMC_GET_SHM_CONFIG \
191 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_GET_SHM_CONFIG)
192
193struct optee_smc_get_shm_config_result {
194 unsigned long status;
195 unsigned long start;
196 unsigned long size;
197 unsigned long settings;
198};
199
200/*
201 * Exchanges capabilities between normal world and secure world
202 *
203 * Call register usage:
204 * a0 SMC Function ID, OPTEE_SMC_EXCHANGE_CAPABILITIES
205 * a1 bitfield of normal world capabilities OPTEE_SMC_NSEC_CAP_*
206 * a2-6 Not used
207 * a7 Hypervisor Client ID register
208 *
209 * Normal return register usage:
210 * a0 OPTEE_SMC_RETURN_OK
211 * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
212 * a2-7 Preserved
213 *
214 * Error return register usage:
215 * a0 OPTEE_SMC_RETURN_ENOTAVAIL, can't use the capabilities from normal world
216 * a1 bitfield of secure world capabilities OPTEE_SMC_SEC_CAP_*
217 * a2-7 Preserved
218 */
219/* Normal world works as a uniprocessor system */
220#define OPTEE_SMC_NSEC_CAP_UNIPROCESSOR BIT(0)
221/* Secure world has reserved shared memory for normal world to use */
222#define OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM BIT(0)
223/* Secure world can communicate via previously unregistered shared memory */
224#define OPTEE_SMC_SEC_CAP_UNREGISTERED_SHM BIT(1)
225#define OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES 9
226#define OPTEE_SMC_EXCHANGE_CAPABILITIES \
227 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_EXCHANGE_CAPABILITIES)
228
229struct optee_smc_exchange_capabilities_result {
230 unsigned long status;
231 unsigned long capabilities;
232 unsigned long reserved0;
233 unsigned long reserved1;
234};
235
236/*
 237 * Disable and empty cache of shared memory objects
238 *
239 * Secure world can cache frequently used shared memory objects, for
240 * example objects used as RPC arguments. When secure world is idle this
241 * function returns one shared memory reference to free. To disable the
242 * cache and free all cached objects this function has to be called until
243 * it returns OPTEE_SMC_RETURN_ENOTAVAIL.
244 *
245 * Call register usage:
246 * a0 SMC Function ID, OPTEE_SMC_DISABLE_SHM_CACHE
247 * a1-6 Not used
248 * a7 Hypervisor Client ID register
249 *
250 * Normal return register usage:
251 * a0 OPTEE_SMC_RETURN_OK
252 * a1 Upper 32bit of a 64bit Shared memory cookie
253 * a2 Lower 32bit of a 64bit Shared memory cookie
254 * a3-7 Preserved
255 *
256 * Cache empty return register usage:
257 * a0 OPTEE_SMC_RETURN_ENOTAVAIL
258 * a1-7 Preserved
259 *
260 * Not idle return register usage:
261 * a0 OPTEE_SMC_RETURN_EBUSY
262 * a1-7 Preserved
263 */
264#define OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE 10
265#define OPTEE_SMC_DISABLE_SHM_CACHE \
266 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_DISABLE_SHM_CACHE)
267
268struct optee_smc_disable_shm_cache_result {
269 unsigned long status;
270 unsigned long shm_upper32;
271 unsigned long shm_lower32;
272 unsigned long reserved0;
273};
274
275/*
276 * Enable cache of shared memory objects
277 *
278 * Secure world can cache frequently used shared memory objects, for
279 * example objects used as RPC arguments. When secure world is idle this
280 * function returns OPTEE_SMC_RETURN_OK and the cache is enabled. If
281 * secure world isn't idle OPTEE_SMC_RETURN_EBUSY is returned.
282 *
283 * Call register usage:
284 * a0 SMC Function ID, OPTEE_SMC_ENABLE_SHM_CACHE
285 * a1-6 Not used
286 * a7 Hypervisor Client ID register
287 *
288 * Normal return register usage:
289 * a0 OPTEE_SMC_RETURN_OK
290 * a1-7 Preserved
291 *
292 * Not idle return register usage:
293 * a0 OPTEE_SMC_RETURN_EBUSY
294 * a1-7 Preserved
295 */
296#define OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE 11
297#define OPTEE_SMC_ENABLE_SHM_CACHE \
298 OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_ENABLE_SHM_CACHE)
299
300/*
301 * Resume from RPC (for example after processing an IRQ)
302 *
303 * Call register usage:
304 * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC
305 * a1-3 Value of a1-3 when OPTEE_SMC_CALL_WITH_ARG returned
306 * OPTEE_SMC_RETURN_RPC in a0
307 *
308 * Return register usage is the same as for OPTEE_SMC_*CALL_WITH_ARG above.
309 *
310 * Possible return values
311 * OPTEE_SMC_RETURN_UNKNOWN_FUNCTION Trusted OS does not recognize this
312 * function.
313 * OPTEE_SMC_RETURN_OK Original call completed, result
 314 * updated in the previously supplied
 315 * struct optee_msg_arg.
316 * OPTEE_SMC_RETURN_RPC Call suspended by RPC call to normal
317 * world.
318 * OPTEE_SMC_RETURN_ERESUME Resume failed, the opaque resume
319 * information was corrupt.
320 */
321#define OPTEE_SMC_FUNCID_RETURN_FROM_RPC 3
322#define OPTEE_SMC_CALL_RETURN_FROM_RPC \
323 OPTEE_SMC_STD_CALL_VAL(OPTEE_SMC_FUNCID_RETURN_FROM_RPC)
324
325#define OPTEE_SMC_RETURN_RPC_PREFIX_MASK 0xFFFF0000
326#define OPTEE_SMC_RETURN_RPC_PREFIX 0xFFFF0000
327#define OPTEE_SMC_RETURN_RPC_FUNC_MASK 0x0000FFFF
328
329#define OPTEE_SMC_RETURN_GET_RPC_FUNC(ret) \
330 ((ret) & OPTEE_SMC_RETURN_RPC_FUNC_MASK)
331
332#define OPTEE_SMC_RPC_VAL(func) ((func) | OPTEE_SMC_RETURN_RPC_PREFIX)
333
334/*
335 * Allocate memory for RPC parameter passing. The memory is used to hold a
336 * struct optee_msg_arg.
337 *
338 * "Call" register usage:
339 * a0 This value, OPTEE_SMC_RETURN_RPC_ALLOC
340 * a1 Size in bytes of required argument memory
341 * a2 Not used
342 * a3 Resume information, must be preserved
343 * a4-5 Not used
344 * a6-7 Resume information, must be preserved
345 *
346 * "Return" register usage:
347 * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
348 * a1 Upper 32bits of 64bit physical pointer to allocated
349 * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
350 * be allocated.
351 * a2 Lower 32bits of 64bit physical pointer to allocated
352 * memory, (a1 == 0 && a2 == 0) if size was 0 or if memory can't
353 * be allocated
354 * a3 Preserved
355 * a4 Upper 32bits of 64bit Shared memory cookie used when freeing
356 * the memory or doing an RPC
357 * a5 Lower 32bits of 64bit Shared memory cookie used when freeing
358 * the memory or doing an RPC
359 * a6-7 Preserved
360 */
361#define OPTEE_SMC_RPC_FUNC_ALLOC 0
362#define OPTEE_SMC_RETURN_RPC_ALLOC \
363 OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_ALLOC)
364
365/*
366 * Free memory previously allocated by OPTEE_SMC_RETURN_RPC_ALLOC
367 *
368 * "Call" register usage:
369 * a0 This value, OPTEE_SMC_RETURN_RPC_FREE
370 * a1 Upper 32bits of 64bit shared memory cookie belonging to this
371 * argument memory
372 * a2 Lower 32bits of 64bit shared memory cookie belonging to this
373 * argument memory
374 * a3-7 Resume information, must be preserved
375 *
376 * "Return" register usage:
377 * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
378 * a1-2 Not used
379 * a3-7 Preserved
380 */
381#define OPTEE_SMC_RPC_FUNC_FREE 2
382#define OPTEE_SMC_RETURN_RPC_FREE \
383 OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_FREE)
384
385/*
386 * Deliver an IRQ in normal world.
387 *
388 * "Call" register usage:
389 * a0 OPTEE_SMC_RETURN_RPC_IRQ
390 * a1-7 Resume information, must be preserved
391 *
392 * "Return" register usage:
393 * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
394 * a1-7 Preserved
395 */
396#define OPTEE_SMC_RPC_FUNC_IRQ 4
397#define OPTEE_SMC_RETURN_RPC_IRQ \
398 OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_IRQ)
399
400/*
401 * Do an RPC request. The supplied struct optee_msg_arg tells which
402 * request to do and the parameters for the request. The following fields
403 * are used (the rest are unused):
404 * - cmd the Request ID
405 * - ret return value of the request, filled in by normal world
406 * - num_params number of parameters for the request
407 * - params the parameters
408 * - param_attrs attributes of the parameters
409 *
410 * "Call" register usage:
411 * a0 OPTEE_SMC_RETURN_RPC_CMD
412 * a1 Upper 32bit of a 64bit Shared memory cookie holding a
413 * struct optee_msg_arg, must be preserved, only the data should
414 * be updated
415 * a2 Lower 32bit of a 64bit Shared memory cookie holding a
416 * struct optee_msg_arg, must be preserved, only the data should
417 * be updated
418 * a3-7 Resume information, must be preserved
419 *
420 * "Return" register usage:
421 * a0 SMC Function ID, OPTEE_SMC_CALL_RETURN_FROM_RPC.
422 * a1-2 Not used
423 * a3-7 Preserved
424 */
425#define OPTEE_SMC_RPC_FUNC_CMD 5
426#define OPTEE_SMC_RETURN_RPC_CMD \
427 OPTEE_SMC_RPC_VAL(OPTEE_SMC_RPC_FUNC_CMD)
428
429/* Returned in a0 */
430#define OPTEE_SMC_RETURN_UNKNOWN_FUNCTION 0xFFFFFFFF
431
432/* Returned in a0 only from Trusted OS functions */
433#define OPTEE_SMC_RETURN_OK 0x0
434#define OPTEE_SMC_RETURN_ETHREAD_LIMIT 0x1
435#define OPTEE_SMC_RETURN_EBUSY 0x2
436#define OPTEE_SMC_RETURN_ERESUME 0x3
437#define OPTEE_SMC_RETURN_EBADADDR 0x4
438#define OPTEE_SMC_RETURN_EBADCMD 0x5
439#define OPTEE_SMC_RETURN_ENOMEM 0x6
440#define OPTEE_SMC_RETURN_ENOTAVAIL 0x7
441#define OPTEE_SMC_RETURN_IS_RPC(ret) __optee_smc_return_is_rpc((ret))
442
443static inline bool __optee_smc_return_is_rpc(u32 ret)
444{
445 return ret != OPTEE_SMC_RETURN_UNKNOWN_FUNCTION &&
446 (ret & OPTEE_SMC_RETURN_RPC_PREFIX_MASK) ==
447 OPTEE_SMC_RETURN_RPC_PREFIX;
448}
449
450#endif /* OPTEE_SMC_H */
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
new file mode 100644
index 000000000000..8814eca06021
--- /dev/null
+++ b/drivers/tee/optee/rpc.c
@@ -0,0 +1,396 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/slab.h>
20#include <linux/tee_drv.h>
21#include "optee_private.h"
22#include "optee_smc.h"
23
24struct wq_entry {
25 struct list_head link;
26 struct completion c;
27 u32 key;
28};
29
30void optee_wait_queue_init(struct optee_wait_queue *priv)
31{
32 mutex_init(&priv->mu);
33 INIT_LIST_HEAD(&priv->db);
34}
35
36void optee_wait_queue_exit(struct optee_wait_queue *priv)
37{
38 mutex_destroy(&priv->mu);
39}
40
41static void handle_rpc_func_cmd_get_time(struct optee_msg_arg *arg)
42{
43 struct timespec64 ts;
44
45 if (arg->num_params != 1)
46 goto bad;
47 if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
48 OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT)
49 goto bad;
50
51 getnstimeofday64(&ts);
52 arg->params[0].u.value.a = ts.tv_sec;
53 arg->params[0].u.value.b = ts.tv_nsec;
54
55 arg->ret = TEEC_SUCCESS;
56 return;
57bad:
58 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
59}
60
61static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key)
62{
63 struct wq_entry *w;
64
65 mutex_lock(&wq->mu);
66
67 list_for_each_entry(w, &wq->db, link)
68 if (w->key == key)
69 goto out;
70
71 w = kmalloc(sizeof(*w), GFP_KERNEL);
72 if (w) {
73 init_completion(&w->c);
74 w->key = key;
75 list_add_tail(&w->link, &wq->db);
76 }
77out:
78 mutex_unlock(&wq->mu);
79 return w;
80}
81
82static void wq_sleep(struct optee_wait_queue *wq, u32 key)
83{
84 struct wq_entry *w = wq_entry_get(wq, key);
85
86 if (w) {
87 wait_for_completion(&w->c);
88 mutex_lock(&wq->mu);
89 list_del(&w->link);
90 mutex_unlock(&wq->mu);
91 kfree(w);
92 }
93}
94
95static void wq_wakeup(struct optee_wait_queue *wq, u32 key)
96{
97 struct wq_entry *w = wq_entry_get(wq, key);
98
99 if (w)
100 complete(&w->c);
101}
102
103static void handle_rpc_func_cmd_wq(struct optee *optee,
104 struct optee_msg_arg *arg)
105{
106 if (arg->num_params != 1)
107 goto bad;
108
109 if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
110 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
111 goto bad;
112
113 switch (arg->params[0].u.value.a) {
114 case OPTEE_MSG_RPC_WAIT_QUEUE_SLEEP:
115 wq_sleep(&optee->wait_queue, arg->params[0].u.value.b);
116 break;
117 case OPTEE_MSG_RPC_WAIT_QUEUE_WAKEUP:
118 wq_wakeup(&optee->wait_queue, arg->params[0].u.value.b);
119 break;
120 default:
121 goto bad;
122 }
123
124 arg->ret = TEEC_SUCCESS;
125 return;
126bad:
127 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
128}
129
130static void handle_rpc_func_cmd_wait(struct optee_msg_arg *arg)
131{
132 u32 msec_to_wait;
133
134 if (arg->num_params != 1)
135 goto bad;
136
137 if ((arg->params[0].attr & OPTEE_MSG_ATTR_TYPE_MASK) !=
138 OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
139 goto bad;
140
141 msec_to_wait = arg->params[0].u.value.a;
142
143 /* set task's state to interruptible sleep */
144 set_current_state(TASK_INTERRUPTIBLE);
145
146 /* take a nap */
147 msleep(msec_to_wait);
148
149 arg->ret = TEEC_SUCCESS;
150 return;
151bad:
152 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
153}
154
155static void handle_rpc_supp_cmd(struct tee_context *ctx,
156 struct optee_msg_arg *arg)
157{
158 struct tee_param *params;
159
160 arg->ret_origin = TEEC_ORIGIN_COMMS;
161
162 params = kmalloc_array(arg->num_params, sizeof(struct tee_param),
163 GFP_KERNEL);
164 if (!params) {
165 arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
166 return;
167 }
168
169 if (optee_from_msg_param(params, arg->num_params, arg->params)) {
170 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
171 goto out;
172 }
173
174 arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
175
176 if (optee_to_msg_param(arg->params, arg->num_params, params))
177 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
178out:
179 kfree(params);
180}
181
182static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
183{
184 u32 ret;
185 struct tee_param param;
186 struct optee *optee = tee_get_drvdata(ctx->teedev);
187 struct tee_shm *shm;
188
189 param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
190 param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
191 param.u.value.b = sz;
192 param.u.value.c = 0;
193
194 ret = optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_ALLOC, 1, &param);
195 if (ret)
196 return ERR_PTR(-ENOMEM);
197
198 mutex_lock(&optee->supp.ctx_mutex);
199 /* Increases count as secure world doesn't have a reference */
200 shm = tee_shm_get_from_id(optee->supp.ctx, param.u.value.c);
201 mutex_unlock(&optee->supp.ctx_mutex);
202 return shm;
203}
204
205static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
206 struct optee_msg_arg *arg)
207{
208 phys_addr_t pa;
209 struct tee_shm *shm;
210 size_t sz;
211 size_t n;
212
213 arg->ret_origin = TEEC_ORIGIN_COMMS;
214
215 if (!arg->num_params ||
216 arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
217 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
218 return;
219 }
220
221 for (n = 1; n < arg->num_params; n++) {
222 if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
223 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
224 return;
225 }
226 }
227
228 sz = arg->params[0].u.value.b;
229 switch (arg->params[0].u.value.a) {
230 case OPTEE_MSG_RPC_SHM_TYPE_APPL:
231 shm = cmd_alloc_suppl(ctx, sz);
232 break;
233 case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
234 shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED);
235 break;
236 default:
237 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
238 return;
239 }
240
241 if (IS_ERR(shm)) {
242 arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
243 return;
244 }
245
246 if (tee_shm_get_pa(shm, 0, &pa)) {
247 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
248 goto bad;
249 }
250
251 arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
252 arg->params[0].u.tmem.buf_ptr = pa;
253 arg->params[0].u.tmem.size = sz;
254 arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
255 arg->ret = TEEC_SUCCESS;
256 return;
257bad:
258 tee_shm_free(shm);
259}
260
261static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
262{
263 struct tee_param param;
264
265 param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
266 param.u.value.a = OPTEE_MSG_RPC_SHM_TYPE_APPL;
267 param.u.value.b = tee_shm_get_id(shm);
268 param.u.value.c = 0;
269
270 /*
271 * Match the tee_shm_get_from_id() in cmd_alloc_suppl() as secure
272 * world has released its reference.
273 *
274 * It's better to do this before sending the request to supplicant
 275 * as we'd like to let the process that did the initial allocation
 276 * release the last reference too, in order to avoid stacking up
277 * many pending fput() on the client process. This could otherwise
278 * happen if secure world does many allocate and free in a single
279 * invoke.
280 */
281 tee_shm_put(shm);
282
283 optee_supp_thrd_req(ctx, OPTEE_MSG_RPC_CMD_SHM_FREE, 1, &param);
284}
285
286static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
287 struct optee_msg_arg *arg)
288{
289 struct tee_shm *shm;
290
291 arg->ret_origin = TEEC_ORIGIN_COMMS;
292
293 if (arg->num_params != 1 ||
294 arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
295 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
296 return;
297 }
298
 299 shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
 300 arg->ret = TEEC_SUCCESS;
 301 switch (arg->params[0].u.value.a) {
 302 case OPTEE_MSG_RPC_SHM_TYPE_APPL:
 303 cmd_free_suppl(ctx, shm);
 304 break;
 305 case OPTEE_MSG_RPC_SHM_TYPE_KERNEL:
 306 tee_shm_free(shm);
 307 break;
 308 default:
 309 arg->ret = TEEC_ERROR_BAD_PARAMETERS;
 310 }
311}
312
313static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
314 struct tee_shm *shm)
315{
316 struct optee_msg_arg *arg;
317
318 arg = tee_shm_get_va(shm, 0);
319 if (IS_ERR(arg)) {
320 pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
321 return;
322 }
323
324 switch (arg->cmd) {
325 case OPTEE_MSG_RPC_CMD_GET_TIME:
326 handle_rpc_func_cmd_get_time(arg);
327 break;
328 case OPTEE_MSG_RPC_CMD_WAIT_QUEUE:
329 handle_rpc_func_cmd_wq(optee, arg);
330 break;
331 case OPTEE_MSG_RPC_CMD_SUSPEND:
332 handle_rpc_func_cmd_wait(arg);
333 break;
334 case OPTEE_MSG_RPC_CMD_SHM_ALLOC:
335 handle_rpc_func_cmd_shm_alloc(ctx, arg);
336 break;
337 case OPTEE_MSG_RPC_CMD_SHM_FREE:
338 handle_rpc_func_cmd_shm_free(ctx, arg);
339 break;
340 default:
341 handle_rpc_supp_cmd(ctx, arg);
342 }
343}
344
345/**
346 * optee_handle_rpc() - handle RPC from secure world
347 * @ctx: context doing the RPC
348 * @param: value of registers for the RPC
349 *
350 * Result of RPC is written back into @param.
351 */
352void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param)
353{
354 struct tee_device *teedev = ctx->teedev;
355 struct optee *optee = tee_get_drvdata(teedev);
356 struct tee_shm *shm;
357 phys_addr_t pa;
358
359 switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
360 case OPTEE_SMC_RPC_FUNC_ALLOC:
361 shm = tee_shm_alloc(ctx, param->a1, TEE_SHM_MAPPED);
362 if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
363 reg_pair_from_64(&param->a1, &param->a2, pa);
364 reg_pair_from_64(&param->a4, &param->a5,
365 (unsigned long)shm);
366 } else {
367 param->a1 = 0;
368 param->a2 = 0;
369 param->a4 = 0;
370 param->a5 = 0;
371 }
372 break;
373 case OPTEE_SMC_RPC_FUNC_FREE:
374 shm = reg_pair_to_ptr(param->a1, param->a2);
375 tee_shm_free(shm);
376 break;
377 case OPTEE_SMC_RPC_FUNC_IRQ:
378 /*
379 * An IRQ was raised while secure world was executing,
380 * since all IRQs are handled in Linux a dummy RPC is
381 * performed to let Linux take the IRQ through the normal
382 * vector.
383 */
384 break;
385 case OPTEE_SMC_RPC_FUNC_CMD:
386 shm = reg_pair_to_ptr(param->a1, param->a2);
387 handle_rpc_func_cmd(ctx, optee, shm);
388 break;
389 default:
390 pr_warn("Unknown RPC func 0x%x\n",
391 (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
392 break;
393 }
394
395 param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
396}
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
new file mode 100644
index 000000000000..b4ea0678a436
--- /dev/null
+++ b/drivers/tee/optee/supp.c
@@ -0,0 +1,273 @@
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/device.h>
15#include <linux/slab.h>
16#include <linux/uaccess.h>
17#include "optee_private.h"
18
19void optee_supp_init(struct optee_supp *supp)
20{
21 memset(supp, 0, sizeof(*supp));
22 mutex_init(&supp->ctx_mutex);
23 mutex_init(&supp->thrd_mutex);
24 mutex_init(&supp->supp_mutex);
25 init_completion(&supp->data_to_supp);
26 init_completion(&supp->data_from_supp);
27}
28
29void optee_supp_uninit(struct optee_supp *supp)
30{
31 mutex_destroy(&supp->ctx_mutex);
32 mutex_destroy(&supp->thrd_mutex);
33 mutex_destroy(&supp->supp_mutex);
34}
35
36/**
37 * optee_supp_thrd_req() - request service from supplicant
38 * @ctx: context doing the request
39 * @func: function requested
40 * @num_params: number of elements in @param array
41 * @param: parameters for function
42 *
43 * Returns result of operation to be passed to secure world
44 */
45u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
46 struct tee_param *param)
47{
48 bool interruptable;
49 struct optee *optee = tee_get_drvdata(ctx->teedev);
50 struct optee_supp *supp = &optee->supp;
51 u32 ret;
52
53 /*
 54 * Other threads block here until we've copied our answer from
55 * supplicant.
56 */
57 while (mutex_lock_interruptible(&supp->thrd_mutex)) {
58 /* See comment below on when the RPC can be interrupted. */
59 mutex_lock(&supp->ctx_mutex);
60 interruptable = !supp->ctx;
61 mutex_unlock(&supp->ctx_mutex);
62 if (interruptable)
63 return TEEC_ERROR_COMMUNICATION;
64 }
65
66 /*
67 * We have exclusive access now since the supplicant at this
68 * point is either doing a
69 * wait_for_completion_interruptible(&supp->data_to_supp) or is in
70 * userspace still about to do the ioctl() to enter
71 * optee_supp_recv() below.
72 */
73
74 supp->func = func;
75 supp->num_params = num_params;
76 supp->param = param;
77 supp->req_posted = true;
78
79 /* Let supplicant get the data */
80 complete(&supp->data_to_supp);
81
82 /*
83 * Wait for supplicant to process and return result, once we've
84 * returned from wait_for_completion(data_from_supp) we have
85 * exclusive access again.
86 */
87 while (wait_for_completion_interruptible(&supp->data_from_supp)) {
88 mutex_lock(&supp->ctx_mutex);
89 interruptable = !supp->ctx;
90 if (interruptable) {
91 /*
92 * There's no supplicant available and since the
93 * supp->ctx_mutex currently is held none can
94 * become available until the mutex released
95 * again.
96 *
97 * Interrupting an RPC to supplicant is only
98 * allowed as a way of slightly improving the user
99 * experience in case the supplicant hasn't been
100 * started yet. During normal operation the supplicant
101 * will serve all requests in a timely manner and
 102 * interrupting it then wouldn't make sense.
103 */
104 supp->ret = TEEC_ERROR_COMMUNICATION;
105 init_completion(&supp->data_to_supp);
106 }
107 mutex_unlock(&supp->ctx_mutex);
108 if (interruptable)
109 break;
110 }
111
112 ret = supp->ret;
113 supp->param = NULL;
114 supp->req_posted = false;
115
116 /* We're done, let someone else talk to the supplicant now. */
117 mutex_unlock(&supp->thrd_mutex);
118
119 return ret;
120}
121
122/**
123 * optee_supp_recv() - receive request for supplicant
124 * @ctx: context receiving the request
125 * @func: requested function in supplicant
126 * @num_params: number of elements allocated in @param, updated with number
127 * used elements
128 * @param: space for parameters for @func
129 *
130 * Returns 0 on success or <0 on failure
131 */
132int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
133 struct tee_param *param)
134{
135 struct tee_device *teedev = ctx->teedev;
136 struct optee *optee = tee_get_drvdata(teedev);
137 struct optee_supp *supp = &optee->supp;
138 int rc;
139
140 /*
 141 * In case two threads in one supplicant are calling this function
142 * simultaneously we need to protect the data with a mutex which
143 * we'll release before returning.
144 */
145 mutex_lock(&supp->supp_mutex);
146
147 if (supp->supp_next_send) {
148 /*
149 * optee_supp_recv() has been called again without
 150 * an optee_supp_send() in between. Supplicant has
151 * probably been restarted before it was able to
152 * write back last result. Abort last request and
153 * wait for a new.
154 */
155 if (supp->req_posted) {
156 supp->ret = TEEC_ERROR_COMMUNICATION;
157 supp->supp_next_send = false;
158 complete(&supp->data_from_supp);
159 }
160 }
161
162 /*
163 * This is where supplicant will be hanging most of the
 164 * time, let's make this interruptible so we can easily
165 * restart supplicant if needed.
166 */
167 if (wait_for_completion_interruptible(&supp->data_to_supp)) {
168 rc = -ERESTARTSYS;
169 goto out;
170 }
171
 172 /* We have exclusive access to the data */
173
174 if (*num_params < supp->num_params) {
175 /*
176 * Not enough room for parameters, tell supplicant
177 * it failed and abort last request.
178 */
179 supp->ret = TEEC_ERROR_COMMUNICATION;
180 rc = -EINVAL;
181 complete(&supp->data_from_supp);
182 goto out;
183 }
184
185 *func = supp->func;
186 *num_params = supp->num_params;
187 memcpy(param, supp->param,
188 sizeof(struct tee_param) * supp->num_params);
189
190 /* Allow optee_supp_send() below to do its work */
191 supp->supp_next_send = true;
192
193 rc = 0;
194out:
195 mutex_unlock(&supp->supp_mutex);
196 return rc;
197}
198
199/**
200 * optee_supp_send() - send result of request from supplicant
201 * @ctx: context sending result
202 * @ret: return value of request
203 * @num_params: number of parameters returned
204 * @param: returned parameters
205 *
206 * Returns 0 on success or <0 on failure.
207 */
208int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
209 struct tee_param *param)
210{
211 struct tee_device *teedev = ctx->teedev;
212 struct optee *optee = tee_get_drvdata(teedev);
213 struct optee_supp *supp = &optee->supp;
214 size_t n;
215 int rc = 0;
216
217 /*
218 * We still have exclusive access to the data since that's how we
 219 * left it when returning from optee_supp_recv().
220 */
221
 222 /* See comment on the mutex in optee_supp_recv() above */
223 mutex_lock(&supp->supp_mutex);
224
225 if (!supp->supp_next_send) {
226 /*
227 * Something strange is going on, supplicant shouldn't
228 * enter optee_supp_send() in this state
229 */
230 rc = -ENOENT;
231 goto out;
232 }
233
234 if (num_params != supp->num_params) {
235 /*
236 * Something is wrong, let supplicant restart. Next call to
237 * optee_supp_recv() will give an error to the requesting
238 * thread and release it.
239 */
240 rc = -EINVAL;
241 goto out;
242 }
243
244 /* Update out and in/out parameters */
245 for (n = 0; n < num_params; n++) {
246 struct tee_param *p = supp->param + n;
247
248 switch (p->attr) {
249 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
250 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
251 p->u.value.a = param[n].u.value.a;
252 p->u.value.b = param[n].u.value.b;
253 p->u.value.c = param[n].u.value.c;
254 break;
255 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
256 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
257 p->u.memref.size = param[n].u.memref.size;
258 break;
259 default:
260 break;
261 }
262 }
263 supp->ret = ret;
264
265 /* Allow optee_supp_recv() above to do its work */
266 supp->supp_next_send = false;
267
268 /* Let the requesting thread continue */
269 complete(&supp->data_from_supp);
270out:
271 mutex_unlock(&supp->supp_mutex);
272 return rc;
273}
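The handshake implemented above can be summarized like this (illustrative comment, not from the patch):

/*
 *  Requesting thread (RPC from secure world)   Supplicant (tee-supplicant)
 *  ------------------------------------------  ---------------------------
 *  optee_supp_thrd_req()
 *    take thrd_mutex
 *    fill supp->func/num_params/param
 *    complete(&supp->data_to_supp)      ---->  optee_supp_recv()
 *                                                wait for data_to_supp
 *                                                copy request out to user space
 *                                              ...request served in user space...
 *                                              optee_supp_send()
 *                                                copy results, set supp->ret
 *    wait for data_from_supp            <----    complete(&supp->data_from_supp)
 *    read supp->ret, release thrd_mutex
 */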
diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c
new file mode 100644
index 000000000000..5c60bf4423e6
--- /dev/null
+++ b/drivers/tee/tee_core.c
@@ -0,0 +1,893 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#define pr_fmt(fmt) "%s: " fmt, __func__
16
17#include <linux/cdev.h>
18#include <linux/device.h>
19#include <linux/fs.h>
20#include <linux/idr.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/tee_drv.h>
24#include <linux/uaccess.h>
25#include "tee_private.h"
26
27#define TEE_NUM_DEVICES 32
28
29#define TEE_IOCTL_PARAM_SIZE(x) (sizeof(struct tee_param) * (x))
30
31/*
32 * Unprivileged devices in the lower half range and privileged devices in
33 * the upper half range.
34 */
35static DECLARE_BITMAP(dev_mask, TEE_NUM_DEVICES);
36static DEFINE_SPINLOCK(driver_lock);
37
38static struct class *tee_class;
39static dev_t tee_devt;
40
41static int tee_open(struct inode *inode, struct file *filp)
42{
43 int rc;
44 struct tee_device *teedev;
45 struct tee_context *ctx;
46
47 teedev = container_of(inode->i_cdev, struct tee_device, cdev);
48 if (!tee_device_get(teedev))
49 return -EINVAL;
50
51 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
52 if (!ctx) {
53 rc = -ENOMEM;
54 goto err;
55 }
56
57 ctx->teedev = teedev;
58 INIT_LIST_HEAD(&ctx->list_shm);
59 filp->private_data = ctx;
60 rc = teedev->desc->ops->open(ctx);
61 if (rc)
62 goto err;
63
64 return 0;
65err:
66 kfree(ctx);
67 tee_device_put(teedev);
68 return rc;
69}
70
71static int tee_release(struct inode *inode, struct file *filp)
72{
73 struct tee_context *ctx = filp->private_data;
74 struct tee_device *teedev = ctx->teedev;
75 struct tee_shm *shm;
76
77 ctx->teedev->desc->ops->release(ctx);
78 mutex_lock(&ctx->teedev->mutex);
79 list_for_each_entry(shm, &ctx->list_shm, link)
80 shm->ctx = NULL;
81 mutex_unlock(&ctx->teedev->mutex);
82 kfree(ctx);
83 tee_device_put(teedev);
84 return 0;
85}
86
87static int tee_ioctl_version(struct tee_context *ctx,
88 struct tee_ioctl_version_data __user *uvers)
89{
90 struct tee_ioctl_version_data vers;
91
92 ctx->teedev->desc->ops->get_version(ctx->teedev, &vers);
93 if (copy_to_user(uvers, &vers, sizeof(vers)))
94 return -EFAULT;
95 return 0;
96}
97
98static int tee_ioctl_shm_alloc(struct tee_context *ctx,
99 struct tee_ioctl_shm_alloc_data __user *udata)
100{
101 long ret;
102 struct tee_ioctl_shm_alloc_data data;
103 struct tee_shm *shm;
104
105 if (copy_from_user(&data, udata, sizeof(data)))
106 return -EFAULT;
107
108 /* Currently no input flags are supported */
109 if (data.flags)
110 return -EINVAL;
111
112 data.id = -1;
113
114 shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
115 if (IS_ERR(shm))
116 return PTR_ERR(shm);
117
118 data.id = shm->id;
119 data.flags = shm->flags;
120 data.size = shm->size;
121
122 if (copy_to_user(udata, &data, sizeof(data)))
123 ret = -EFAULT;
124 else
125 ret = tee_shm_get_fd(shm);
126
127 /*
 128 * When user space closes the file descriptor the shared memory
 129 * is freed; if tee_shm_get_fd() failed then it is freed
 130 * immediately.
131 */
132 tee_shm_put(shm);
133 return ret;
134}
135
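For reference, a hedged user-space sketch of how this ioctl is meant to be used (not from the patch), assuming the TEE_IOC_SHM_ALLOC ioctl and struct tee_ioctl_shm_alloc_data from the subsystem's uapi header <linux/tee.h>; error handling is trimmed:

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/tee.h>

/* Illustrative only: allocate and map a shared memory buffer */
static void *map_tee_shm(int tee_fd, size_t size, int *id)
{
	struct tee_ioctl_shm_alloc_data data = { .size = size };
	int shm_fd = ioctl(tee_fd, TEE_IOC_SHM_ALLOC, &data);

	if (shm_fd < 0)
		return NULL;
	*id = data.id;	/* identifies the buffer in later TEE ioctls */
	return mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    shm_fd, 0);
}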
136static int params_from_user(struct tee_context *ctx, struct tee_param *params,
137 size_t num_params,
138 struct tee_ioctl_param __user *uparams)
139{
140 size_t n;
141
142 for (n = 0; n < num_params; n++) {
143 struct tee_shm *shm;
144 struct tee_ioctl_param ip;
145
146 if (copy_from_user(&ip, uparams + n, sizeof(ip)))
147 return -EFAULT;
148
 149 /* All unused attribute bits have to be zero */
150 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
151 return -EINVAL;
152
153 params[n].attr = ip.attr;
154 switch (ip.attr) {
155 case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
156 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
157 break;
158 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
159 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
160 params[n].u.value.a = ip.a;
161 params[n].u.value.b = ip.b;
162 params[n].u.value.c = ip.c;
163 break;
164 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
165 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
166 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
167 /*
168 * If we fail to get a pointer to a shared memory
169 * object (and increase the ref count) from an
170 * identifier we return an error. All pointers that
 171 * have been added to params have an increased ref
 172 * count. It's the caller's responsibility to do
173 * tee_shm_put() on all resolved pointers.
174 */
175 shm = tee_shm_get_from_id(ctx, ip.c);
176 if (IS_ERR(shm))
177 return PTR_ERR(shm);
178
179 params[n].u.memref.shm_offs = ip.a;
180 params[n].u.memref.size = ip.b;
181 params[n].u.memref.shm = shm;
182 break;
183 default:
184 /* Unknown attribute */
185 return -EINVAL;
186 }
187 }
188 return 0;
189}
190
191static int params_to_user(struct tee_ioctl_param __user *uparams,
192 size_t num_params, struct tee_param *params)
193{
194 size_t n;
195
196 for (n = 0; n < num_params; n++) {
197 struct tee_ioctl_param __user *up = uparams + n;
198 struct tee_param *p = params + n;
199
200 switch (p->attr) {
201 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
202 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
203 if (put_user(p->u.value.a, &up->a) ||
204 put_user(p->u.value.b, &up->b) ||
205 put_user(p->u.value.c, &up->c))
206 return -EFAULT;
207 break;
208 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
209 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
210 if (put_user((u64)p->u.memref.size, &up->b))
211 return -EFAULT;
212 default:
213 break;
214 }
215 }
216 return 0;
217}
218
219static bool param_is_memref(struct tee_param *param)
220{
221 switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) {
222 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
223 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
224 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
225 return true;
226 default:
227 return false;
228 }
229}
230
231static int tee_ioctl_open_session(struct tee_context *ctx,
232 struct tee_ioctl_buf_data __user *ubuf)
233{
234 int rc;
235 size_t n;
236 struct tee_ioctl_buf_data buf;
237 struct tee_ioctl_open_session_arg __user *uarg;
238 struct tee_ioctl_open_session_arg arg;
239 struct tee_ioctl_param __user *uparams = NULL;
240 struct tee_param *params = NULL;
241 bool have_session = false;
242
243 if (!ctx->teedev->desc->ops->open_session)
244 return -EINVAL;
245
246 if (copy_from_user(&buf, ubuf, sizeof(buf)))
247 return -EFAULT;
248
249 if (buf.buf_len > TEE_MAX_ARG_SIZE ||
250 buf.buf_len < sizeof(struct tee_ioctl_open_session_arg))
251 return -EINVAL;
252
253 uarg = u64_to_user_ptr(buf.buf_ptr);
254 if (copy_from_user(&arg, uarg, sizeof(arg)))
255 return -EFAULT;
256
257 if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
258 return -EINVAL;
259
260 if (arg.num_params) {
261 params = kcalloc(arg.num_params, sizeof(struct tee_param),
262 GFP_KERNEL);
263 if (!params)
264 return -ENOMEM;
265 uparams = uarg->params;
266 rc = params_from_user(ctx, params, arg.num_params, uparams);
267 if (rc)
268 goto out;
269 }
270
271 rc = ctx->teedev->desc->ops->open_session(ctx, &arg, params);
272 if (rc)
273 goto out;
274 have_session = true;
275
276 if (put_user(arg.session, &uarg->session) ||
277 put_user(arg.ret, &uarg->ret) ||
278 put_user(arg.ret_origin, &uarg->ret_origin)) {
279 rc = -EFAULT;
280 goto out;
281 }
282 rc = params_to_user(uparams, arg.num_params, params);
283out:
284 /*
 285 * If we've succeeded in opening the session but failed to communicate
286 * it back to user space, close the session again to avoid leakage.
287 */
288 if (rc && have_session && ctx->teedev->desc->ops->close_session)
289 ctx->teedev->desc->ops->close_session(ctx, arg.session);
290
291 if (params) {
292 /* Decrease ref count for all valid shared memory pointers */
293 for (n = 0; n < arg.num_params; n++)
294 if (param_is_memref(params + n) &&
295 params[n].u.memref.shm)
296 tee_shm_put(params[n].u.memref.shm);
297 kfree(params);
298 }
299
300 return rc;
301}
302
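A hedged user-space sketch of how the argument buffer for this ioctl is packed so that the buf_len check above passes (not from the patch), assuming TEE_IOC_OPEN_SESSION and the structures from the uapi header <linux/tee.h>, the usual libc headers, and tee_fd as an open TEE device; the UUID and parameter contents are elided:

	union {
		struct tee_ioctl_open_session_arg arg;
		__u8 raw[sizeof(struct tee_ioctl_open_session_arg) +
			 2 * sizeof(struct tee_ioctl_param)];
	} req;
	struct tee_ioctl_buf_data buf = {
		.buf_ptr = (uintptr_t)&req,
		.buf_len = sizeof(req),	/* sizeof(arg) plus room for two params */
	};

	memset(&req, 0, sizeof(req));
	req.arg.num_params = 2;
	/* ...fill in the Trusted Application UUID and req.arg.params[0..1]... */

	if (ioctl(tee_fd, TEE_IOC_OPEN_SESSION, &buf) == 0) {
		/* req.arg.ret, req.arg.ret_origin and req.arg.session are now valid */
	}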
303static int tee_ioctl_invoke(struct tee_context *ctx,
304 struct tee_ioctl_buf_data __user *ubuf)
305{
306 int rc;
307 size_t n;
308 struct tee_ioctl_buf_data buf;
309 struct tee_ioctl_invoke_arg __user *uarg;
310 struct tee_ioctl_invoke_arg arg;
311 struct tee_ioctl_param __user *uparams = NULL;
312 struct tee_param *params = NULL;
313
314 if (!ctx->teedev->desc->ops->invoke_func)
315 return -EINVAL;
316
317 if (copy_from_user(&buf, ubuf, sizeof(buf)))
318 return -EFAULT;
319
320 if (buf.buf_len > TEE_MAX_ARG_SIZE ||
321 buf.buf_len < sizeof(struct tee_ioctl_invoke_arg))
322 return -EINVAL;
323
324 uarg = u64_to_user_ptr(buf.buf_ptr);
325 if (copy_from_user(&arg, uarg, sizeof(arg)))
326 return -EFAULT;
327
328 if (sizeof(arg) + TEE_IOCTL_PARAM_SIZE(arg.num_params) != buf.buf_len)
329 return -EINVAL;
330
331 if (arg.num_params) {
332 params = kcalloc(arg.num_params, sizeof(struct tee_param),
333 GFP_KERNEL);
334 if (!params)
335 return -ENOMEM;
336 uparams = uarg->params;
337 rc = params_from_user(ctx, params, arg.num_params, uparams);
338 if (rc)
339 goto out;
340 }
341
342 rc = ctx->teedev->desc->ops->invoke_func(ctx, &arg, params);
343 if (rc)
344 goto out;
345
346 if (put_user(arg.ret, &uarg->ret) ||
347 put_user(arg.ret_origin, &uarg->ret_origin)) {
348 rc = -EFAULT;
349 goto out;
350 }
351 rc = params_to_user(uparams, arg.num_params, params);
352out:
353 if (params) {
354 /* Decrease ref count for all valid shared memory pointers */
355 for (n = 0; n < arg.num_params; n++)
356 if (param_is_memref(params + n) &&
357 params[n].u.memref.shm)
358 tee_shm_put(params[n].u.memref.shm);
359 kfree(params);
360 }
361 return rc;
362}
363
364static int tee_ioctl_cancel(struct tee_context *ctx,
365 struct tee_ioctl_cancel_arg __user *uarg)
366{
367 struct tee_ioctl_cancel_arg arg;
368
369 if (!ctx->teedev->desc->ops->cancel_req)
370 return -EINVAL;
371
372 if (copy_from_user(&arg, uarg, sizeof(arg)))
373 return -EFAULT;
374
375 return ctx->teedev->desc->ops->cancel_req(ctx, arg.cancel_id,
376 arg.session);
377}
378
379static int
380tee_ioctl_close_session(struct tee_context *ctx,
381 struct tee_ioctl_close_session_arg __user *uarg)
382{
383 struct tee_ioctl_close_session_arg arg;
384
385 if (!ctx->teedev->desc->ops->close_session)
386 return -EINVAL;
387
388 if (copy_from_user(&arg, uarg, sizeof(arg)))
389 return -EFAULT;
390
391 return ctx->teedev->desc->ops->close_session(ctx, arg.session);
392}
393
394static int params_to_supp(struct tee_context *ctx,
395 struct tee_ioctl_param __user *uparams,
396 size_t num_params, struct tee_param *params)
397{
398 size_t n;
399
400 for (n = 0; n < num_params; n++) {
401 struct tee_ioctl_param ip;
402 struct tee_param *p = params + n;
403
404 ip.attr = p->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK;
405 switch (p->attr) {
406 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
407 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
408 ip.a = p->u.value.a;
409 ip.b = p->u.value.b;
410 ip.c = p->u.value.c;
411 break;
412 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
413 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
414 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
415 ip.b = p->u.memref.size;
416 if (!p->u.memref.shm) {
417 ip.a = 0;
418 ip.c = (u64)-1; /* invalid shm id */
419 break;
420 }
421 ip.a = p->u.memref.shm_offs;
422 ip.c = p->u.memref.shm->id;
423 break;
424 default:
425 ip.a = 0;
426 ip.b = 0;
427 ip.c = 0;
428 break;
429 }
430
431 if (copy_to_user(uparams + n, &ip, sizeof(ip)))
432 return -EFAULT;
433 }
434
435 return 0;
436}
437
438static int tee_ioctl_supp_recv(struct tee_context *ctx,
439 struct tee_ioctl_buf_data __user *ubuf)
440{
441 int rc;
442 struct tee_ioctl_buf_data buf;
443 struct tee_iocl_supp_recv_arg __user *uarg;
444 struct tee_param *params;
445 u32 num_params;
446 u32 func;
447
448 if (!ctx->teedev->desc->ops->supp_recv)
449 return -EINVAL;
450
451 if (copy_from_user(&buf, ubuf, sizeof(buf)))
452 return -EFAULT;
453
454 if (buf.buf_len > TEE_MAX_ARG_SIZE ||
455 buf.buf_len < sizeof(struct tee_iocl_supp_recv_arg))
456 return -EINVAL;
457
458 uarg = u64_to_user_ptr(buf.buf_ptr);
459 if (get_user(num_params, &uarg->num_params))
460 return -EFAULT;
461
462 if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) != buf.buf_len)
463 return -EINVAL;
464
465 params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
466 if (!params)
467 return -ENOMEM;
468
469 rc = ctx->teedev->desc->ops->supp_recv(ctx, &func, &num_params, params);
470 if (rc)
471 goto out;
472
473 if (put_user(func, &uarg->func) ||
474 put_user(num_params, &uarg->num_params)) {
475 rc = -EFAULT;
476 goto out;
477 }
478
479 rc = params_to_supp(ctx, uarg->params, num_params, params);
480out:
481 kfree(params);
482 return rc;
483}
484
485static int params_from_supp(struct tee_param *params, size_t num_params,
486 struct tee_ioctl_param __user *uparams)
487{
488 size_t n;
489
490 for (n = 0; n < num_params; n++) {
491 struct tee_param *p = params + n;
492 struct tee_ioctl_param ip;
493
494 if (copy_from_user(&ip, uparams + n, sizeof(ip)))
495 return -EFAULT;
496
497		/* All unused attribute bits have to be zero */
498 if (ip.attr & ~TEE_IOCTL_PARAM_ATTR_TYPE_MASK)
499 return -EINVAL;
500
501 p->attr = ip.attr;
502 switch (ip.attr) {
503 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
504 case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
505 /* Only out and in/out values can be updated */
506 p->u.value.a = ip.a;
507 p->u.value.b = ip.b;
508 p->u.value.c = ip.c;
509 break;
510 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
511 case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
512 /*
513 * Only the size of the memref can be updated.
514 * Since we don't have access to the original
515 * parameters here, only store the supplied size.
516 * The driver will copy the updated size into the
517 * original parameters.
518 */
519 p->u.memref.shm = NULL;
520 p->u.memref.shm_offs = 0;
521 p->u.memref.size = ip.b;
522 break;
523 default:
524 memset(&p->u, 0, sizeof(p->u));
525 break;
526 }
527 }
528 return 0;
529}
530
531static int tee_ioctl_supp_send(struct tee_context *ctx,
532 struct tee_ioctl_buf_data __user *ubuf)
533{
534 long rc;
535 struct tee_ioctl_buf_data buf;
536 struct tee_iocl_supp_send_arg __user *uarg;
537 struct tee_param *params;
538 u32 num_params;
539 u32 ret;
540
541 /* Not valid for this driver */
542 if (!ctx->teedev->desc->ops->supp_send)
543 return -EINVAL;
544
545 if (copy_from_user(&buf, ubuf, sizeof(buf)))
546 return -EFAULT;
547
548 if (buf.buf_len > TEE_MAX_ARG_SIZE ||
549 buf.buf_len < sizeof(struct tee_iocl_supp_send_arg))
550 return -EINVAL;
551
552 uarg = u64_to_user_ptr(buf.buf_ptr);
553 if (get_user(ret, &uarg->ret) ||
554 get_user(num_params, &uarg->num_params))
555 return -EFAULT;
556
557 if (sizeof(*uarg) + TEE_IOCTL_PARAM_SIZE(num_params) > buf.buf_len)
558 return -EINVAL;
559
560 params = kcalloc(num_params, sizeof(struct tee_param), GFP_KERNEL);
561 if (!params)
562 return -ENOMEM;
563
564 rc = params_from_supp(params, num_params, uarg->params);
565 if (rc)
566 goto out;
567
568 rc = ctx->teedev->desc->ops->supp_send(ctx, ret, num_params, params);
569out:
570 kfree(params);
571 return rc;
572}
573
574static long tee_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
575{
576 struct tee_context *ctx = filp->private_data;
577 void __user *uarg = (void __user *)arg;
578
579 switch (cmd) {
580 case TEE_IOC_VERSION:
581 return tee_ioctl_version(ctx, uarg);
582 case TEE_IOC_SHM_ALLOC:
583 return tee_ioctl_shm_alloc(ctx, uarg);
584 case TEE_IOC_OPEN_SESSION:
585 return tee_ioctl_open_session(ctx, uarg);
586 case TEE_IOC_INVOKE:
587 return tee_ioctl_invoke(ctx, uarg);
588 case TEE_IOC_CANCEL:
589 return tee_ioctl_cancel(ctx, uarg);
590 case TEE_IOC_CLOSE_SESSION:
591 return tee_ioctl_close_session(ctx, uarg);
592 case TEE_IOC_SUPPL_RECV:
593 return tee_ioctl_supp_recv(ctx, uarg);
594 case TEE_IOC_SUPPL_SEND:
595 return tee_ioctl_supp_send(ctx, uarg);
596 default:
597 return -EINVAL;
598 }
599}
600
601static const struct file_operations tee_fops = {
602 .owner = THIS_MODULE,
603 .open = tee_open,
604 .release = tee_release,
605 .unlocked_ioctl = tee_ioctl,
606 .compat_ioctl = tee_ioctl,
607};
608
609static void tee_release_device(struct device *dev)
610{
611 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
612
613 spin_lock(&driver_lock);
614 clear_bit(teedev->id, dev_mask);
615 spin_unlock(&driver_lock);
616 mutex_destroy(&teedev->mutex);
617 idr_destroy(&teedev->idr);
618 kfree(teedev);
619}
620
621/**
622 * tee_device_alloc() - Allocate a new struct tee_device instance
623 * @teedesc: Descriptor for this driver
624 * @dev: Parent device for this device
625 * @pool: Shared memory pool, NULL if not used
626 * @driver_data: Private driver data for this device
627 *
628 * Allocates a new struct tee_device instance. The device is
629 * removed by tee_device_unregister().
630 *
631 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
632 */
633struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
634 struct device *dev,
635 struct tee_shm_pool *pool,
636 void *driver_data)
637{
638 struct tee_device *teedev;
639 void *ret;
640 int rc;
641 int offs = 0;
642
643 if (!teedesc || !teedesc->name || !teedesc->ops ||
644 !teedesc->ops->get_version || !teedesc->ops->open ||
645 !teedesc->ops->release || !pool)
646 return ERR_PTR(-EINVAL);
647
648 teedev = kzalloc(sizeof(*teedev), GFP_KERNEL);
649 if (!teedev) {
650 ret = ERR_PTR(-ENOMEM);
651 goto err;
652 }
653
654 if (teedesc->flags & TEE_DESC_PRIVILEGED)
655 offs = TEE_NUM_DEVICES / 2;
656
657 spin_lock(&driver_lock);
658 teedev->id = find_next_zero_bit(dev_mask, TEE_NUM_DEVICES, offs);
659 if (teedev->id < TEE_NUM_DEVICES)
660 set_bit(teedev->id, dev_mask);
661 spin_unlock(&driver_lock);
662
663 if (teedev->id >= TEE_NUM_DEVICES) {
664 ret = ERR_PTR(-ENOMEM);
665 goto err;
666 }
667
668 snprintf(teedev->name, sizeof(teedev->name), "tee%s%d",
669 teedesc->flags & TEE_DESC_PRIVILEGED ? "priv" : "",
670 teedev->id - offs);
671
672 teedev->dev.class = tee_class;
673 teedev->dev.release = tee_release_device;
674 teedev->dev.parent = dev;
675
676 teedev->dev.devt = MKDEV(MAJOR(tee_devt), teedev->id);
677
678 rc = dev_set_name(&teedev->dev, "%s", teedev->name);
679 if (rc) {
680 ret = ERR_PTR(rc);
681 goto err_devt;
682 }
683
684 cdev_init(&teedev->cdev, &tee_fops);
685 teedev->cdev.owner = teedesc->owner;
686 teedev->cdev.kobj.parent = &teedev->dev.kobj;
687
688 dev_set_drvdata(&teedev->dev, driver_data);
689 device_initialize(&teedev->dev);
690
691 /* 1 as tee_device_unregister() does one final tee_device_put() */
692 teedev->num_users = 1;
693 init_completion(&teedev->c_no_users);
694 mutex_init(&teedev->mutex);
695 idr_init(&teedev->idr);
696
697 teedev->desc = teedesc;
698 teedev->pool = pool;
699
700 return teedev;
701err_devt:
702 unregister_chrdev_region(teedev->dev.devt, 1);
703err:
704 pr_err("could not register %s driver\n",
705 teedesc->flags & TEE_DESC_PRIVILEGED ? "privileged" : "client");
706 if (teedev && teedev->id < TEE_NUM_DEVICES) {
707 spin_lock(&driver_lock);
708 clear_bit(teedev->id, dev_mask);
709 spin_unlock(&driver_lock);
710 }
711 kfree(teedev);
712 return ret;
713}
714EXPORT_SYMBOL_GPL(tee_device_alloc);
715
716static ssize_t implementation_id_show(struct device *dev,
717 struct device_attribute *attr, char *buf)
718{
719 struct tee_device *teedev = container_of(dev, struct tee_device, dev);
720 struct tee_ioctl_version_data vers;
721
722 teedev->desc->ops->get_version(teedev, &vers);
723 return scnprintf(buf, PAGE_SIZE, "%d\n", vers.impl_id);
724}
725static DEVICE_ATTR_RO(implementation_id);
726
727static struct attribute *tee_dev_attrs[] = {
728 &dev_attr_implementation_id.attr,
729 NULL
730};
731
732static const struct attribute_group tee_dev_group = {
733 .attrs = tee_dev_attrs,
734};
735
736/**
737 * tee_device_register() - Registers a TEE device
738 * @teedev: Device to register
739 *
740 * tee_device_unregister() needs to be called to remove the @teedev if
741 * this function fails.
742 *
743 * @returns < 0 on failure
744 */
745int tee_device_register(struct tee_device *teedev)
746{
747 int rc;
748
749 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
750 dev_err(&teedev->dev, "attempt to register twice\n");
751 return -EINVAL;
752 }
753
754 rc = cdev_add(&teedev->cdev, teedev->dev.devt, 1);
755 if (rc) {
756 dev_err(&teedev->dev,
757 "unable to cdev_add() %s, major %d, minor %d, err=%d\n",
758 teedev->name, MAJOR(teedev->dev.devt),
759 MINOR(teedev->dev.devt), rc);
760 return rc;
761 }
762
763 rc = device_add(&teedev->dev);
764 if (rc) {
765 dev_err(&teedev->dev,
766 "unable to device_add() %s, major %d, minor %d, err=%d\n",
767 teedev->name, MAJOR(teedev->dev.devt),
768 MINOR(teedev->dev.devt), rc);
769 goto err_device_add;
770 }
771
772 rc = sysfs_create_group(&teedev->dev.kobj, &tee_dev_group);
773 if (rc) {
774 dev_err(&teedev->dev,
775 "failed to create sysfs attributes, err=%d\n", rc);
776 goto err_sysfs_create_group;
777 }
778
779 teedev->flags |= TEE_DEVICE_FLAG_REGISTERED;
780 return 0;
781
782err_sysfs_create_group:
783 device_del(&teedev->dev);
784err_device_add:
785 cdev_del(&teedev->cdev);
786 return rc;
787}
788EXPORT_SYMBOL_GPL(tee_device_register);
789
790void tee_device_put(struct tee_device *teedev)
791{
792 mutex_lock(&teedev->mutex);
793 /* Shouldn't put in this state */
794 if (!WARN_ON(!teedev->desc)) {
795 teedev->num_users--;
796 if (!teedev->num_users) {
797 teedev->desc = NULL;
798 complete(&teedev->c_no_users);
799 }
800 }
801 mutex_unlock(&teedev->mutex);
802}
803
804bool tee_device_get(struct tee_device *teedev)
805{
806 mutex_lock(&teedev->mutex);
807 if (!teedev->desc) {
808 mutex_unlock(&teedev->mutex);
809 return false;
810 }
811 teedev->num_users++;
812 mutex_unlock(&teedev->mutex);
813 return true;
814}
815
816/**
817 * tee_device_unregister() - Removes a TEE device
818 * @teedev: Device to unregister
819 *
820 * This function should be called to remove the @teedev even if
821 * tee_device_register() hasn't been called yet. Does nothing if
822 * @teedev is NULL.
823 */
824void tee_device_unregister(struct tee_device *teedev)
825{
826 if (!teedev)
827 return;
828
829 if (teedev->flags & TEE_DEVICE_FLAG_REGISTERED) {
830 sysfs_remove_group(&teedev->dev.kobj, &tee_dev_group);
831 cdev_del(&teedev->cdev);
832 device_del(&teedev->dev);
833 }
834
835 tee_device_put(teedev);
836 wait_for_completion(&teedev->c_no_users);
837
838 /*
839 * No need to take a mutex any longer now since teedev->desc was
840 * set to NULL before teedev->c_no_users was completed.
841 */
842
843 teedev->pool = NULL;
844
845 put_device(&teedev->dev);
846}
847EXPORT_SYMBOL_GPL(tee_device_unregister);
848
849/**
850 * tee_get_drvdata() - Return driver_data pointer
851 * @teedev: Device containing the driver_data pointer
852 * @returns the driver_data pointer supplied to tee_device_alloc().
853 */
854void *tee_get_drvdata(struct tee_device *teedev)
855{
856 return dev_get_drvdata(&teedev->dev);
857}
858EXPORT_SYMBOL_GPL(tee_get_drvdata);
859
860static int __init tee_init(void)
861{
862 int rc;
863
864 tee_class = class_create(THIS_MODULE, "tee");
865 if (IS_ERR(tee_class)) {
866 pr_err("couldn't create class\n");
867 return PTR_ERR(tee_class);
868 }
869
870 rc = alloc_chrdev_region(&tee_devt, 0, TEE_NUM_DEVICES, "tee");
871 if (rc) {
872 pr_err("failed to allocate char dev region\n");
873 class_destroy(tee_class);
874 tee_class = NULL;
875 }
876
877 return rc;
878}
879
880static void __exit tee_exit(void)
881{
882 class_destroy(tee_class);
883 tee_class = NULL;
884 unregister_chrdev_region(tee_devt, TEE_NUM_DEVICES);
885}
886
887subsys_initcall(tee_init);
888module_exit(tee_exit);
889
890MODULE_AUTHOR("Linaro");
891MODULE_DESCRIPTION("TEE Driver");
892MODULE_VERSION("1.0");
893MODULE_LICENSE("GPL v2");
diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h
new file mode 100644
index 000000000000..21cb6be8bce9
--- /dev/null
+++ b/drivers/tee/tee_private.h
@@ -0,0 +1,129 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#ifndef TEE_PRIVATE_H
15#define TEE_PRIVATE_H
16
17#include <linux/cdev.h>
18#include <linux/completion.h>
19#include <linux/device.h>
20#include <linux/kref.h>
21#include <linux/mutex.h>
22#include <linux/types.h>
23
24struct tee_device;
25
26/**
27 * struct tee_shm - shared memory object
28 * @teedev: device used to allocate the object
29 * @ctx: context using the object, if NULL the context is gone
30 * @link: link element
31 * @paddr: physical address of the shared memory
32 * @kaddr: virtual address of the shared memory
33 * @size: size of shared memory
34 * @dmabuf: dmabuf used for exporting to user space
35 * @flags: defined by TEE_SHM_* in tee_drv.h
36 * @id: unique id of a shared memory object on this device
37 */
38struct tee_shm {
39 struct tee_device *teedev;
40 struct tee_context *ctx;
41 struct list_head link;
42 phys_addr_t paddr;
43 void *kaddr;
44 size_t size;
45 struct dma_buf *dmabuf;
46 u32 flags;
47 int id;
48};
49
50struct tee_shm_pool_mgr;
51
52/**
53 * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
54 * @alloc: called when allocating shared memory
55 * @free: called when freeing shared memory
56 */
57struct tee_shm_pool_mgr_ops {
58 int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
59 size_t size);
60 void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
61};
62
63/**
64 * struct tee_shm_pool_mgr - shared memory manager
65 * @ops: operations
66 * @private_data: private data for the shared memory manager
67 */
68struct tee_shm_pool_mgr {
69 const struct tee_shm_pool_mgr_ops *ops;
70 void *private_data;
71};
72
73/**
74 * struct tee_shm_pool - shared memory pool
75 * @private_mgr: pool manager for shared memory only between kernel
76 * and secure world
77 * @dma_buf_mgr: pool manager for shared memory exported to user space
78 * @destroy: called when destroying the pool
79 * @private_data: private data for the pool
80 */
81struct tee_shm_pool {
82 struct tee_shm_pool_mgr private_mgr;
83 struct tee_shm_pool_mgr dma_buf_mgr;
84 void (*destroy)(struct tee_shm_pool *pool);
85 void *private_data;
86};
87
88#define TEE_DEVICE_FLAG_REGISTERED 0x1
89#define TEE_MAX_DEV_NAME_LEN 32
90
91/**
92 * struct tee_device - TEE Device representation
93 * @name: name of device
94 * @desc: description of device
95 * @id: unique id of device
96 * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above
97 * @dev: embedded basic device structure
98 * @cdev: embedded cdev
99 * @num_users: number of active users of this device
100 * @c_no_users: completion used when unregistering the device
101 * @mutex: mutex protecting @num_users and @idr
102 * @idr: register of shared memory objects allocated on this device
103 * @pool: shared memory pool
104 */
105struct tee_device {
106 char name[TEE_MAX_DEV_NAME_LEN];
107 const struct tee_desc *desc;
108 int id;
109 unsigned int flags;
110
111 struct device dev;
112 struct cdev cdev;
113
114 size_t num_users;
115 struct completion c_no_users;
116 struct mutex mutex; /* protects num_users and idr */
117
118 struct idr idr;
119 struct tee_shm_pool *pool;
120};
121
122int tee_shm_init(void);
123
124int tee_shm_get_fd(struct tee_shm *shm);
125
126bool tee_device_get(struct tee_device *teedev);
127void tee_device_put(struct tee_device *teedev);
128
129#endif /*TEE_PRIVATE_H*/
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
new file mode 100644
index 000000000000..d356d7f025eb
--- /dev/null
+++ b/drivers/tee/tee_shm.c
@@ -0,0 +1,358 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/device.h>
15#include <linux/dma-buf.h>
16#include <linux/fdtable.h>
17#include <linux/idr.h>
18#include <linux/sched.h>
19#include <linux/slab.h>
20#include <linux/tee_drv.h>
21#include "tee_private.h"
22
23static void tee_shm_release(struct tee_shm *shm)
24{
25 struct tee_device *teedev = shm->teedev;
26 struct tee_shm_pool_mgr *poolm;
27
28 mutex_lock(&teedev->mutex);
29 idr_remove(&teedev->idr, shm->id);
30 if (shm->ctx)
31 list_del(&shm->link);
32 mutex_unlock(&teedev->mutex);
33
34 if (shm->flags & TEE_SHM_DMA_BUF)
35 poolm = &teedev->pool->dma_buf_mgr;
36 else
37 poolm = &teedev->pool->private_mgr;
38
39 poolm->ops->free(poolm, shm);
40 kfree(shm);
41
42 tee_device_put(teedev);
43}
44
45static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
46 *attach, enum dma_data_direction dir)
47{
48 return NULL;
49}
50
51static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
52 struct sg_table *table,
53 enum dma_data_direction dir)
54{
55}
56
57static void tee_shm_op_release(struct dma_buf *dmabuf)
58{
59 struct tee_shm *shm = dmabuf->priv;
60
61 tee_shm_release(shm);
62}
63
64static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
65{
66 return NULL;
67}
68
69static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
70{
71 return NULL;
72}
73
74static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
75{
76 struct tee_shm *shm = dmabuf->priv;
77 size_t size = vma->vm_end - vma->vm_start;
78
79 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
80 size, vma->vm_page_prot);
81}
82
83static struct dma_buf_ops tee_shm_dma_buf_ops = {
84 .map_dma_buf = tee_shm_op_map_dma_buf,
85 .unmap_dma_buf = tee_shm_op_unmap_dma_buf,
86 .release = tee_shm_op_release,
87 .map_atomic = tee_shm_op_map_atomic,
88 .map = tee_shm_op_map,
89 .mmap = tee_shm_op_mmap,
90};
91
92/**
93 * tee_shm_alloc() - Allocate shared memory
94 * @ctx: Context that allocates the shared memory
95 * @size: Requested size of shared memory
96 * @flags: Flags setting properties for the requested shared memory.
97 *
98 * Memory allocated as global shared memory is automatically freed when the
99 * TEE file pointer is closed. The @flags field uses the bits defined by
100 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
101 * set. If TEE_SHM_DMA_BUF is set, global shared memory will be allocated
102 * and associated with a dma-buf handle, else driver private memory is used.
103 */
104struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
105{
106 struct tee_device *teedev = ctx->teedev;
107 struct tee_shm_pool_mgr *poolm = NULL;
108 struct tee_shm *shm;
109 void *ret;
110 int rc;
111
112 if (!(flags & TEE_SHM_MAPPED)) {
113 dev_err(teedev->dev.parent,
114 "only mapped allocations supported\n");
115 return ERR_PTR(-EINVAL);
116 }
117
118 if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
119 dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
120 return ERR_PTR(-EINVAL);
121 }
122
123 if (!tee_device_get(teedev))
124 return ERR_PTR(-EINVAL);
125
126 if (!teedev->pool) {
127 /* teedev has been detached from driver */
128 ret = ERR_PTR(-EINVAL);
129 goto err_dev_put;
130 }
131
132 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
133 if (!shm) {
134 ret = ERR_PTR(-ENOMEM);
135 goto err_dev_put;
136 }
137
138 shm->flags = flags;
139 shm->teedev = teedev;
140 shm->ctx = ctx;
141 if (flags & TEE_SHM_DMA_BUF)
142 poolm = &teedev->pool->dma_buf_mgr;
143 else
144 poolm = &teedev->pool->private_mgr;
145
146 rc = poolm->ops->alloc(poolm, shm, size);
147 if (rc) {
148 ret = ERR_PTR(rc);
149 goto err_kfree;
150 }
151
152 mutex_lock(&teedev->mutex);
153 shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
154 mutex_unlock(&teedev->mutex);
155 if (shm->id < 0) {
156 ret = ERR_PTR(shm->id);
157 goto err_pool_free;
158 }
159
160 if (flags & TEE_SHM_DMA_BUF) {
161 DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
162
163 exp_info.ops = &tee_shm_dma_buf_ops;
164 exp_info.size = shm->size;
165 exp_info.flags = O_RDWR;
166 exp_info.priv = shm;
167
168 shm->dmabuf = dma_buf_export(&exp_info);
169 if (IS_ERR(shm->dmabuf)) {
170 ret = ERR_CAST(shm->dmabuf);
171 goto err_rem;
172 }
173 }
174 mutex_lock(&teedev->mutex);
175 list_add_tail(&shm->link, &ctx->list_shm);
176 mutex_unlock(&teedev->mutex);
177
178 return shm;
179err_rem:
180 mutex_lock(&teedev->mutex);
181 idr_remove(&teedev->idr, shm->id);
182 mutex_unlock(&teedev->mutex);
183err_pool_free:
184 poolm->ops->free(poolm, shm);
185err_kfree:
186 kfree(shm);
187err_dev_put:
188 tee_device_put(teedev);
189 return ret;
190}
191EXPORT_SYMBOL_GPL(tee_shm_alloc);
192
193/**
194 * tee_shm_get_fd() - Increase reference count and return file descriptor
195 * @shm: Shared memory handle
196 * @returns user space file descriptor to shared memory
197 */
198int tee_shm_get_fd(struct tee_shm *shm)
199{
200 u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
201 int fd;
202
203 if ((shm->flags & req_flags) != req_flags)
204 return -EINVAL;
205
206 fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
207 if (fd >= 0)
208 get_dma_buf(shm->dmabuf);
209 return fd;
210}
211
212/**
213 * tee_shm_free() - Free shared memory
214 * @shm: Handle to shared memory to free
215 */
216void tee_shm_free(struct tee_shm *shm)
217{
218 /*
219 * dma_buf_put() decreases the dmabuf reference counter and will
220 * call tee_shm_release() when the last reference is gone.
221 *
222 * In the case of driver private memory we call tee_shm_release
223 * directly instead as it doesn't have a reference counter.
224 */
225 if (shm->flags & TEE_SHM_DMA_BUF)
226 dma_buf_put(shm->dmabuf);
227 else
228 tee_shm_release(shm);
229}
230EXPORT_SYMBOL_GPL(tee_shm_free);
231
232/**
233 * tee_shm_va2pa() - Get physical address of a virtual address
234 * @shm: Shared memory handle
235 * @va: Virtual address to translate
236 * @pa: Returned physical address
237 * @returns 0 on success and < 0 on failure
238 */
239int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
240{
241 /* Check that we're in the range of the shm */
242 if ((char *)va < (char *)shm->kaddr)
243 return -EINVAL;
244 if ((char *)va >= ((char *)shm->kaddr + shm->size))
245 return -EINVAL;
246
247 return tee_shm_get_pa(
248 shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
249}
250EXPORT_SYMBOL_GPL(tee_shm_va2pa);
251
252/**
253 * tee_shm_pa2va() - Get virtual address of a physical address
254 * @shm: Shared memory handle
255 * @pa: Physical address to translate
256 * @va: Returned virtual address
257 * @returns 0 on success and < 0 on failure
258 */
259int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
260{
261 /* Check that we're in the range of the shm */
262 if (pa < shm->paddr)
263 return -EINVAL;
264 if (pa >= (shm->paddr + shm->size))
265 return -EINVAL;
266
267 if (va) {
268 void *v = tee_shm_get_va(shm, pa - shm->paddr);
269
270 if (IS_ERR(v))
271 return PTR_ERR(v);
272 *va = v;
273 }
274 return 0;
275}
276EXPORT_SYMBOL_GPL(tee_shm_pa2va);
277
278/**
279 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
280 * @shm: Shared memory handle
281 * @offs: Offset from start of this shared memory
282 * @returns virtual address of the shared memory + offs if offs is within
283 * the bounds of this shared memory, else an ERR_PTR
284 */
285void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
286{
287 if (offs >= shm->size)
288 return ERR_PTR(-EINVAL);
289 return (char *)shm->kaddr + offs;
290}
291EXPORT_SYMBOL_GPL(tee_shm_get_va);
292
293/**
294 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
295 * @shm: Shared memory handle
296 * @offs: Offset from start of this shared memory
297 * @pa: Physical address to return
298 * @returns 0 if offs is within the bounds of this shared memory, else an
299 * error code.
300 */
301int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
302{
303 if (offs >= shm->size)
304 return -EINVAL;
305 if (pa)
306 *pa = shm->paddr + offs;
307 return 0;
308}
309EXPORT_SYMBOL_GPL(tee_shm_get_pa);
310
311/**
312 * tee_shm_get_from_id() - Find shared memory object and increase reference
313 * count
314 * @ctx: Context owning the shared memory
315 * @id: Id of shared memory object
316 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
317 */
318struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
319{
320 struct tee_device *teedev;
321 struct tee_shm *shm;
322
323 if (!ctx)
324 return ERR_PTR(-EINVAL);
325
326 teedev = ctx->teedev;
327 mutex_lock(&teedev->mutex);
328 shm = idr_find(&teedev->idr, id);
329 if (!shm || shm->ctx != ctx)
330 shm = ERR_PTR(-EINVAL);
331 else if (shm->flags & TEE_SHM_DMA_BUF)
332 get_dma_buf(shm->dmabuf);
333 mutex_unlock(&teedev->mutex);
334 return shm;
335}
336EXPORT_SYMBOL_GPL(tee_shm_get_from_id);
337
338/**
339 * tee_shm_get_id() - Get id of a shared memory object
340 * @shm: Shared memory handle
341 * @returns id
342 */
343int tee_shm_get_id(struct tee_shm *shm)
344{
345 return shm->id;
346}
347EXPORT_SYMBOL_GPL(tee_shm_get_id);
348
349/**
350 * tee_shm_put() - Decrease reference count on a shared memory handle
351 * @shm: Shared memory handle
352 */
353void tee_shm_put(struct tee_shm *shm)
354{
355 if (shm->flags & TEE_SHM_DMA_BUF)
356 dma_buf_put(shm->dmabuf);
357}
358EXPORT_SYMBOL_GPL(tee_shm_put);
diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c
new file mode 100644
index 000000000000..fb4f8522a526
--- /dev/null
+++ b/drivers/tee/tee_shm_pool.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (c) 2015, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14#include <linux/device.h>
15#include <linux/dma-buf.h>
16#include <linux/genalloc.h>
17#include <linux/slab.h>
18#include <linux/tee_drv.h>
19#include "tee_private.h"
20
21static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
22 struct tee_shm *shm, size_t size)
23{
24 unsigned long va;
25 struct gen_pool *genpool = poolm->private_data;
26 size_t s = roundup(size, 1 << genpool->min_alloc_order);
27
28 va = gen_pool_alloc(genpool, s);
29 if (!va)
30 return -ENOMEM;
31
32 memset((void *)va, 0, s);
33 shm->kaddr = (void *)va;
34 shm->paddr = gen_pool_virt_to_phys(genpool, va);
35 shm->size = s;
36 return 0;
37}
38
39static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
40 struct tee_shm *shm)
41{
42 gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
43 shm->size);
44 shm->kaddr = NULL;
45}
46
47static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
48 .alloc = pool_op_gen_alloc,
49 .free = pool_op_gen_free,
50};
51
52static void pool_res_mem_destroy(struct tee_shm_pool *pool)
53{
54 gen_pool_destroy(pool->private_mgr.private_data);
55 gen_pool_destroy(pool->dma_buf_mgr.private_data);
56}
57
58static int pool_res_mem_mgr_init(struct tee_shm_pool_mgr *mgr,
59 struct tee_shm_pool_mem_info *info,
60 int min_alloc_order)
61{
62 size_t page_mask = PAGE_SIZE - 1;
63 struct gen_pool *genpool = NULL;
64 int rc;
65
66 /*
67 * Start and end must be page aligned
68 */
69 if ((info->vaddr & page_mask) || (info->paddr & page_mask) ||
70 (info->size & page_mask))
71 return -EINVAL;
72
73 genpool = gen_pool_create(min_alloc_order, -1);
74 if (!genpool)
75 return -ENOMEM;
76
77 gen_pool_set_algo(genpool, gen_pool_best_fit, NULL);
78 rc = gen_pool_add_virt(genpool, info->vaddr, info->paddr, info->size,
79 -1);
80 if (rc) {
81 gen_pool_destroy(genpool);
82 return rc;
83 }
84
85 mgr->private_data = genpool;
86 mgr->ops = &pool_ops_generic;
87 return 0;
88}
89
90/**
91 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
92 * memory range
93 * @priv_info: Information for driver private shared memory pool
94 * @dmabuf_info: Information for dma-buf shared memory pool
95 *
96 * Start and end of pools must be page aligned.
97 *
98 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
99 * in @dmabuf_info, others will use the range provided by @priv_info.
100 *
101 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
102 */
103struct tee_shm_pool *
104tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
105 struct tee_shm_pool_mem_info *dmabuf_info)
106{
107 struct tee_shm_pool *pool = NULL;
108 int ret;
109
110 pool = kzalloc(sizeof(*pool), GFP_KERNEL);
111 if (!pool) {
112 ret = -ENOMEM;
113 goto err;
114 }
115
116 /*
117 * Create the pool for driver private shared memory
118 */
119 ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
120 3 /* 8 byte aligned */);
121 if (ret)
122 goto err;
123
124 /*
125 * Create the pool for dma_buf shared memory
126 */
127 ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
128 PAGE_SHIFT);
129 if (ret)
130 goto err;
131
132 pool->destroy = pool_res_mem_destroy;
133 return pool;
134err:
135 if (ret == -ENOMEM)
136 pr_err("%s: can't allocate memory for res_mem shared memory pool\n", __func__);
137 if (pool && pool->private_mgr.private_data)
138 gen_pool_destroy(pool->private_mgr.private_data);
139 kfree(pool);
140 return ERR_PTR(ret);
141}
142EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
143
144/**
145 * tee_shm_pool_free() - Free a shared memory pool
146 * @pool: The shared memory pool to free
147 *
148 * There must be no remaining shared memory allocated from this pool when
149 * this function is called.
150 */
151void tee_shm_pool_free(struct tee_shm_pool *pool)
152{
153 pool->destroy(pool);
154 kfree(pool);
155}
156EXPORT_SYMBOL_GPL(tee_shm_pool_free);
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
new file mode 100644
index 000000000000..0f175b8f6456
--- /dev/null
+++ b/include/linux/tee_drv.h
@@ -0,0 +1,277 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 *
4 * This software is licensed under the terms of the GNU General Public
5 * License version 2, as published by the Free Software Foundation, and
6 * may be copied, distributed, and modified under those terms.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 */
14
15#ifndef __TEE_DRV_H
16#define __TEE_DRV_H
17
18#include <linux/types.h>
19#include <linux/idr.h>
20#include <linux/list.h>
21#include <linux/tee.h>
22
23/*
24 * The file describes the API provided by the generic TEE driver to the
25 * specific TEE driver.
26 */
27
28#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
29#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
30
31struct tee_device;
32struct tee_shm;
33struct tee_shm_pool;
34
35/**
36 * struct tee_context - driver specific context on file pointer data
37 * @teedev: pointer to this drivers struct tee_device
38 * @list_shm: List of shared memory object owned by this context
39 * @data: driver specific context data, managed by the driver
40 */
41struct tee_context {
42 struct tee_device *teedev;
43 struct list_head list_shm;
44 void *data;
45};
46
47struct tee_param_memref {
48 size_t shm_offs;
49 size_t size;
50 struct tee_shm *shm;
51};
52
53struct tee_param_value {
54 u64 a;
55 u64 b;
56 u64 c;
57};
58
59struct tee_param {
60 u64 attr;
61 union {
62 struct tee_param_memref memref;
63 struct tee_param_value value;
64 } u;
65};
66
67/**
68 * struct tee_driver_ops - driver operations vtable
69 * @get_version: returns version of driver
70 * @open: called when the device file is opened
71 * @release: release this open file
72 * @open_session: open a new session
73 * @close_session: close a session
74 * @invoke_func: invoke a trusted function
75 * @cancel_req: request cancel of an ongoing invoke or open
76 * @supp_recv:		called for supplicant to get a command
77 * @supp_send: called for supplicant to send a response
78 */
79struct tee_driver_ops {
80 void (*get_version)(struct tee_device *teedev,
81 struct tee_ioctl_version_data *vers);
82 int (*open)(struct tee_context *ctx);
83 void (*release)(struct tee_context *ctx);
84 int (*open_session)(struct tee_context *ctx,
85 struct tee_ioctl_open_session_arg *arg,
86 struct tee_param *param);
87 int (*close_session)(struct tee_context *ctx, u32 session);
88 int (*invoke_func)(struct tee_context *ctx,
89 struct tee_ioctl_invoke_arg *arg,
90 struct tee_param *param);
91 int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
92 int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
93 struct tee_param *param);
94 int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
95 struct tee_param *param);
96};
97
98/**
99 * struct tee_desc - Describes the TEE driver to the subsystem
100 * @name: name of driver
101 * @ops: driver operations vtable
102 * @owner: module providing the driver
103 * @flags: Extra properties of driver, defined by TEE_DESC_* below
104 */
105#define TEE_DESC_PRIVILEGED 0x1
106struct tee_desc {
107 const char *name;
108 const struct tee_driver_ops *ops;
109 struct module *owner;
110 u32 flags;
111};
112
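
/*
 * Editorial sketch (not part of the patch): a minimal ops vtable and
 * descriptor for a hypothetical client driver. A real driver must at least
 * implement get_version, open and release (tee_device_alloc() rejects the
 * descriptor otherwise) and will normally also provide open_session,
 * close_session and invoke_func. Assumes <linux/module.h> and this header
 * are included; all "example_*" names are hypothetical.
 */
static void example_get_version(struct tee_device *teedev,
				struct tee_ioctl_version_data *vers)
{
	vers->impl_id = 42;		/* hypothetical implementation id */
	vers->impl_caps = 0;
	vers->gen_caps = 0;
}

static int example_open(struct tee_context *ctx)
{
	ctx->data = NULL;		/* per-context driver data goes here */
	return 0;
}

static void example_release(struct tee_context *ctx)
{
}

static const struct tee_driver_ops example_ops = {
	.get_version = example_get_version,
	.open = example_open,
	.release = example_release,
	/* .open_session, .close_session, .invoke_func, ... for a real TEE */
};

static const struct tee_desc example_desc = {
	.name = "exampletee-clnt",
	.ops = &example_ops,
	.owner = THIS_MODULE,
};
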
113/**
114 * tee_device_alloc() - Allocate a new struct tee_device instance
115 * @teedesc: Descriptor for this driver
116 * @dev: Parent device for this device
117 * @pool: Shared memory pool, NULL if not used
118 * @driver_data: Private driver data for this device
119 *
120 * Allocates a new struct tee_device instance. The device is
121 * removed by tee_device_unregister().
122 *
123 * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
124 */
125struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
126 struct device *dev,
127 struct tee_shm_pool *pool,
128 void *driver_data);
129
130/**
131 * tee_device_register() - Registers a TEE device
132 * @teedev: Device to register
133 *
134 * tee_device_unregister() needs to be called to remove the @teedev if
135 * this function fails.
136 *
137 * @returns < 0 on failure
138 */
139int tee_device_register(struct tee_device *teedev);
140
141/**
142 * tee_device_unregister() - Removes a TEE device
143 * @teedev: Device to unregister
144 *
145 * This function should be called to remove the @teedev even if
146 * tee_device_register() hasn't been called yet. Does nothing if
147 * @teedev is NULL.
148 */
149void tee_device_unregister(struct tee_device *teedev);
150
151/**
152 * struct tee_shm_pool_mem_info - holds information needed to create a shared
153 * memory pool
154 * @vaddr: Virtual address of start of pool
155 * @paddr: Physical address of start of pool
156 * @size: Size in bytes of the pool
157 */
158struct tee_shm_pool_mem_info {
159 unsigned long vaddr;
160 phys_addr_t paddr;
161 size_t size;
162};
163
164/**
165 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
166 * memory range
167 * @priv_info: Information for driver private shared memory pool
168 * @dmabuf_info: Information for dma-buf shared memory pool
169 *
170 * Start and end of pools must be page aligned.
171 *
172 * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
173 * in @dmabuf_info, others will use the range provided by @priv_info.
174 *
175 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
176 */
177struct tee_shm_pool *
178tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
179 struct tee_shm_pool_mem_info *dmabuf_info);
180
181/**
182 * tee_shm_pool_free() - Free a shared memory pool
183 * @pool: The shared memory pool to free
184 *
185 * There must be no remaining shared memory allocated from this pool when
186 * this function is called.
187 */
188void tee_shm_pool_free(struct tee_shm_pool *pool);
189
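
/*
 * Editorial sketch (not part of the patch): registering a TEE device from a
 * driver probe path. It reuses example_desc from the sketch above and splits
 * one page aligned reserved memory range (va/pa/size are assumed to come
 * from firmware or DT) into a driver-private pool and a dma-buf pool, which
 * is roughly what the OP-TEE driver in this series does. Assumes the usual
 * driver includes (<linux/err.h> etc.).
 */
static int example_register_tee(struct device *dev, unsigned long va,
				phys_addr_t pa, size_t size,
				struct tee_device **out_teedev,
				struct tee_shm_pool **out_pool)
{
	/* Each half of the range must remain page aligned */
	struct tee_shm_pool_mem_info priv_info = {
		.vaddr = va,
		.paddr = pa,
		.size = size / 2,
	};
	struct tee_shm_pool_mem_info dmabuf_info = {
		.vaddr = va + size / 2,
		.paddr = pa + size / 2,
		.size = size / 2,
	};
	struct tee_shm_pool *pool;
	struct tee_device *teedev;
	int rc;

	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	teedev = tee_device_alloc(&example_desc, dev, pool, NULL);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err_pool;
	}

	rc = tee_device_register(teedev);
	if (rc)
		goto err_dev;

	*out_teedev = teedev;
	*out_pool = pool;
	return 0;

err_dev:
	tee_device_unregister(teedev);	/* valid even if register failed */
err_pool:
	tee_shm_pool_free(pool);
	return rc;
}
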
190/**
191 * tee_get_drvdata() - Return driver_data pointer
192 * @returns the driver_data pointer supplied to tee_device_alloc().
193 */
194void *tee_get_drvdata(struct tee_device *teedev);
195
196/**
197 * tee_shm_alloc() - Allocate shared memory
198 * @ctx: Context that allocates the shared memory
199 * @size: Requested size of shared memory
200 * @flags: Flags setting properties for the requested shared memory.
201 *
202 * Memory allocated as global shared memory is automatically freed when the
203 * TEE file pointer is closed. The @flags field uses the bits defined by
204 * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
205 * TEE_SHM_DMA_BUF is set, global shared memory will be allocated and
206 * associated with a dma-buf handle, else driver private memory is used.
207 *
208 * @returns a pointer to 'struct tee_shm'
209 */
210struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
211
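
/*
 * Editorial sketch (not part of the patch): allocating driver-private shared
 * memory from a context and resolving its kernel virtual and physical
 * addresses, e.g. to build a message for the secure world. Assumes
 * <linux/string.h> and <linux/err.h>; the 128 byte size is arbitrary.
 */
static int example_use_shm(struct tee_context *ctx)
{
	const size_t len = 128;
	struct tee_shm *shm;
	phys_addr_t pa;
	void *va;
	int rc;

	shm = tee_shm_alloc(ctx, len, TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, &pa);
	if (rc)
		goto out;

	memset(va, 0, len);	/* the message would be marshalled here */
	/* pa would then be handed to the secure world, e.g. via SMC */
	rc = 0;
out:
	tee_shm_free(shm);
	return rc;
}
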
212/**
213 * tee_shm_free() - Free shared memory
214 * @shm: Handle to shared memory to free
215 */
216void tee_shm_free(struct tee_shm *shm);
217
218/**
219 * tee_shm_put() - Decrease reference count on a shared memory handle
220 * @shm: Shared memory handle
221 */
222void tee_shm_put(struct tee_shm *shm);
223
224/**
225 * tee_shm_va2pa() - Get physical address of a virtual address
226 * @shm: Shared memory handle
227 * @va: Virtual address to translate
228 * @pa: Returned physical address
229 * @returns 0 on success and < 0 on failure
230 */
231int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
232
233/**
234 * tee_shm_pa2va() - Get virtual address of a physical address
235 * @shm: Shared memory handle
236 * @pa: Physical address to translate
237 * @va: Returned virtual address
238 * @returns 0 on success and < 0 on failure
239 */
240int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
241
242/**
243 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
244 * @shm: Shared memory handle
245 * @offs: Offset from start of this shared memory
246 * @returns virtual address of the shared memory + offs if offs is within
247 * the bounds of this shared memory, else an ERR_PTR
248 */
249void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
250
251/**
252 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
253 * @shm: Shared memory handle
254 * @offs: Offset from start of this shared memory
255 * @pa: Physical address to return
256 * @returns 0 if offs is within the bounds of this shared memory, else an
257 * error code.
258 */
259int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
260
261/**
262 * tee_shm_get_id() - Get id of a shared memory object
263 * @shm: Shared memory handle
264 * @returns id
265 */
266int tee_shm_get_id(struct tee_shm *shm);
267
268/**
269 * tee_shm_get_from_id() - Find shared memory object and increase reference
270 * count
271 * @ctx: Context owning the shared memory
272 * @id: Id of shared memory object
273 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
274 */
275struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
276
277#endif /*__TEE_DRV_H*/
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
new file mode 100644
index 000000000000..370d8845ab21
--- /dev/null
+++ b/include/uapi/linux/tee.h
@@ -0,0 +1,346 @@
1/*
2 * Copyright (c) 2015-2016, Linaro Limited
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25 * POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#ifndef __TEE_H
29#define __TEE_H
30
31#include <linux/ioctl.h>
32#include <linux/types.h>
33
34/*
35 * This file describes the API provided by a TEE driver to user space.
36 *
37 * Each TEE driver defines a TEE specific protocol which is used for the
38 * data passed back and forth using the ioctls defined below.
39 */
40
41/* Helpers to make the ioctl defines */
42#define TEE_IOC_MAGIC 0xa4
43#define TEE_IOC_BASE 0
44
45/* Flags relating to shared memory */
46#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
47#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
48
49#define TEE_MAX_ARG_SIZE 1024
50
51#define TEE_GEN_CAP_GP		(1 << 0) /* GlobalPlatform compliant TEE */
52
53/*
54 * TEE Implementation ID
55 */
56#define TEE_IMPL_ID_OPTEE 1
57
58/*
59 * OP-TEE specific capabilities
60 */
61#define TEE_OPTEE_CAP_TZ (1 << 0)
62
63/**
64 * struct tee_ioctl_version_data - TEE version
65 * @impl_id: [out] TEE implementation id
66 * @impl_caps: [out] Implementation specific capabilities
67 * @gen_caps:	[out] Generic capabilities, defined by TEE_GEN_CAP_* above
68 *
69 * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above.
70 * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_*
71 * is valid when @impl_id == TEE_IMPL_ID_OPTEE.
72 */
73struct tee_ioctl_version_data {
74 __u32 impl_id;
75 __u32 impl_caps;
76 __u32 gen_caps;
77};
78
79/**
80 * TEE_IOC_VERSION - query version of TEE
81 *
82 * Takes a tee_ioctl_version_data struct and returns with the TEE version
83 * data filled in.
84 */
85#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \
86 struct tee_ioctl_version_data)
87
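
/*
 * Editorial example (not part of the patch): a minimal user space sketch of
 * querying the TEE version. The device path /dev/tee0 is an assumption; the
 * actual node name depends on how udev names the "tee" class devices.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/tee.h>

int query_tee_version(void)
{
	struct tee_ioctl_version_data vers;
	int fd;

	fd = open("/dev/tee0", O_RDWR);		/* hypothetical device node */
	if (fd < 0)
		return -1;

	if (ioctl(fd, TEE_IOC_VERSION, &vers) == 0)
		printf("impl_id %u, GP compliant: %s\n", vers.impl_id,
		       (vers.gen_caps & TEE_GEN_CAP_GP) ? "yes" : "no");

	close(fd);
	return 0;
}
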
88/**
89 * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument
90 * @size: [in/out] Size of shared memory to allocate
91 * @flags: [in/out] Flags to/from allocation.
92 * @id: [out] Identifier of the shared memory
93 *
94 * The flags field should currently be zero as input. Updated by the call
95 * with actual flags as defined by TEE_IOCTL_SHM_* above.
96 * This structure is used as argument for TEE_IOC_SHM_ALLOC below.
97 */
98struct tee_ioctl_shm_alloc_data {
99 __u64 size;
100 __u32 flags;
101 __s32 id;
102};
103
104/**
105 * TEE_IOC_SHM_ALLOC - allocate shared memory
106 *
107 * Allocates shared memory between the user space process and secure OS.
108 *
109 * Returns a file descriptor on success or < 0 on failure
110 *
111 * The returned file descriptor is used to map the shared memory into user
112 * space. The shared memory is freed when the descriptor is closed and the
113 * memory is unmapped.
114 */
115#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
116 struct tee_ioctl_shm_alloc_data)
117
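
/*
 * Editorial example (not part of the patch): allocating shared memory and
 * mapping it into user space. Assumes "fd" is an open TEE device file
 * descriptor (see the sketch above); error handling is abbreviated.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

void *alloc_and_map_shm(int fd, size_t len, int *shm_id)
{
	struct tee_ioctl_shm_alloc_data data = { .size = len };
	void *p;
	int shm_fd;

	shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &data);	/* returns a dma-buf fd */
	if (shm_fd < 0)
		return NULL;

	/* data.size may have been rounded up by the pool allocator */
	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0);
	close(shm_fd);		/* the mapping keeps the memory alive */
	if (p == MAP_FAILED)
		return NULL;

	*shm_id = data.id;	/* used later in memref parameters */
	return p;
}
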
118/**
119 * struct tee_ioctl_buf_data - Variable sized buffer
120 * @buf_ptr: [in] A __user pointer to a buffer
121 * @buf_len: [in] Length of the buffer above
122 *
123 * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE,
124 * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below.
125 */
126struct tee_ioctl_buf_data {
127 __u64 buf_ptr;
128 __u64 buf_len;
129};
130
131/*
132 * Attributes for struct tee_ioctl_param, selects field in the union
133 */
134#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */
135
136/*
137 * These defines value parameters (struct tee_ioctl_param_value)
138 */
139#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1
140#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2
141#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */
142
143/*
144 * These defines shared memory reference parameters (struct
145 * tee_ioctl_param_memref)
146 */
147#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5
148#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6
149#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
150
151/*
152 * Mask for the type part of the attribute, leaves room for more types
153 */
154#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
155
156/*
157 * Matches TEEC_LOGIN_* in GP TEE Client API
158 * Are only defined for GP compliant TEEs
159 */
160#define TEE_IOCTL_LOGIN_PUBLIC 0
161#define TEE_IOCTL_LOGIN_USER 1
162#define TEE_IOCTL_LOGIN_GROUP 2
163#define TEE_IOCTL_LOGIN_APPLICATION 4
164#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
165#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
166
167/**
168 * struct tee_ioctl_param - parameter
169 * @attr: attributes
170 * @a: if a memref, offset into the shared memory object, else a value parameter
171 * @b: if a memref, size of the buffer, else a value parameter
172 * @c: if a memref, shared memory identifier, else a value parameter
173 *
174 * @attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK indicates if memref or value is
175 * used in the union. TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_* indicates value and
176 * TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_* indicates memref.
177 * TEE_IOCTL_PARAM_ATTR_TYPE_NONE indicates that none of the members are used.
178 *
179 * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
180 * identifier representing the shared memory object. A memref can reference
181 * a part of a shared memory by specifying an offset (@a) and size (@b) of
182 * the object. To supply the entire shared memory object set the offset
183 * (@a) to 0 and size (@b) to the previously returned size of the object.
184 */
185struct tee_ioctl_param {
186 __u64 attr;
187 __u64 a;
188 __u64 b;
189 __u64 c;
190};
191
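
/*
 * Editorial example (not part of the patch): filling one value parameter and
 * one memref parameter. "shm_id" and "shm_size" are assumed to come from a
 * previous TEE_IOC_SHM_ALLOC call; what the values mean is defined entirely
 * by the Trusted Application being talked to.
 */
#include <string.h>
#include <linux/tee.h>

static void fill_params(struct tee_ioctl_param params[2],
			int shm_id, __u64 shm_size)
{
	memset(params, 0, 2 * sizeof(*params));

	/* params[0]: a value passed to (and possibly updated by) the TA */
	params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT;
	params[0].a = 1;
	params[0].b = 2;

	/* params[1]: the whole shared memory object as an input buffer */
	params[1].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
	params[1].a = 0;		/* offset into the shm object */
	params[1].b = shm_size;		/* size of the referenced range */
	params[1].c = shm_id;		/* id returned by TEE_IOC_SHM_ALLOC */
}
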
192#define TEE_IOCTL_UUID_LEN 16
193
194/**
195 * struct tee_ioctl_open_session_arg - Open session argument
196 * @uuid: [in] UUID of the Trusted Application
197 * @clnt_uuid: [in] UUID of client
198 * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above
199 * @cancel_id: [in] Cancellation id, a unique value to identify this request
200 * @session: [out] Session id
201 * @ret: [out] return value
202 * @ret_origin:	[out] origin of the return value
203 * @num_params:	[in] number of parameters following this struct
204 */
205struct tee_ioctl_open_session_arg {
206 __u8 uuid[TEE_IOCTL_UUID_LEN];
207 __u8 clnt_uuid[TEE_IOCTL_UUID_LEN];
208 __u32 clnt_login;
209 __u32 cancel_id;
210 __u32 session;
211 __u32 ret;
212 __u32 ret_origin;
213 __u32 num_params;
214	/* num_params tells the actual number of elements in params */
215 struct tee_ioctl_param params[];
216};
217
218/**
219 * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application
220 *
221 * Takes a struct tee_ioctl_buf_data which contains a struct
222 * tee_ioctl_open_session_arg followed by an array of struct
223 * tee_ioctl_param
224 */
225#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
226 struct tee_ioctl_buf_data)
227
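
/*
 * Editorial example (not part of the patch): opening a session to a Trusted
 * Application. The UUID is supplied by the caller and is TA specific, error
 * handling is abbreviated, and "fd" is assumed to be an open TEE device
 * descriptor.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

int open_ta_session(int fd, const uint8_t uuid[TEE_IOCTL_UUID_LEN],
		    uint32_t *session)
{
	const unsigned int nparams = 0;	/* no parameters in this sketch */
	size_t len = sizeof(struct tee_ioctl_open_session_arg) +
		     nparams * sizeof(struct tee_ioctl_param);
	struct tee_ioctl_open_session_arg *arg;
	struct tee_ioctl_buf_data buf;
	int rc;

	arg = calloc(1, len);
	if (!arg)
		return -1;

	memcpy(arg->uuid, uuid, TEE_IOCTL_UUID_LEN);
	arg->clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
	arg->num_params = nparams;

	buf.buf_ptr = (uintptr_t)arg;
	buf.buf_len = len;	/* must match the header plus the param array exactly */

	rc = ioctl(fd, TEE_IOC_OPEN_SESSION, &buf);
	if (!rc && arg->ret == 0)	/* 0 means success in the GP return code space */
		*session = arg->session;
	else
		rc = rc ? rc : -1;

	free(arg);
	return rc;
}
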
228/**
229 * struct tee_ioctl_invoke_arg - Invokes a function in a Trusted
230 * Application
231 * @func: [in] Trusted Application function, specific to the TA
232 * @session: [in] Session id
233 * @cancel_id: [in] Cancellation id, a unique value to identify this request
234 * @ret: [out] return value
235 * @ret_origin:	[out] origin of the return value
236 * @num_params:	[in] number of parameters following this struct
237 */
238struct tee_ioctl_invoke_arg {
239 __u32 func;
240 __u32 session;
241 __u32 cancel_id;
242 __u32 ret;
243 __u32 ret_origin;
244 __u32 num_params;
245	/* num_params tells the actual number of elements in params */
246 struct tee_ioctl_param params[];
247};
248
249/**
250 * TEE_IOC_INVOKE - Invokes a function in a Trusted Application
251 *
252 * Takes a struct tee_ioctl_buf_data which contains a struct
253 * tee_ioctl_invoke_arg followed by an array of struct tee_ioctl_param
254 */
255#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \
256 struct tee_ioctl_buf_data)
257
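
/*
 * Editorial example (not part of the patch): invoking a TA function in an
 * already opened session with a single memref parameter. "CMD_DO_SOMETHING"
 * and the parameter layout are hypothetical and entirely TA specific; "fd"
 * is assumed to be an open TEE device descriptor.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

#define CMD_DO_SOMETHING	0	/* hypothetical TA function id */

int invoke_ta(int fd, uint32_t session, int shm_id, uint64_t shm_size)
{
	const unsigned int nparams = 1;
	size_t len = sizeof(struct tee_ioctl_invoke_arg) +
		     nparams * sizeof(struct tee_ioctl_param);
	struct tee_ioctl_invoke_arg *arg;
	struct tee_ioctl_buf_data buf;
	int rc;

	arg = calloc(1, len);
	if (!arg)
		return -1;

	arg->func = CMD_DO_SOMETHING;
	arg->session = session;
	arg->num_params = nparams;
	arg->params[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	arg->params[0].b = shm_size;	/* size of the referenced range */
	arg->params[0].c = shm_id;	/* id from TEE_IOC_SHM_ALLOC */

	buf.buf_ptr = (uintptr_t)arg;
	buf.buf_len = len;

	rc = ioctl(fd, TEE_IOC_INVOKE, &buf);
	if (!rc && arg->ret != 0)
		rc = -1;	/* TA or TEE reported an error, see arg->ret_origin */

	free(arg);
	return rc;
}
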
258/**
259 * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl
260 * @cancel_id: [in] Cancellation id, a unique value to identify this request
261 * @session: [in] Session id, if the session is opened, else set to 0
262 */
263struct tee_ioctl_cancel_arg {
264 __u32 cancel_id;
265 __u32 session;
266};
267
268/**
269 * TEE_IOC_CANCEL - Cancels an open session or invoke
270 */
271#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \
272 struct tee_ioctl_cancel_arg)
273
274/**
275 * struct tee_ioctl_close_session_arg - Closes an open session
276 * @session: [in] Session id
277 */
278struct tee_ioctl_close_session_arg {
279 __u32 session;
280};
281
282/**
283 * TEE_IOC_CLOSE_SESSION - Closes a session
284 */
285#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \
286 struct tee_ioctl_close_session_arg)
287
288/**
289 * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
290 * @func: [in] supplicant function
291 * @num_params:	[in/out] number of parameters following this struct
292 *
293 * On input, @num_params is the number of params that tee-supplicant has
294 * room to receive; on output, it is the number of params actually
295 * received by tee-supplicant.
296 */
297struct tee_iocl_supp_recv_arg {
298 __u32 func;
299 __u32 num_params;
300	/* num_params tells the actual number of elements in params */
301 struct tee_ioctl_param params[];
302};
303
304/**
305 * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function
306 *
307 * Takes a struct tee_ioctl_buf_data which contains a struct
308 * tee_iocl_supp_recv_arg followed by an array of struct tee_ioctl_param
309 */
310#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \
311 struct tee_ioctl_buf_data)
312
313/**
314 * struct tee_iocl_supp_send_arg - Send a response to a received request
315 * @ret: [out] return value
316 * @num_params:	[in] number of parameters following this struct
317 */
318struct tee_iocl_supp_send_arg {
319 __u32 ret;
320 __u32 num_params;
321	/* num_params tells the actual number of elements in params */
322 struct tee_ioctl_param params[];
323};
324
325/**
326 * TEE_IOC_SUPPL_SEND - Send a response to a received request
327 *
328 * Takes a struct tee_ioctl_buf_data which contains a struct
329 * tee_iocl_supp_send_arg followed by an array of struct tee_ioctl_param
330 */
331#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
332 struct tee_ioctl_buf_data)
333
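
/*
 * Editorial example (not part of the patch): the basic receive/respond loop
 * of a user space supplicant such as tee-supplicant. A request is received
 * with TEE_IOC_SUPPL_RECV and answered with TEE_IOC_SUPPL_SEND, reusing the
 * same buffer since both argument structs are two 32-bit words followed by
 * the parameter array. handle_request() is a hypothetical helper that
 * interprets "func", updates the params in place and returns a result code;
 * "fd" is assumed to be an open privileged TEE device descriptor.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

#define SUPP_MAX_PARAMS	4	/* hypothetical upper bound */

extern uint32_t handle_request(uint32_t func, uint32_t num_params,
			       struct tee_ioctl_param *params);

int supplicant_loop(int fd)
{
	size_t len = sizeof(struct tee_iocl_supp_recv_arg) +
		     SUPP_MAX_PARAMS * sizeof(struct tee_ioctl_param);
	struct tee_iocl_supp_recv_arg *recv_arg = calloc(1, len);
	struct tee_ioctl_buf_data buf;

	if (!recv_arg)
		return -1;

	for (;;) {
		struct tee_iocl_supp_send_arg *send_arg;

		memset(recv_arg, 0, len);
		recv_arg->num_params = SUPP_MAX_PARAMS;	/* room available */
		buf.buf_ptr = (uintptr_t)recv_arg;
		buf.buf_len = len;
		if (ioctl(fd, TEE_IOC_SUPPL_RECV, &buf))
			break;	/* blocks until a request arrives */

		/* num_params now holds the number of params actually received */
		send_arg = (struct tee_iocl_supp_send_arg *)recv_arg;
		send_arg->ret = handle_request(recv_arg->func,
					       recv_arg->num_params,
					       recv_arg->params);
		buf.buf_len = sizeof(*send_arg) +
			      send_arg->num_params * sizeof(struct tee_ioctl_param);
		if (ioctl(fd, TEE_IOC_SUPPL_SEND, &buf))
			break;
	}

	free(recv_arg);
	return -1;
}
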
334/*
335 * Five syscalls are used when communicating with the TEE driver.
336 * open(): opens the device associated with the driver
337 * ioctl(): as described above, operating on the file descriptor from open()
338 * close(): two cases
339 * - closes the device file descriptor
340 * - closes a file descriptor connected to allocated shared memory
341 * mmap(): maps shared memory into user space using information from struct
342 * tee_ioctl_shm_alloc_data
343 * munmap(): unmaps previously shared memory
344 */
345
346#endif /*__TEE_H*/