aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging
diff options
context:
space:
mode:
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>2016-09-19 13:34:33 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2016-09-19 13:34:33 -0400
commit530a70617cb6325bd7781e7a993d732d6d37915a (patch)
tree8226dccfa0931e33d15a6ac697b2eeb0a0ee67ff /drivers/staging
parent26e9c85b48b4e3a2301c049213de3b5456c6b0a4 (diff)
parent2bbadafbe4eacab57aa7bc8e50287c1366303807 (diff)
Merge branch 'greybus' into staging-testing
This merges the greybus branch into staging-testing. It contains the drivers/staging/greybus/ subsystem and related drivers and has passed the 0-day bot tests so no builds should break. Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging')
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/greybus/Documentation/firmware/authenticate.c139
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware-management333
-rw-r--r--drivers/staging/greybus/Documentation/firmware/firmware.c262
-rw-r--r--drivers/staging/greybus/Documentation/sysfs-bus-greybus275
-rw-r--r--drivers/staging/greybus/Kconfig219
-rw-r--r--drivers/staging/greybus/Makefile96
-rw-r--r--drivers/staging/greybus/arche-apb-ctrl.c522
-rw-r--r--drivers/staging/greybus/arche-platform.c828
-rw-r--r--drivers/staging/greybus/arche_platform.h39
-rw-r--r--drivers/staging/greybus/arpc.h109
-rw-r--r--drivers/staging/greybus/audio_apbridgea.c207
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h156
-rw-r--r--drivers/staging/greybus/audio_codec.c1132
-rw-r--r--drivers/staging/greybus/audio_codec.h283
-rw-r--r--drivers/staging/greybus/audio_gb.c228
-rw-r--r--drivers/staging/greybus/audio_manager.c184
-rw-r--r--drivers/staging/greybus/audio_manager.h83
-rw-r--r--drivers/staging/greybus/audio_manager_module.c258
-rw-r--r--drivers/staging/greybus/audio_manager_private.h28
-rw-r--r--drivers/staging/greybus/audio_manager_sysfs.c102
-rw-r--r--drivers/staging/greybus/audio_module.c482
-rw-r--r--drivers/staging/greybus/audio_topology.c1442
-rw-r--r--drivers/staging/greybus/authentication.c429
-rw-r--r--drivers/staging/greybus/bootrom.c524
-rw-r--r--drivers/staging/greybus/bundle.c253
-rw-r--r--drivers/staging/greybus/bundle.h90
-rw-r--r--drivers/staging/greybus/camera.c1400
-rw-r--r--drivers/staging/greybus/connection.c938
-rw-r--r--drivers/staging/greybus/connection.h129
-rw-r--r--drivers/staging/greybus/control.c635
-rw-r--r--drivers/staging/greybus/control.h65
-rw-r--r--drivers/staging/greybus/core.c361
-rw-r--r--drivers/staging/greybus/debugfs.c31
-rw-r--r--drivers/staging/greybus/es2.c1597
-rw-r--r--drivers/staging/greybus/firmware.h42
-rw-r--r--drivers/staging/greybus/fw-core.c312
-rw-r--r--drivers/staging/greybus/fw-download.c465
-rw-r--r--drivers/staging/greybus/fw-management.c721
-rw-r--r--drivers/staging/greybus/gb-camera.h127
-rw-r--r--drivers/staging/greybus/gbphy.c360
-rw-r--r--drivers/staging/greybus/gbphy.h110
-rw-r--r--drivers/staging/greybus/gpio.c767
-rw-r--r--drivers/staging/greybus/greybus.h154
-rw-r--r--drivers/staging/greybus/greybus_authentication.h120
-rw-r--r--drivers/staging/greybus/greybus_firmware.h120
-rw-r--r--drivers/staging/greybus/greybus_id.h26
-rw-r--r--drivers/staging/greybus/greybus_manifest.h177
-rw-r--r--drivers/staging/greybus/greybus_protocols.h2268
-rw-r--r--drivers/staging/greybus/greybus_trace.h531
-rw-r--r--drivers/staging/greybus/hd.c257
-rw-r--r--drivers/staging/greybus/hd.h90
-rw-r--r--drivers/staging/greybus/hid.c536
-rw-r--r--drivers/staging/greybus/i2c.c343
-rw-r--r--drivers/staging/greybus/interface.c1316
-rw-r--r--drivers/staging/greybus/interface.h88
-rw-r--r--drivers/staging/greybus/light.c1359
-rw-r--r--drivers/staging/greybus/log.c132
-rw-r--r--drivers/staging/greybus/loopback.c1365
-rw-r--r--drivers/staging/greybus/manifest.c535
-rw-r--r--drivers/staging/greybus/manifest.h16
-rw-r--r--drivers/staging/greybus/module.c238
-rw-r--r--drivers/staging/greybus/module.h34
-rw-r--r--drivers/staging/greybus/operation.c1239
-rw-r--r--drivers/staging/greybus/operation.h210
-rw-r--r--drivers/staging/greybus/power_supply.c1141
-rw-r--r--drivers/staging/greybus/pwm.c338
-rw-r--r--drivers/staging/greybus/raw.c381
-rw-r--r--drivers/staging/greybus/sdio.c884
-rw-r--r--drivers/staging/greybus/spi.c79
-rw-r--r--drivers/staging/greybus/spilib.c565
-rw-r--r--drivers/staging/greybus/spilib.h24
-rw-r--r--drivers/staging/greybus/svc.c1486
-rw-r--r--drivers/staging/greybus/svc.h109
-rw-r--r--drivers/staging/greybus/svc_watchdog.c198
-rw-r--r--drivers/staging/greybus/timesync.c1357
-rw-r--r--drivers/staging/greybus/timesync.h45
-rw-r--r--drivers/staging/greybus/timesync_platform.c77
-rw-r--r--drivers/staging/greybus/tools/.gitignore1
-rw-r--r--drivers/staging/greybus/tools/Android.mk10
-rw-r--r--drivers/staging/greybus/tools/Makefile31
-rw-r--r--drivers/staging/greybus/tools/README.loopback198
-rwxr-xr-xdrivers/staging/greybus/tools/lbtest168
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c1000
-rw-r--r--drivers/staging/greybus/uart.c1075
-rw-r--r--drivers/staging/greybus/usb.c247
-rw-r--r--drivers/staging/greybus/vibrator.c249
88 files changed, 37583 insertions, 0 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 5ebaf0028a10..9c0339c53fe0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -102,4 +102,6 @@ source "drivers/staging/i4l/Kconfig"
102 102
103source "drivers/staging/ks7010/Kconfig" 103source "drivers/staging/ks7010/Kconfig"
104 104
105source "drivers/staging/greybus/Kconfig"
106
105endif # STAGING 107endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 29a1672820e6..6952aaf3114b 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_WILC1000) += wilc1000/
40obj-$(CONFIG_MOST) += most/ 40obj-$(CONFIG_MOST) += most/
41obj-$(CONFIG_ISDN_I4L) += i4l/ 41obj-$(CONFIG_ISDN_I4L) += i4l/
42obj-$(CONFIG_KS7010) += ks7010/ 42obj-$(CONFIG_KS7010) += ks7010/
43obj-$(CONFIG_GREYBUS) += greybus/
diff --git a/drivers/staging/greybus/Documentation/firmware/authenticate.c b/drivers/staging/greybus/Documentation/firmware/authenticate.c
new file mode 100644
index 000000000000..ab0688ad1e37
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/authenticate.c
@@ -0,0 +1,139 @@
1/*
2 * Sample code to test CAP protocol
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Google Inc. All rights reserved.
10 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License version 2 for more details.
20 *
21 * BSD LICENSE
22 *
23 * Copyright(c) 2016 Google Inc. All rights reserved.
24 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 *
30 * * Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * * Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
37 * its contributors may be used to endorse or promote products
38 * derived from this software without specific prior written
39 * permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
45 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
46 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
47 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
48 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
49 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <stdio.h>
55#include <string.h>
56#include <unistd.h>
57#include <sys/ioctl.h>
58#include <sys/stat.h>
59#include <fcntl.h>
60
61#include "../../greybus_authentication.h"
62
63struct cap_ioc_get_endpoint_uid uid;
64struct cap_ioc_get_ims_certificate cert = {
65 .certificate_class = 0,
66 .certificate_id = 0,
67};
68
69struct cap_ioc_authenticate authenticate = {
70 .auth_type = 0,
71 .challenge = {0},
72};
73
74int main(int argc, char *argv[])
75{
76 unsigned int timeout = 10000;
77 char *capdev;
78 int fd, ret;
79
80 /* Make sure arguments are correct */
81 if (argc != 2) {
82 printf("\nUsage: ./firmware <Path of the gb-cap-X dev>\n");
83 return 0;
84 }
85
86 capdev = argv[1];
87
88 printf("Opening %s authentication device\n", capdev);
89
90 fd = open(capdev, O_RDWR);
91 if (fd < 0) {
92 printf("Failed to open: %s\n", capdev);
93 return -1;
94 }
95
96 /* Get UID */
97 printf("Get UID\n");
98
99 ret = ioctl(fd, CAP_IOC_GET_ENDPOINT_UID, &uid);
100 if (ret < 0) {
101 printf("Failed to get UID: %s (%d)\n", capdev, ret);
102 ret = -1;
103 goto close_fd;
104 }
105
106 printf("UID received: 0x%llx\n", *(long long unsigned int *)(uid.uid));
107
108 /* Get certificate */
109 printf("Get IMS certificate\n");
110
111 ret = ioctl(fd, CAP_IOC_GET_IMS_CERTIFICATE, &cert);
112 if (ret < 0) {
113 printf("Failed to get IMS certificate: %s (%d)\n", capdev, ret);
114 ret = -1;
115 goto close_fd;
116 }
117
118 printf("IMS Certificate size: %d\n", cert.cert_size);
119
120 /* Authenticate */
121 printf("Authenticate module\n");
122
123 memcpy(authenticate.uid, uid.uid, 8);
124
125 ret = ioctl(fd, CAP_IOC_AUTHENTICATE, &authenticate);
126 if (ret < 0) {
127 printf("Failed to authenticate module: %s (%d)\n", capdev, ret);
128 ret = -1;
129 goto close_fd;
130 }
131
132 printf("Authenticated, result (%02x), sig-size (%02x)\n",
133 authenticate.result_code, authenticate.signature_size);
134
135close_fd:
136 close(fd);
137
138 return ret;
139}
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware-management b/drivers/staging/greybus/Documentation/firmware/firmware-management
new file mode 100644
index 000000000000..7918257e5b3b
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware-management
@@ -0,0 +1,333 @@
1
2Firmware Management
3-------------------
4 Copyright 2016 Google Inc.
5 Copyright 2016 Linaro Ltd.
6
7Interface-Manifest
8------------------
9
10All firmware packages on the Modules or Interfaces are managed by a special
11Firmware Management Protocol. To support Firmware Management by the AP, the
12Interface Manifest shall at least contain the Firmware Management Bundle and a
13Firmware Management Protocol CPort within it.
14
15The bundle may contain additional CPorts based on the extra functionality
16required to manage firmware packages.
17
18For example, this is what the Firmware Management part of the Interface
19Manifest may look like:
20
21 ; Firmware Management Bundle (Bundle 1):
22 [bundle-descriptor 1]
23 class = 0x16
24
25 ; (Mandatory) Firmware Management Protocol on CPort 1
26 [cport-descriptor 1]
27 bundle = 1
28 protocol = 0x18
29
30 ; (Optional) Firmware Download Protocol on CPort 2
31 [cport-descriptor 2]
32 bundle = 1
33 protocol = 0x17
34
35 ; (Optional) SPI protocol on CPort 3
36 [cport-descriptor 3]
37 bundle = 1
38 protocol = 0x0b
39
40 ; (Optional) Component Authentication Protocol (CAP) on CPort 4
41 [cport-descriptor 4]
42 bundle = 1
43 protocol = 0x19
44
45
46Sysfs Interfaces - Firmware Management
47--------------------------------------
48
49The Firmware Management Protocol interacts with Userspace using the character
50device interface. The character device will be present in /dev/ directory
51and will be named gb-fw-mgmt-<N>. The number <N> is assigned at runtime.
52
53Identifying the Character Device
54================================
55
56There can be multiple devices present in /dev/ directory with name gb-fw-mgmt-N
57and user first needs to identify the character device used for
58firmware-management for a particular interface.
59
60The Firmware Management core creates a device of class 'gb_fw_mgmt', which shall
61be used by the user to identify the right character device for it. The class
62device is created within the Bundle directory for a particular Interface.
63
64For example this is how the class-device can be present:
65
66/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_fw_mgmt/gb-fw-mgmt-0
67
68The last name in this path: gb-fw-mgmt-0 is precisely the name of the char
69device and so the device in this case will be:
70
71/dev/gb-fw-mgmt-0.
72
73Operations on the Char device
74=============================
75
76The Character device (gb-fw-mgmt-0 in example) can be opened by the userspace
77application and it can perform various 'ioctl' operations on the device. The
78device doesn't support any read/write operations.
79
80Following are the IOCTLs and their data structures available to the user:
81
82/* IOCTL support */
83#define GB_FW_LOAD_METHOD_UNIPRO 0x01
84#define GB_FW_LOAD_METHOD_INTERNAL 0x02
85
86#define GB_FW_LOAD_STATUS_FAILED 0x00
87#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
88#define GB_FW_LOAD_STATUS_VALIDATED 0x02
89#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
90
91#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
92#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
93#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
94#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
95#define GB_FW_BACKEND_FW_STATUS_INT 0x05
96#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
97#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
98
99#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
100#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
101#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
102#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
103#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
104
105
106struct fw_mgmt_ioc_get_intf_version {
107 __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
108 __u16 major;
109 __u16 minor;
110} __attribute__ ((__packed__));
111
112struct fw_mgmt_ioc_get_backend_version {
113 __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
114 __u16 major;
115 __u16 minor;
116 __u8 status;
117} __attribute__ ((__packed__));
118
119struct fw_mgmt_ioc_intf_load_and_validate {
120 __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
121 __u8 load_method;
122 __u8 status;
123 __u16 major;
124 __u16 minor;
125} __attribute__ ((__packed__));
126
127struct fw_mgmt_ioc_backend_fw_update {
128 __u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
129 __u8 status;
130} __attribute__ ((__packed__));
131
132#define FW_MGMT_IOCTL_BASE 'S'
133#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
134#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
135#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
136#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
137#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
138#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
139
1401. FW_MGMT_IOC_GET_INTF_FW:
141
142 This ioctl shall be used by the user to get the version and firmware-tag of
143 the currently running Interface Firmware. All the fields of the 'struct
144 fw_mgmt_ioc_get_intf_version' are filled by the kernel.
145
1462. FW_MGMT_IOC_GET_BACKEND_FW:
147
148 This ioctl shall be used by the user to get the version of a currently
149 running Backend Interface Firmware identified by a firmware-tag. The user is
150 required to fill the 'firmware_tag' field of the 'struct fw_mgmt_ioc_get_backend_version'
151 in this case. The 'major' and 'minor' fields are set by the kernel in
152 response.
153
1543. FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
155
156 This ioctl shall be used by the user to load an Interface Firmware package on
157 an Interface. The user needs to fill the 'firmware_tag' and 'load_method'
158 fields of the 'struct fw_mgmt_ioc_intf_load_and_validate'. The 'status',
159 'major' and 'minor' fields are set by the kernel in response.
160
1614. FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
162
163 This ioctl shall be used by the user to request an Interface to update a
164 Backend Interface Firmware. The user is required to fill the 'firmware_tag'
165 field of the 'struct fw_mgmt_ioc_backend_fw_update' in this case. The 'status' field is
166 set by the kernel in response.
167
1685. FW_MGMT_IOC_SET_TIMEOUT_MS:
169
170 This ioctl shall be used by the user to increase the timeout interval within
171 which the firmware must get loaded by the Module. The default timeout is 1
172 second. The user needs to pass the timeout in milliseconds.
173
1746. FW_MGMT_IOC_MODE_SWITCH:
175
176 This ioctl shall be used by the user to mode-switch the module to the
177 previously loaded interface firmware. If the interface firmware isn't loaded
178 previously, or if another unsuccessful FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE
179 operation is started after loading interface firmware, then the firmware core
180 wouldn't allow mode-switch.
181
182
183Sysfs Interfaces - Authentication
184---------------------------------
185
186The Component Authentication Protocol interacts with Userspace using the
187character device interface. The character device will be present in /dev/
188directory and will be named gb-authenticate-<N>. The number <N> is assigned at
189runtime.
190
191Identifying the Character Device
192================================
193
194There can be multiple devices present in /dev/ directory with name
195gb-authenticate-N and user first needs to identify the character device used for
196authentication of a particular interface.
197
198The Authentication core creates a device of class 'gb_authenticate', which shall
199be used by the user to identify the right character device for it. The class
200device is created within the Bundle directory for a particular Interface.
201
202For example this is how the class-device can be present:
203
204/sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/gb_authenticate/gb-authenticate-0
205
206The last name in this path: gb-authenticate-0 is precisely the name of the char
207device and so the device in this case will be:
208
209/dev/gb-authenticate-0.
210
211Operations on the Char device
212=============================
213
214The Character device (/dev/gb-authenticate-0 in above example) can be opened by
215the userspace application and it can perform various 'ioctl' operations on the
216device. The device doesn't support any read/write operations.
217
218Following are the IOCTLs and their data structures available to the user:
219
220#define CAP_CERTIFICATE_MAX_SIZE 1600
221#define CAP_SIGNATURE_MAX_SIZE 320
222
223/* Certificate class types */
224#define CAP_CERT_IMS_EAPC 0x00000001
225#define CAP_CERT_IMS_EASC 0x00000002
226#define CAP_CERT_IMS_EARC 0x00000003
227#define CAP_CERT_IMS_IAPC 0x00000004
228#define CAP_CERT_IMS_IASC 0x00000005
229#define CAP_CERT_IMS_IARC 0x00000006
230
231/* IMS Certificate response result codes */
232#define CAP_IMS_RESULT_CERT_FOUND 0x00
233#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
234#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
235#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
236
237/* Authentication types */
238#define CAP_AUTH_IMS_PRI 0x00000001
239#define CAP_AUTH_IMS_SEC 0x00000002
240#define CAP_AUTH_IMS_RSA 0x00000003
241
242/* Authenticate response result codes */
243#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
244#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
245#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
246#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
247#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
248
249
250/* IOCTL support */
251struct cap_ioc_get_endpoint_uid {
252 __u8 uid[8];
253} __attribute__ ((__packed__));
254
255struct cap_ioc_get_ims_certificate {
256 __u32 certificate_class;
257 __u32 certificate_id;
258
259 __u8 result_code;
260 __u32 cert_size;
261 __u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
262} __attribute__ ((__packed__));
263
264struct cap_ioc_authenticate {
265 __u32 auth_type;
266 __u8 uid[8];
267 __u8 challenge[32];
268
269 __u8 result_code;
270 __u8 response[64];
271 __u32 signature_size;
272 __u8 signature[CAP_SIGNATURE_MAX_SIZE];
273} __attribute__ ((__packed__));
274
275#define CAP_IOCTL_BASE 'C'
276#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
277#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
278#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
279
280
2811. CAP_IOC_GET_ENDPOINT_UID:
282
283 This ioctl shall be used by the user to get the endpoint UID associated with
284 the Interface. All the fields of the 'struct cap_ioc_get_endpoint_uid' are
285 filled by the kernel.
286
2872. CAP_IOC_GET_IMS_CERTIFICATE:
288
289 This ioctl shall be used by the user to retrieve one of the available
290 cryptographic certificates held by the Interface for use in Component
291 Authentication. The user is required to fill the 'certificate_class' and
292 'certificate_id' field of the 'struct cap_ioc_get_ims_certificate' in this
293 case. The other fields will be set by the kernel in response. The first
294 'cert_size' bytes of the 'certificate' shall be read by the user and others
295 must be discarded.
296
2973. CAP_IOC_AUTHENTICATE:
298
299 This ioctl shall be used by the user to authenticate the Module attached to
300 an Interface. The user needs to fill the 'auth_type', 'uid', and 'challenge'
301 fields of the 'struct cap_ioc_authenticate'. The other fields will be set by
302 the kernel in response. The first 'signature_size' bytes of the 'signature'
303 shall be read by the user and others must be discarded.
304
305
306Sysfs Interfaces - Firmware Download
307------------------------------------
308
309The Firmware Download Protocol uses the existing Linux Kernel's Firmware class
310and the interface provided to userspace are described in:
311Documentation/firmware_class/.
312
313
314Sysfs Interfaces - SPI Flash
315----------------------------
316
317The SPI flash is exposed in userspace as a MTD device and is created
318within the Bundle directory. For example, this is how the path may look like:
319
320$ ls /sys/bus/greybus/devices/1-1/1-1.1/1-1.1.1/spi_master/spi32766/spi32766.0/mtd
321mtd0 mtd0ro
322
323
324Sample Applications
325-------------------
326
327The current directory also provides a firmware.c test application, which can be
328referenced while developing userspace application to talk to firmware-management
329protocol.
330
331The current directory also provides an authenticate.c test application, which can
332be referenced while developing userspace application to talk to
333component authentication protocol.
diff --git a/drivers/staging/greybus/Documentation/firmware/firmware.c b/drivers/staging/greybus/Documentation/firmware/firmware.c
new file mode 100644
index 000000000000..ff9382401030
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/firmware/firmware.c
@@ -0,0 +1,262 @@
1/*
2 * Sample code to test firmware-management protocol
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Google Inc. All rights reserved.
10 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License version 2 for more details.
20 *
21 * BSD LICENSE
22 *
23 * Copyright(c) 2016 Google Inc. All rights reserved.
24 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 *
30 * * Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * * Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
37 * its contributors may be used to endorse or promote products
38 * derived from this software without specific prior written
39 * permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
45 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
46 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
47 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
48 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
49 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#include <stdio.h>
55#include <string.h>
56#include <unistd.h>
57#include <sys/ioctl.h>
58#include <sys/stat.h>
59#include <fcntl.h>
60
61#include "../../greybus_firmware.h"
62
63#define FW_DEV_DEFAULT "/dev/gb-fw-mgmt-0"
64#define FW_TAG_INT_DEFAULT "s3f"
65#define FW_TAG_BCND_DEFAULT "bf_01"
66#define FW_UPDATE_TYPE_DEFAULT 0
67#define FW_TIMEOUT_DEFAULT 10000;
68
69static const char *firmware_tag;
70static const char *fwdev = FW_DEV_DEFAULT;
71static int fw_update_type = FW_UPDATE_TYPE_DEFAULT;
72static int fw_timeout = FW_TIMEOUT_DEFAULT;
73
74static struct fw_mgmt_ioc_get_intf_version intf_fw_info;
75static struct fw_mgmt_ioc_get_backend_version backend_fw_info;
76static struct fw_mgmt_ioc_intf_load_and_validate intf_load;
77static struct fw_mgmt_ioc_backend_fw_update backend_update;
78
/* Print the command-line help text for this test program. */
static void usage(void)
{
	fputs("\nUsage: ./firmware <gb-fw-mgmt-X (default: gb-fw-mgmt-0)> <interface: 0, backend: 1 (default: 0)> <firmware-tag> (default: \"s3f\"/\"bf_01\") <timeout (default: 10000 ms)>\n", stdout);
}
83
84static int update_intf_firmware(int fd)
85{
86 int ret;
87
88 /* Get Interface Firmware Version */
89 printf("Get Interface Firmware Version\n");
90
91 ret = ioctl(fd, FW_MGMT_IOC_GET_INTF_FW, &intf_fw_info);
92 if (ret < 0) {
93 printf("Failed to get interface firmware version: %s (%d)\n",
94 fwdev, ret);
95 return -1;
96 }
97
98 printf("Interface Firmware tag (%s), major (%d), minor (%d)\n",
99 intf_fw_info.firmware_tag, intf_fw_info.major,
100 intf_fw_info.minor);
101
102 /* Try Interface Firmware load over Unipro */
103 printf("Loading Interface Firmware\n");
104
105 intf_load.load_method = GB_FW_U_LOAD_METHOD_UNIPRO;
106 intf_load.status = 0;
107 intf_load.major = 0;
108 intf_load.minor = 0;
109
110 strncpy((char *)&intf_load.firmware_tag, firmware_tag,
111 GB_FIRMWARE_U_TAG_MAX_SIZE);
112
113 ret = ioctl(fd, FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE, &intf_load);
114 if (ret < 0) {
115 printf("Failed to load interface firmware: %s (%d)\n", fwdev,
116 ret);
117 return -1;
118 }
119
120 if (intf_load.status != GB_FW_U_LOAD_STATUS_VALIDATED &&
121 intf_load.status != GB_FW_U_LOAD_STATUS_UNVALIDATED) {
122 printf("Load status says loading failed: %d\n",
123 intf_load.status);
124 return -1;
125 }
126
127 printf("Interface Firmware (%s) Load done: major: %d, minor: %d, status: %d\n",
128 firmware_tag, intf_load.major, intf_load.minor,
129 intf_load.status);
130
131 /* Initiate Mode-switch to the newly loaded firmware */
132 printf("Initiate Mode switch\n");
133
134 ret = ioctl(fd, FW_MGMT_IOC_MODE_SWITCH);
135 if (ret < 0)
136 printf("Failed to initiate mode-switch (%d)\n", ret);
137
138 return ret;
139}
140
141static int update_backend_firmware(int fd)
142{
143 int ret;
144
145 /* Get Backend Firmware Version */
146 printf("Getting Backend Firmware Version\n");
147
148 strncpy((char *)&backend_fw_info.firmware_tag, firmware_tag,
149 GB_FIRMWARE_U_TAG_MAX_SIZE);
150
151retry_fw_version:
152 ret = ioctl(fd, FW_MGMT_IOC_GET_BACKEND_FW, &backend_fw_info);
153 if (ret < 0) {
154 printf("Failed to get backend firmware version: %s (%d)\n",
155 fwdev, ret);
156 return -1;
157 }
158
159 printf("Backend Firmware tag (%s), major (%d), minor (%d), status (%d)\n",
160 backend_fw_info.firmware_tag, backend_fw_info.major,
161 backend_fw_info.minor, backend_fw_info.status);
162
163 if (backend_fw_info.status == GB_FW_U_BACKEND_VERSION_STATUS_RETRY)
164 goto retry_fw_version;
165
166 if ((backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS)
167 && (backend_fw_info.status != GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE)) {
168 printf("Failed to get backend firmware version: %s (%d)\n",
169 fwdev, backend_fw_info.status);
170 return -1;
171 }
172
173 /* Try Backend Firmware Update over Unipro */
174 printf("Updating Backend Firmware\n");
175
176 strncpy((char *)&backend_update.firmware_tag, firmware_tag,
177 GB_FIRMWARE_U_TAG_MAX_SIZE);
178
179retry_fw_update:
180 backend_update.status = 0;
181
182 ret = ioctl(fd, FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE, &backend_update);
183 if (ret < 0) {
184 printf("Failed to load backend firmware: %s (%d)\n", fwdev, ret);
185 return -1;
186 }
187
188 if (backend_update.status == GB_FW_U_BACKEND_FW_STATUS_RETRY) {
189 printf("Retrying firmware update: %d\n", backend_update.status);
190 goto retry_fw_update;
191 }
192
193 if (backend_update.status != GB_FW_U_BACKEND_FW_STATUS_SUCCESS) {
194 printf("Load status says loading failed: %d\n",
195 backend_update.status);
196 } else {
197 printf("Backend Firmware (%s) Load done: status: %d\n",
198 firmware_tag, backend_update.status);
199 }
200
201 return 0;
202}
203
204int main(int argc, char *argv[])
205{
206 int fd, ret;
207
208 if (argc > 1 &&
209 (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help"))) {
210 usage();
211 return -1;
212 }
213
214 if (argc > 1)
215 fwdev = argv[1];
216
217 if (argc > 2)
218 sscanf(argv[2], "%u", &fw_update_type);
219
220 if (argc > 3) {
221 firmware_tag = argv[3];
222 } else if (!fw_update_type) {
223 firmware_tag = FW_TAG_INT_DEFAULT;
224 } else {
225 firmware_tag = FW_TAG_BCND_DEFAULT;
226 }
227
228 if (argc > 4)
229 sscanf(argv[4], "%u", &fw_timeout);
230
231 printf("Trying Firmware update: fwdev: %s, type: %s, tag: %s, timeout: %d\n",
232 fwdev, fw_update_type == 0 ? "interface" : "backend",
233 firmware_tag, fw_timeout);
234
235 printf("Opening %s firmware management device\n", fwdev);
236
237 fd = open(fwdev, O_RDWR);
238 if (fd < 0) {
239 printf("Failed to open: %s\n", fwdev);
240 return -1;
241 }
242
243 /* Set Timeout */
244 printf("Setting timeout to %u ms\n", fw_timeout);
245
246 ret = ioctl(fd, FW_MGMT_IOC_SET_TIMEOUT_MS, &fw_timeout);
247 if (ret < 0) {
248 printf("Failed to set timeout: %s (%d)\n", fwdev, ret);
249 ret = -1;
250 goto close_fd;
251 }
252
253 if (!fw_update_type)
254 ret = update_intf_firmware(fd);
255 else
256 ret = update_backend_firmware(fd);
257
258close_fd:
259 close(fd);
260
261 return ret;
262}
diff --git a/drivers/staging/greybus/Documentation/sysfs-bus-greybus b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
new file mode 100644
index 000000000000..2e998966cbe1
--- /dev/null
+++ b/drivers/staging/greybus/Documentation/sysfs-bus-greybus
@@ -0,0 +1,275 @@
1What: /sys/bus/greybus/devices/greybusN
2Date: October 2015
3KernelVersion: 4.XX
4Contact: Greg Kroah-Hartman <greg@kroah.com>
5Description:
6 The "root" greybus device for the Greybus device tree, or bus,
7 where N is a dynamically assigned 1-based id.
8
9What: /sys/bus/greybus/devices/greybusN/bus_id
10Date: April 2016
11KernelVersion: 4.XX
12Contact: Greg Kroah-Hartman <greg@kroah.com>
13Description:
14 The ID of the "root" greybus device, or bus.
15
16What: /sys/bus/greybus/devices/N-M
17Date: March 2016
18KernelVersion: 4.XX
19Contact: Greg Kroah-Hartman <greg@kroah.com>
20Description:
21 A Module M on the bus N, where M is the 1-byte interface
22 ID of the module's primary interface.
23
24What: /sys/bus/greybus/devices/N-M/eject
25Date: March 2016
26KernelVersion: 4.XX
27Contact: Greg Kroah-Hartman <greg@kroah.com>
28Description:
29		Writing a non-zero argument to this attribute disables the
30		module's interfaces before physically ejecting it.
31
32What: /sys/bus/greybus/devices/N-M/module_id
33Date: March 2016
34KernelVersion: 4.XX
35Contact: Greg Kroah-Hartman <greg@kroah.com>
36Description:
37 The ID of a Greybus module, corresponding to the ID of its
38 primary interface.
39
40What: /sys/bus/greybus/devices/N-M/num_interfaces
41Date: March 2016
42KernelVersion: 4.XX
43Contact: Greg Kroah-Hartman <greg@kroah.com>
44Description:
45 The number of interfaces of a module.
46
47What: /sys/bus/greybus/devices/N-M.I
48Date: October 2015
49KernelVersion: 4.XX
50Contact: Greg Kroah-Hartman <greg@kroah.com>
51Description:
52 An Interface I on the bus N and module N-M, where I is the
53 1-byte interface ID.
54
55What: /sys/bus/greybus/devices/N-M.I/current_now
56Date: March 2016
57KernelVersion: 4.XX
58Contact: Greg Kroah-Hartman <greg@kroah.com>
59Description:
60 Current measurement of the interface in microamps (uA)
61
62What: /sys/bus/greybus/devices/N-M.I/ddbl1_manufacturer_id
63Date: October 2015
64KernelVersion: 4.XX
65Contact: Greg Kroah-Hartman <greg@kroah.com>
66Description:
67 Unipro Device Descriptor Block Level 1 manufacturer ID for the
68 greybus Interface.
69
70What: /sys/bus/greybus/devices/N-M.I/ddbl1_product_id
71Date: October 2015
72KernelVersion: 4.XX
73Contact: Greg Kroah-Hartman <greg@kroah.com>
74Description:
75 Unipro Device Descriptor Block Level 1 product ID for the
76 greybus Interface.
77
78What: /sys/bus/greybus/devices/N-M.I/interface_id
79Date: October 2015
80KernelVersion: 4.XX
81Contact: Greg Kroah-Hartman <greg@kroah.com>
82Description:
83 The ID of a Greybus interface.
84
85What: /sys/bus/greybus/devices/N-M.I/interface_type
86Date: June 2016
87KernelVersion: 4.XX
88Contact: Greg Kroah-Hartman <greg@kroah.com>
89Description:
90 The type of a Greybus interface; "dummy", "unipro", "greybus",
91 or "unknown".
92
93What: /sys/bus/greybus/devices/N-M.I/power_now
94Date: March 2016
95KernelVersion: 4.XX
96Contact: Greg Kroah-Hartman <greg@kroah.com>
97Description:
98 Power measurement of the interface in microwatts (uW)
99
100What: /sys/bus/greybus/devices/N-M.I/power_state
101Date: March 2016
102KernelVersion: 4.XX
103Contact: Greg Kroah-Hartman <greg@kroah.com>
104Description:
105 This file reflects the power state of a Greybus interface. If
106 the value read from it is "on", then power is currently
107 supplied to the interface. Otherwise it will read "off" and
108 power is currently not supplied to the interface.
109
110 If the value read is "off", then writing "on" (or '1', 'y',
111 'Y') to this file will enable power to the interface and an
112 attempt to boot and possibly enumerate it will be made. Note
113 that on errors, the interface will again be powered down.
114
115 If the value read is "on", then writing "off" (or '0', 'n',
116 'N') to this file will power down the interface.
117
118What: /sys/bus/greybus/devices/N-M.I/product_id
119Date: October 2015
120KernelVersion: 4.XX
121Contact: Greg Kroah-Hartman <greg@kroah.com>
122Description:
123 Product ID of a Greybus interface.
124
125What: /sys/bus/greybus/devices/N-M.I/serial_number
126Date: October 2015
127KernelVersion: 4.XX
128Contact: Greg Kroah-Hartman <greg@kroah.com>
129Description:
130 Serial Number of the Greybus interface, represented by a 64 bit
131 hexadecimal number.
132
133What: /sys/bus/greybus/devices/N-M.I/vendor_id
134Date: October 2015
135KernelVersion: 4.XX
136Contact: Greg Kroah-Hartman <greg@kroah.com>
137Description:
138 Vendor ID of a Greybus interface.
139
140What: /sys/bus/greybus/devices/N-M.I/voltage_now
141Date: March 2016
142KernelVersion: 4.XX
143Contact: Greg Kroah-Hartman <greg@kroah.com>
144Description:
145 Voltage measurement of the interface in microvolts (uV)
146
147What: /sys/bus/greybus/devices/N-M.I.ctrl
148Date: October 2015
149KernelVersion: 4.XX
150Contact: Greg Kroah-Hartman <greg@kroah.com>
151Description:
152 Abstract control device for interface I that represents the
153 current mode of an enumerated Greybus interface.
154
155What: /sys/bus/greybus/devices/N-M.I.ctrl/product_string
156Date: October 2015
157KernelVersion: 4.XX
158Contact: Greg Kroah-Hartman <greg@kroah.com>
159Description:
160 Product ID string of a Greybus interface.
161
162What: /sys/bus/greybus/devices/N-M.I.ctrl/vendor_string
163Date: October 2015
164KernelVersion: 4.XX
165Contact: Greg Kroah-Hartman <greg@kroah.com>
166Description:
167 Vendor ID string of a Greybus interface.
168
169What: /sys/bus/greybus/devices/N-M.I.B
170Date: October 2015
171KernelVersion: 4.XX
172Contact: Greg Kroah-Hartman <greg@kroah.com>
173Description:
174 A bundle B on the Interface I, B is replaced by a 1-byte
175 number representing the bundle.
176
177What: /sys/bus/greybus/devices/N-M.I.B/bundle_class
178Date: October 2015
179KernelVersion: 4.XX
180Contact: Greg Kroah-Hartman <greg@kroah.com>
181Description:
182 The greybus class of the bundle B.
183
184What: /sys/bus/greybus/devices/N-M.I.B/bundle_id
185Date: October 2015
186KernelVersion: 4.XX
187Contact: Greg Kroah-Hartman <greg@kroah.com>
188Description:
189 The interface-unique id of the bundle B.
190
191What: /sys/bus/greybus/devices/N-M.I.B/gpbX
192Date: April 2016
193KernelVersion: 4.XX
194Contact: Greg Kroah-Hartman <greg@kroah.com>
195Description:
196 The General Purpose Bridged PHY device of the bundle B,
197 where X is a dynamically assigned 0-based id.
198
199What: /sys/bus/greybus/devices/N-M.I.B/state
200Date: October 2015
201KernelVersion: 4.XX
202Contact: Greg Kroah-Hartman <greg@kroah.com>
203Description:
204 A bundle has a state that is managed by the userspace
205 Endo process. This file allows that Endo to signal
206 other Android HALs that the state of the bundle has
207 changed to a specific value. When written to, any
208 process watching the file will be woken up, and the new
209 value can be read. It's a "poor-man's IPC", yes, but
210 simplifies the Android userspace code immensely.
211
212What: /sys/bus/greybus/devices/N-svc
213Date: October 2015
214KernelVersion: 4.XX
215Contact: Greg Kroah-Hartman <greg@kroah.com>
216Description:
217 The singleton SVC device of bus N.
218
219What: /sys/bus/greybus/devices/N-svc/ap_intf_id
220Date: October 2015
221KernelVersion: 4.XX
222Contact: Greg Kroah-Hartman <greg@kroah.com>
223Description:
224 The AP interface ID, a 1-byte non-zero integer which
225 defines the position of the AP module on the frame.
226 The interface positions are defined in the GMP
227 Module Developer Kit.
228
229What: /sys/bus/greybus/devices/N-svc/endo_id
230Date: October 2015
231KernelVersion: 4.XX
232Contact: Greg Kroah-Hartman <greg@kroah.com>
233Description:
234 The Endo ID, which is a 2-byte hexadecimal value
235 defined by the Endo layout scheme, documented in
236 the GMP Module Developer Kit.
237
238What: /sys/bus/greybus/devices/N-svc/intf_eject
239Date: October 2015
240KernelVersion: 4.XX
241Contact: Greg Kroah-Hartman <greg@kroah.com>
242Description:
243 Write the number of the interface that you wish to
244 forcibly eject from the system.
245
246What: /sys/bus/greybus/devices/N-svc/version
247Date: October 2015
248KernelVersion: 4.XX
249Contact: Greg Kroah-Hartman <greg@kroah.com>
250Description:
251 The version number of the firmware in the SVC device.
252
253What: /sys/bus/greybus/devices/N-svc/watchdog
254Date: October 2016
255KernelVersion: 4.XX
256Contact: Greg Kroah-Hartman <greg@kroah.com>
257Description:
258 If the SVC watchdog is enabled or not. Writing 0 to this
259 file will disable the watchdog, writing 1 will enable it.
260
261What: /sys/bus/greybus/devices/N-svc/watchdog_action
262Date: July 2016
263KernelVersion: 4.XX
264Contact: Greg Kroah-Hartman <greg@kroah.com>
265Description:
266 This attribute indicates the action to be performed upon SVC
267 watchdog bite.
268
269 The action can be one of the "reset" or "panic". Writing either
270 one of the "reset" or "panic" will change the behavior of SVC
271 watchdog bite. Default value is "reset".
272
273 "reset" means the UniPro subsystem is to be reset.
274
275 "panic" means SVC watchdog bite will cause kernel to panic.
diff --git a/drivers/staging/greybus/Kconfig b/drivers/staging/greybus/Kconfig
new file mode 100644
index 000000000000..89c49767d247
--- /dev/null
+++ b/drivers/staging/greybus/Kconfig
@@ -0,0 +1,219 @@
1menuconfig GREYBUS
2 tristate "Greybus support"
3 depends on SYSFS
4 ---help---
5	  This option enables the Greybus driver core.  Greybus is a
6	  hardware protocol that was designed to provide Unipro with a
7	  sane application layer.  It was originally designed for the
8	  ARA project, a modular phone system, but has shown up in other
9	  phones, and can be tunneled over other busses in order to
10	  control hardware devices.
11
12 Say Y here to enable support for these types of drivers.
13
14 To compile this code as a module, chose M here: the module
15 will be called greybus.ko
16
17if GREYBUS
18
19config GREYBUS_ES2
20 tristate "Greybus ES3 USB host controller"
21 depends on USB
22 ---help---
23 Select this option if you have a Toshiba ES3 USB device that
24 acts as a Greybus "host controller". This device is a bridge
25 from a USB device to a Unipro network.
26
27 To compile this code as a module, chose M here: the module
28 will be called gb-es2.ko
29
30config GREYBUS_AUDIO
31 tristate "Greybus Audio Class driver"
32 depends on SOUND
33 ---help---
34 Select this option if you have a device that follows the
35 Greybus Audio Class specification.
36
37 To compile this code as a module, chose M here: the module
38 will be called gb-audio.ko
39
40config GREYBUS_BOOTROM
41 tristate "Greybus Bootrom Class driver"
42 ---help---
43 Select this option if you have a device that follows the
44 Greybus Bootrom Class specification.
45
46 To compile this code as a module, chose M here: the module
47 will be called gb-bootrom.ko
48
49config GREYBUS_CAMERA
50 tristate "Greybus Camera Class driver"
51 depends on MEDIA && LEDS_CLASS_FLASH && BROKEN
52 ---help---
53 Select this option if you have a device that follows the
54 Greybus Camera Class specification.
55
56 To compile this code as a module, chose M here: the module
57 will be called gb-camera.ko
58
59config GREYBUS_FIRMWARE
60 tristate "Greybus Firmware Download Class driver"
61 depends on SPI
62 ---help---
63 Select this option if you have a device that follows the
64 Greybus Firmware Download Class specification.
65
66 To compile this code as a module, chose M here: the module
67 will be called gb-firmware.ko
68
69config GREYBUS_HID
70 tristate "Greybus HID Class driver"
71 depends on HID && INPUT
72 ---help---
73 Select this option if you have a device that follows the
74 Greybus HID Class specification.
75
76 To compile this code as a module, chose M here: the module
77 will be called gb-hid.ko
78
79config GREYBUS_LIGHT
80 tristate "Greybus LED Class driver"
81 depends on LEDS_CLASS
82 ---help---
83 Select this option if you have a device that follows the
84 Greybus LED Class specification.
85
86 To compile this code as a module, chose M here: the module
87 will be called gb-light.ko
88
89config GREYBUS_LOG
90 tristate "Greybus Debug Log Class driver"
91 ---help---
92 Select this option if you have a device that follows the
93 Greybus Debug Log Class specification.
94
95 To compile this code as a module, chose M here: the module
96 will be called gb-log.ko
97
98config GREYBUS_LOOPBACK
99 tristate "Greybus Loopback Class driver"
100 ---help---
101	  Select this option if you have a device that follows the
102	  Greybus Loopback Class specification.
103
104	  To compile this code as a module, choose M here: the module
105	  will be called gb-loopback.ko
106
107config GREYBUS_POWER
108 tristate "Greybus Powersupply Class driver"
109 depends on POWER_SUPPLY
110 ---help---
111 Select this option if you have a device that follows the
112 Greybus Powersupply Class specification.
113
114 To compile this code as a module, chose M here: the module
115 will be called gb-power-supply.ko
116
117config GREYBUS_RAW
118 tristate "Greybus Raw Class driver"
119 ---help---
120 Select this option if you have a device that follows the
121 Greybus Raw Class specification.
122
123 To compile this code as a module, chose M here: the module
124 will be called gb-raw.ko
125
126config GREYBUS_VIBRATOR
127 tristate "Greybus Vibrator Motor Class driver"
128 ---help---
129 Select this option if you have a device that follows the
130 Greybus Vibrator Motor Class specification.
131
132 To compile this code as a module, chose M here: the module
133 will be called gb-vibrator.ko
134
135menuconfig GREYBUS_BRIDGED_PHY
136 tristate "Greybus Bridged PHY Class drivers"
137 ---help---
138 Select this option to pick from a variety of Greybus Bridged
139 PHY class drivers. These drivers emulate a number of
140 different "traditional" busses by tunneling them over Greybus.
141 Examples of this include serial, SPI, USB, and others.
142
143 To compile this code as a module, chose M here: the module
144 will be called gb-phy.ko
145
146if GREYBUS_BRIDGED_PHY
147
148config GREYBUS_GPIO
149 tristate "Greybus GPIO Bridged PHY driver"
150 depends on GPIOLIB
151 ---help---
152 Select this option if you have a device that follows the
153 Greybus GPIO Bridged PHY Class specification.
154
155 To compile this code as a module, chose M here: the module
156 will be called gb-gpio.ko
157
158config GREYBUS_I2C
159 tristate "Greybus I2C Bridged PHY driver"
160 depends on I2C
161 ---help---
162 Select this option if you have a device that follows the
163 Greybus I2C Bridged PHY Class specification.
164
165 To compile this code as a module, chose M here: the module
166 will be called gb-i2c.ko
167
168config GREYBUS_PWM
169 tristate "Greybus PWM Bridged PHY driver"
170 depends on PWM
171 ---help---
172 Select this option if you have a device that follows the
173 Greybus PWM Bridged PHY Class specification.
174
175 To compile this code as a module, chose M here: the module
176 will be called gb-pwm.ko
177
178config GREYBUS_SDIO
179 tristate "Greybus SDIO Bridged PHY driver"
180 depends on MMC
181 ---help---
182 Select this option if you have a device that follows the
183 Greybus SDIO Bridged PHY Class specification.
184
185 To compile this code as a module, chose M here: the module
186 will be called gb-sdio.ko
187
188config GREYBUS_SPI
189 tristate "Greybus SPI Bridged PHY driver"
190 depends on SPI
191 ---help---
192 Select this option if you have a device that follows the
193 Greybus SPI Bridged PHY Class specification.
194
195 To compile this code as a module, chose M here: the module
196 will be called gb-spi.ko
197
198config GREYBUS_UART
199 tristate "Greybus UART Bridged PHY driver"
200 depends on TTY
201 ---help---
202 Select this option if you have a device that follows the
203 Greybus UART Bridged PHY Class specification.
204
205 To compile this code as a module, chose M here: the module
206 will be called gb-uart.ko
207
208config GREYBUS_USB
209 tristate "Greybus USB Host Bridged PHY driver"
210 depends on USB
211 ---help---
212 Select this option if you have a device that follows the
213 Greybus USB Host Bridged PHY Class specification.
214
215 To compile this code as a module, chose M here: the module
216 will be called gb-usb.ko
217
218endif # GREYBUS_BRIDGED_PHY
219endif # GREYBUS
diff --git a/drivers/staging/greybus/Makefile b/drivers/staging/greybus/Makefile
new file mode 100644
index 000000000000..f337b7b70782
--- /dev/null
+++ b/drivers/staging/greybus/Makefile
@@ -0,0 +1,96 @@
1# Greybus core
2greybus-y := core.o \
3 debugfs.o \
4 hd.o \
5 manifest.o \
6 module.o \
7 interface.o \
8 bundle.o \
9 connection.o \
10 control.o \
11 svc.o \
12 svc_watchdog.o \
13 operation.o \
14 timesync.o \
15 timesync_platform.o
16
17obj-$(CONFIG_GREYBUS) += greybus.o
18
19# needed for trace events
20ccflags-y += -I$(src)
21
22
23# Greybus Host controller drivers
24gb-es2-y := es2.o
25
26obj-$(CONFIG_GREYBUS_ES2) += gb-es2.o
27
28# Greybus class drivers
29gb-bootrom-y := bootrom.o
30gb-camera-y := camera.o
31gb-firmware-y := fw-core.o fw-download.o fw-management.o authentication.o
32gb-spilib-y := spilib.o
33gb-hid-y := hid.o
34gb-light-y := light.o
35gb-log-y := log.o
36gb-loopback-y := loopback.o
37gb-power-supply-y := power_supply.o
38gb-raw-y := raw.o
39gb-vibrator-y := vibrator.o
40
41obj-$(CONFIG_GREYBUS_BOOTROM) += gb-bootrom.o
42obj-$(CONFIG_GREYBUS_CAMERA) += gb-camera.o
43obj-$(CONFIG_GREYBUS_FIRMWARE) += gb-firmware.o gb-spilib.o
44obj-$(CONFIG_GREYBUS_HID) += gb-hid.o
45obj-$(CONFIG_GREYBUS_LIGHT) += gb-light.o
46obj-$(CONFIG_GREYBUS_LOG) += gb-log.o
47obj-$(CONFIG_GREYBUS_LOOPBACK) += gb-loopback.o
48obj-$(CONFIG_GREYBUS_POWER) += gb-power-supply.o
49obj-$(CONFIG_GREYBUS_RAW) += gb-raw.o
50obj-$(CONFIG_GREYBUS_VIBRATOR) += gb-vibrator.o
51
52# Greybus Audio is a bunch of modules
53gb-audio-module-y := audio_module.o audio_topology.o
54gb-audio-codec-y := audio_codec.o
55gb-audio-gb-y := audio_gb.o
56gb-audio-apbridgea-y := audio_apbridgea.o
57gb-audio-manager-y := audio_manager.o audio_manager_module.o
58
59# Greybus Audio sysfs helpers can be useful when debugging
60#GB_AUDIO_MANAGER_SYSFS ?= true
61#ifeq ($(GB_AUDIO_MANAGER_SYSFS),true)
62#gb-audio-manager-y += audio_manager_sysfs.o
63#ccflags-y += -DGB_AUDIO_MANAGER_SYSFS
64#endif
65
66obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-codec.o
67obj-$(CONFIG_GREYBUS_AUDIO_MSM8994) += gb-audio-module.o
68obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-gb.o
69obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-apbridgea.o
70obj-$(CONFIG_GREYBUS_AUDIO) += gb-audio-manager.o
71
72
73# Greybus Bridged PHY drivers
74gb-gbphy-y := gbphy.o
75gb-gpio-y := gpio.o
76gb-i2c-y := i2c.o
77gb-pwm-y := pwm.o
78gb-sdio-y := sdio.o
79gb-spi-y := spi.o
80gb-uart-y := uart.o
81gb-usb-y := usb.o
82
83obj-$(CONFIG_GREYBUS_BRIDGED_PHY) += gb-gbphy.o
84obj-$(CONFIG_GREYBUS_GPIO) += gb-gpio.o
85obj-$(CONFIG_GREYBUS_I2C) += gb-i2c.o
86obj-$(CONFIG_GREYBUS_PWM) += gb-pwm.o
87obj-$(CONFIG_GREYBUS_SDIO) += gb-sdio.o
88obj-$(CONFIG_GREYBUS_SPI) += gb-spi.o gb-spilib.o
89obj-$(CONFIG_GREYBUS_UART) += gb-uart.o
90obj-$(CONFIG_GREYBUS_USB) += gb-usb.o
91
92
93# Greybus Platform driver
94gb-arche-y := arche-platform.o arche-apb-ctrl.o
95
96obj-$(CONFIG_USB_HSIC_USB3613) += gb-arche.o
diff --git a/drivers/staging/greybus/arche-apb-ctrl.c b/drivers/staging/greybus/arche-apb-ctrl.c
new file mode 100644
index 000000000000..59d9d422cf04
--- /dev/null
+++ b/drivers/staging/greybus/arche-apb-ctrl.c
@@ -0,0 +1,522 @@
1/*
2 * Arche Platform driver to control APB.
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/gpio.h>
13#include <linux/interrupt.h>
14#include <linux/of_gpio.h>
15#include <linux/of_irq.h>
16#include <linux/module.h>
17#include <linux/pinctrl/consumer.h>
18#include <linux/platform_device.h>
19#include <linux/pm.h>
20#include <linux/regulator/consumer.h>
21#include <linux/spinlock.h>
22#include "arche_platform.h"
23
24
/* Per-device state for one AP Bridge (APB) controlled by the AP. */
struct arche_apb_ctrl_drvdata {
	/* Control GPIO signals to and from AP <=> AP Bridges */
	int resetn_gpio;	/* APB reset line; active-low (deassert = 1) */
	int boot_ret_gpio;	/* boot-retention line, driven by apb_bootret_*() */
	int pwroff_gpio;	/* "pwroff_n" input from APB */
	int wake_in_gpio;	/* not touched in this file — TODO confirm use */
	int wake_out_gpio;	/* not touched in this file — TODO confirm use */
	int pwrdn_gpio;		/* optional power-down line (warn-only if absent) */

	enum arche_platform_state state;	/* current APB power state */
	bool init_disabled;	/* true = all state-change sequences are no-ops */

	/* Optional supplies; may hold ERR pointers when absent (fixed supply) */
	struct regulator *vcore;
	struct regulator *vio;

	int clk_en_gpio;	/* optional clock-enable gpio (not needed on DB3) */
	struct clk *clk;	/* not used in this file — TODO confirm */

	struct pinctrl *pinctrl;
	struct pinctrl_state *pin_default;	/* looked up, not applied here */

	/* V2: SPI Bus control */
	int spi_en_gpio;	/* requested only while in FW_FLASHING state */
	bool spi_en_polarity_high;	/* from "spi-en-active-high" DT prop */
};
50
51/*
52 * Note that these low level api's are active high
53 */
/* Release the APB from reset (reset line is active-low, so drive 1). */
static inline void deassert_reset(unsigned int gpio)
{
	gpio_set_value(gpio, 1);
}
58
/* Hold the APB in reset (reset line is active-low, so drive 0). */
static inline void assert_reset(unsigned int gpio)
{
	gpio_set_value(gpio, 0);
}
63
64/*
65 * Note: Please do not modify the below sequence, as it is as per the spec
66 */
67static int coldboot_seq(struct platform_device *pdev)
68{
69 struct device *dev = &pdev->dev;
70 struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
71 int ret;
72
73 if (apb->init_disabled ||
74 apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
75 return 0;
76
77 /* Hold APB in reset state */
78 assert_reset(apb->resetn_gpio);
79
80 if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
81 gpio_is_valid(apb->spi_en_gpio))
82 devm_gpio_free(dev, apb->spi_en_gpio);
83
84 /* Enable power to APB */
85 if (!IS_ERR(apb->vcore)) {
86 ret = regulator_enable(apb->vcore);
87 if (ret) {
88 dev_err(dev, "failed to enable core regulator\n");
89 return ret;
90 }
91 }
92
93 if (!IS_ERR(apb->vio)) {
94 ret = regulator_enable(apb->vio);
95 if (ret) {
96 dev_err(dev, "failed to enable IO regulator\n");
97 return ret;
98 }
99 }
100
101 apb_bootret_deassert(dev);
102
103 /* On DB3 clock was not mandatory */
104 if (gpio_is_valid(apb->clk_en_gpio))
105 gpio_set_value(apb->clk_en_gpio, 1);
106
107 usleep_range(100, 200);
108
109 /* deassert reset to APB : Active-low signal */
110 deassert_reset(apb->resetn_gpio);
111
112 apb->state = ARCHE_PLATFORM_STATE_ACTIVE;
113
114 return 0;
115}
116
117static int fw_flashing_seq(struct platform_device *pdev)
118{
119 struct device *dev = &pdev->dev;
120 struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
121 int ret;
122
123 if (apb->init_disabled ||
124 apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
125 return 0;
126
127 ret = regulator_enable(apb->vcore);
128 if (ret) {
129 dev_err(dev, "failed to enable core regulator\n");
130 return ret;
131 }
132
133 ret = regulator_enable(apb->vio);
134 if (ret) {
135 dev_err(dev, "failed to enable IO regulator\n");
136 return ret;
137 }
138
139 if (gpio_is_valid(apb->spi_en_gpio)) {
140 unsigned long flags;
141
142 if (apb->spi_en_polarity_high)
143 flags = GPIOF_OUT_INIT_HIGH;
144 else
145 flags = GPIOF_OUT_INIT_LOW;
146
147 ret = devm_gpio_request_one(dev, apb->spi_en_gpio,
148 flags, "apb_spi_en");
149 if (ret) {
150 dev_err(dev, "Failed requesting SPI bus en gpio %d\n",
151 apb->spi_en_gpio);
152 return ret;
153 }
154 }
155
156 /* for flashing device should be in reset state */
157 assert_reset(apb->resetn_gpio);
158 apb->state = ARCHE_PLATFORM_STATE_FW_FLASHING;
159
160 return 0;
161}
162
/*
 * Move the APB to STANDBY state.  Per the WDM spec no output signals are
 * changed here; only the SPI line is released if flashing was in progress.
 * No-op when init is disabled.  Always returns 0.
 */
static int standby_boot_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);

	if (apb->init_disabled)
		return 0;

	/* Even if it is in OFF state, then we do not want to change the state */
	if (apb->state == ARCHE_PLATFORM_STATE_STANDBY ||
	    apb->state == ARCHE_PLATFORM_STATE_OFF)
		return 0;

	/* Release the SPI bus line if it was held for firmware flashing */
	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
	    gpio_is_valid(apb->spi_en_gpio))
		devm_gpio_free(dev, apb->spi_en_gpio);

	/*
	 * As per WDM spec, do nothing.
	 *
	 * Pasted from WDM spec:
	 * - A falling edge on POWEROFF_L is detected (a)
	 * - WDM enters standby mode, but no output signals are changed
	 */

	/* TODO: POWEROFF_L is input to WDM module */
	apb->state = ARCHE_PLATFORM_STATE_STANDBY;
	return 0;
}
192
/*
 * Power the APB off: release the flashing SPI line, gate the clock,
 * disable both optional supplies and put the APB back in reset.
 * No-op when init is disabled or already OFF.
 */
static void poweroff_seq(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);

	if (apb->init_disabled || apb->state == ARCHE_PLATFORM_STATE_OFF)
		return;

	/* Release the SPI bus line if it was held for firmware flashing */
	if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING &&
	    gpio_is_valid(apb->spi_en_gpio))
		devm_gpio_free(dev, apb->spi_en_gpio);

	/* disable the clock */
	if (gpio_is_valid(apb->clk_en_gpio))
		gpio_set_value(apb->clk_en_gpio, 0);

	/* Only disable supplies that exist and are actually enabled */
	if (!IS_ERR(apb->vcore) && regulator_is_enabled(apb->vcore) > 0)
		regulator_disable(apb->vcore);

	if (!IS_ERR(apb->vio) && regulator_is_enabled(apb->vio) > 0)
		regulator_disable(apb->vio);

	/* As part of exit, put APB back in reset state */
	assert_reset(apb->resetn_gpio);
	apb->state = ARCHE_PLATFORM_STATE_OFF;

	/* TODO: May have to send an event to SVC about this exit */
}
221
222void apb_bootret_assert(struct device *dev)
223{
224 struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
225
226 gpio_set_value(apb->boot_ret_gpio, 1);
227}
228
229void apb_bootret_deassert(struct device *dev)
230{
231 struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
232
233 gpio_set_value(apb->boot_ret_gpio, 0);
234}
235
/* Device-based wrapper around coldboot_seq() for external callers. */
int apb_ctrl_coldboot(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return coldboot_seq(pdev);
}
240
/* Device-based wrapper around fw_flashing_seq() for external callers. */
int apb_ctrl_fw_flashing(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return fw_flashing_seq(pdev);
}
245
/* Device-based wrapper around standby_boot_seq() for external callers. */
int apb_ctrl_standby_boot(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	return standby_boot_seq(pdev);
}
250
/* Device-based wrapper around poweroff_seq() for external callers. */
void apb_ctrl_poweroff(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);

	poweroff_seq(pdev);
}
255
/*
 * sysfs "state" store: switch the APB between "off", "active", "standby"
 * and "fw_flashing".  Writing the current state is a no-op; an unknown
 * string returns -EINVAL.
 */
static ssize_t state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct arche_apb_ctrl_drvdata *apb = platform_get_drvdata(pdev);
	int ret = 0;
	bool is_disabled;

	if (sysfs_streq(buf, "off")) {
		if (apb->state == ARCHE_PLATFORM_STATE_OFF)
			return count;

		poweroff_seq(pdev);
	} else if (sysfs_streq(buf, "active")) {
		if (apb->state == ARCHE_PLATFORM_STATE_ACTIVE)
			return count;

		/* Full power cycle before cold boot */
		poweroff_seq(pdev);
		/*
		 * "active" must also work on an init-disabled device:
		 * clear the flag for the boot and restore it on failure.
		 */
		is_disabled = apb->init_disabled;
		apb->init_disabled = false;
		ret = coldboot_seq(pdev);
		if (ret)
			apb->init_disabled = is_disabled;
	} else if (sysfs_streq(buf, "standby")) {
		if (apb->state == ARCHE_PLATFORM_STATE_STANDBY)
			return count;

		ret = standby_boot_seq(pdev);
	} else if (sysfs_streq(buf, "fw_flashing")) {
		if (apb->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
			return count;

		/*
		 * First we want to make sure we power off everything
		 * and then enter FW flashing state
		 */
		poweroff_seq(pdev);
		ret = fw_flashing_seq(pdev);
	} else {
		dev_err(dev, "unknown state\n");
		ret = -EINVAL;
	}

	return ret ? ret : count;
}
299
300static ssize_t state_show(struct device *dev,
301 struct device_attribute *attr, char *buf)
302{
303 struct arche_apb_ctrl_drvdata *apb = dev_get_drvdata(dev);
304
305 switch (apb->state) {
306 case ARCHE_PLATFORM_STATE_OFF:
307 return sprintf(buf, "off%s\n",
308 apb->init_disabled ? ",disabled" : "");
309 case ARCHE_PLATFORM_STATE_ACTIVE:
310 return sprintf(buf, "active\n");
311 case ARCHE_PLATFORM_STATE_STANDBY:
312 return sprintf(buf, "standby\n");
313 case ARCHE_PLATFORM_STATE_FW_FLASHING:
314 return sprintf(buf, "fw_flashing\n");
315 default:
316 return sprintf(buf, "unknown state\n");
317 }
318}
319
320static DEVICE_ATTR_RW(state);
321
/*
 * Parse the APB controller's device-tree node and claim its resources:
 * reset / boot-retention / pwroff GPIOs (mandatory), clock-enable and
 * power-down GPIOs (optional), vcore/vio regulators (optional), the
 * default pinctrl state, and the V2-only SPI-enable GPIO.
 *
 * Returns 0 on success or a negative errno.
 */
static int apb_ctrl_get_devtree_data(struct platform_device *pdev,
		struct arche_apb_ctrl_drvdata *apb)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	int ret;

	apb->resetn_gpio = of_get_named_gpio(np, "reset-gpios", 0);
	if (apb->resetn_gpio < 0) {
		dev_err(dev, "failed to get reset gpio\n");
		return apb->resetn_gpio;
	}
	/* Request LOW = keep APB in reset until a boot sequence runs */
	ret = devm_gpio_request_one(dev, apb->resetn_gpio,
		GPIOF_OUT_INIT_LOW, "apb-reset");
	if (ret) {
		dev_err(dev, "Failed requesting reset gpio %d\n",
			apb->resetn_gpio);
		return ret;
	}

	apb->boot_ret_gpio = of_get_named_gpio(np, "boot-ret-gpios", 0);
	if (apb->boot_ret_gpio < 0) {
		dev_err(dev, "failed to get boot retention gpio\n");
		return apb->boot_ret_gpio;
	}
	ret = devm_gpio_request_one(dev, apb->boot_ret_gpio,
		GPIOF_OUT_INIT_LOW, "boot retention");
	if (ret) {
		dev_err(dev, "Failed requesting bootret gpio %d\n",
			apb->boot_ret_gpio);
		return ret;
	}

	/*
	 * It's not mandatory to support power management interface
	 * NOTE(review): despite the comment, a missing pwr-off-gpios
	 * property is treated as a hard error below — confirm intent.
	 */
	apb->pwroff_gpio = of_get_named_gpio(np, "pwr-off-gpios", 0);
	if (apb->pwroff_gpio < 0) {
		dev_err(dev, "failed to get power off gpio\n");
		return apb->pwroff_gpio;
	}
	ret = devm_gpio_request_one(dev, apb->pwroff_gpio,
		GPIOF_IN, "pwroff_n");
	if (ret) {
		dev_err(dev, "Failed requesting pwroff_n gpio %d\n",
			apb->pwroff_gpio);
		return ret;
	}

	/* Do not make clock mandatory as of now (for DB3) */
	apb->clk_en_gpio = of_get_named_gpio(np, "clock-en-gpio", 0);
	if (apb->clk_en_gpio < 0) {
		dev_warn(dev, "failed to get clock en gpio\n");
	} else if (gpio_is_valid(apb->clk_en_gpio)) {
		ret = devm_gpio_request_one(dev, apb->clk_en_gpio,
			GPIOF_OUT_INIT_LOW, "apb_clk_en");
		if (ret) {
			dev_warn(dev, "Failed requesting APB clock en gpio %d\n",
				apb->clk_en_gpio);
			return ret;
		}
	}

	/* Optional; only read here, never requested in this driver */
	apb->pwrdn_gpio = of_get_named_gpio(np, "pwr-down-gpios", 0);
	if (apb->pwrdn_gpio < 0)
		dev_warn(dev, "failed to get power down gpio\n");

	/* Regulators are optional, as we may have fixed supply coming in */
	apb->vcore = devm_regulator_get(dev, "vcore");
	if (IS_ERR(apb->vcore))
		dev_warn(dev, "no core regulator found\n");

	apb->vio = devm_regulator_get(dev, "vio");
	if (IS_ERR(apb->vio))
		dev_warn(dev, "no IO regulator found\n");

	apb->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(apb->pinctrl)) {
		dev_err(&pdev->dev, "could not get pinctrl handle\n");
		return PTR_ERR(apb->pinctrl);
	}
	apb->pin_default = pinctrl_lookup_state(apb->pinctrl, "default");
	if (IS_ERR(apb->pin_default)) {
		dev_err(&pdev->dev, "could not get default pin state\n");
		return PTR_ERR(apb->pin_default);
	}

	/* Only applicable for platform >= V2 */
	apb->spi_en_gpio = of_get_named_gpio(np, "spi-en-gpio", 0);
	if (apb->spi_en_gpio >= 0) {
		if (of_property_read_bool(pdev->dev.of_node,
			"spi-en-active-high"))
			apb->spi_en_polarity_high = true;
	}

	return 0;
}
417
418static int arche_apb_ctrl_probe(struct platform_device *pdev)
419{
420 int ret;
421 struct arche_apb_ctrl_drvdata *apb;
422 struct device *dev = &pdev->dev;
423
424 apb = devm_kzalloc(&pdev->dev, sizeof(*apb), GFP_KERNEL);
425 if (!apb)
426 return -ENOMEM;
427
428 ret = apb_ctrl_get_devtree_data(pdev, apb);
429 if (ret) {
430 dev_err(dev, "failed to get apb devicetree data %d\n", ret);
431 return ret;
432 }
433
434 /* Initially set APB to OFF state */
435 apb->state = ARCHE_PLATFORM_STATE_OFF;
436 /* Check whether device needs to be enabled on boot */
437 if (of_property_read_bool(pdev->dev.of_node, "arche,init-disable"))
438 apb->init_disabled = true;
439
440 platform_set_drvdata(pdev, apb);
441
442 /* Create sysfs interface to allow user to change state dynamically */
443 ret = device_create_file(dev, &dev_attr_state);
444 if (ret) {
445 dev_err(dev, "failed to create state file in sysfs\n");
446 return ret;
447 }
448
449 dev_info(&pdev->dev, "Device registered successfully\n");
450 return 0;
451}
452
453static int arche_apb_ctrl_remove(struct platform_device *pdev)
454{
455 device_remove_file(&pdev->dev, &dev_attr_state);
456 poweroff_seq(pdev);
457 platform_set_drvdata(pdev, NULL);
458
459 return 0;
460}
461
/* PM suspend callback — currently a no-op placeholder. */
static int arche_apb_ctrl_suspend(struct device *dev)
{
	/*
	 * If timing profile permits, we may shutdown bridge
	 * completely
	 *
	 * TODO: sequence ??
	 *
	 * Also, need to make sure we meet precondition for unipro suspend
	 * Precondition: Definition ???
	 */
	return 0;
}
475
/* PM resume callback — currently a no-op placeholder. */
static int arche_apb_ctrl_resume(struct device *dev)
{
	/*
	 * At least for ES2 we have to meet the delay requirement between
	 * unipro switch and AP bridge init, depending on whether bridge is in
	 * OFF state or standby state.
	 *
	 * Based on whether bridge is in standby or OFF state we may have to
	 * assert multiple signals. Please refer to WDM spec, for more info.
	 */
	return 0;
}
489
/* System shutdown: make sure the APB is powered off and held in reset. */
static void arche_apb_ctrl_shutdown(struct platform_device *pdev)
{
	apb_ctrl_poweroff(&pdev->dev);
}
494
495static SIMPLE_DEV_PM_OPS(arche_apb_ctrl_pm_ops, arche_apb_ctrl_suspend,
496 arche_apb_ctrl_resume);
497
498static struct of_device_id arche_apb_ctrl_of_match[] = {
499 { .compatible = "usbffff,2", },
500 { },
501};
502
/* Platform driver; registered/unregistered via arche_apb_init()/_exit(). */
static struct platform_driver arche_apb_ctrl_device_driver = {
	.probe		= arche_apb_ctrl_probe,
	.remove		= arche_apb_ctrl_remove,
	.shutdown	= arche_apb_ctrl_shutdown,
	.driver		= {
		.name	= "arche-apb-ctrl",
		.pm	= &arche_apb_ctrl_pm_ops,
		.of_match_table = arche_apb_ctrl_of_match,
	}
};
513
/*
 * Register the APB control driver.  Non-static on purpose: presumably
 * called from the arche platform driver's init path — see arche_platform.h.
 */
int __init arche_apb_init(void)
{
	return platform_driver_register(&arche_apb_ctrl_device_driver);
}
518
/* Unregister the APB control driver (counterpart of arche_apb_init()). */
void __exit arche_apb_exit(void)
{
	platform_driver_unregister(&arche_apb_ctrl_device_driver);
}
diff --git a/drivers/staging/greybus/arche-platform.c b/drivers/staging/greybus/arche-platform.c
new file mode 100644
index 000000000000..9d9048e6aed3
--- /dev/null
+++ b/drivers/staging/greybus/arche-platform.c
@@ -0,0 +1,828 @@
1/*
2 * Arche Platform driver to enable Unipro link.
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/gpio.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/of_gpio.h>
16#include <linux/of_platform.h>
17#include <linux/pinctrl/consumer.h>
18#include <linux/platform_device.h>
19#include <linux/pm.h>
20#include <linux/interrupt.h>
21#include <linux/irq.h>
22#include <linux/suspend.h>
23#include <linux/time.h>
24#include "arche_platform.h"
25#include "greybus.h"
26
27#include <linux/usb/usb3613.h>
28
29#define WD_COLDBOOT_PULSE_WIDTH_MS 30
30
/*
 * State machine for the bidirectional wake/detect line shared with the SVC.
 * Transitions happen in the wake/detect IRQ handlers and in
 * arche_platform_change_state(), protected by
 * arche_platform_drvdata::wake_lock.
 */
enum svc_wakedetect_state {
	WD_STATE_IDLE,			/* Default state = pulled high/low */
	WD_STATE_BOOT_INIT,		/* WD = falling edge (low) */
	WD_STATE_COLDBOOT_TRIG,		/* WD = rising edge (high), > 30msec */
	WD_STATE_STANDBYBOOT_TRIG,	/* As of now not used ?? */
	WD_STATE_COLDBOOT_START,	/* Cold boot process started */
	WD_STATE_STANDBYBOOT_START,	/* Not used */
	WD_STATE_TIMESYNC,		/* Pulses forwarded to timesync core */
};
40
/*
 * Per-device driver data for the arche platform device.
 *
 * 'state' is protected by platform_state_mutex, 'wake_detect_state' by
 * wake_lock; arche_platform_change_state() takes both.
 */
struct arche_platform_drvdata {
	/* Control GPIO signals to and from AP <=> SVC */
	int svc_reset_gpio;
	bool is_reset_act_hi;	/* true if SVC reset line is active-high */
	int svc_sysboot_gpio;	/* boot-mode select (0 in coldboot, 1 in fw-flashing) */
	int wake_detect_gpio; /* bi-dir,maps to WAKE_MOD & WAKE_FRAME signals */

	enum arche_platform_state state;

	int svc_refclk_req;	/* input GPIO: SVC requests the ref clock */
	struct clk *svc_ref_clk;

	struct pinctrl *pinctrl;
	struct pinctrl_state *pin_default;

	int num_apbs;		/* number of APB child nodes found in DT */

	enum svc_wakedetect_state wake_detect_state;
	int wake_detect_irq;
	spinlock_t wake_lock;			/* Protect wake_detect_state */
	struct mutex platform_state_mutex;	/* Protect state */
	wait_queue_head_t wq;			/* WQ for arche_pdata->state */
	unsigned long wake_detect_start;	/* jiffies at last falling edge */
	struct notifier_block pm_notifier;

	struct device *dev;
	struct gb_timesync_svc *timesync_svc_pdata;
};
69
/* device_for_each_child() callback: assert BOOTRET on one APB child. */
static int arche_apb_bootret_assert(struct device *dev, void *data)
{
	apb_bootret_assert(dev);
	return 0;
}
75
/* device_for_each_child() callback: deassert BOOTRET on one APB child. */
static int arche_apb_bootret_deassert(struct device *dev, void *data)
{
	apb_bootret_deassert(dev);
	return 0;
}
81
/* Requires calling context to hold arche_pdata->platform_state_mutex */
static void arche_platform_set_state(struct arche_platform_drvdata *arche_pdata,
				     enum arche_platform_state state)
{
	/* Trivial setter, kept so the locking rule is documented in one place */
	arche_pdata->state = state;
}
88
89/*
90 * arche_platform_change_state: Change the operational state
91 *
92 * This exported function allows external drivers to change the state
93 * of the arche-platform driver.
94 * Note that this function only supports transitions between two states
95 * with limited functionality.
96 *
97 * - ARCHE_PLATFORM_STATE_TIME_SYNC:
98 * Once set, allows timesync operations between SVC <=> AP and makes
99 * sure that arche-platform driver ignores any subsequent events/pulses
100 * from SVC over wake/detect.
101 *
102 * - ARCHE_PLATFORM_STATE_ACTIVE:
103 * Puts back driver to active state, where any pulse from SVC on wake/detect
104 * line would trigger either cold/standby boot.
105 * Note: Transition request from this function does not trigger cold/standby
106 * boot. It just puts back driver book keeping variable back to ACTIVE
107 * state and restores the interrupt.
108 *
109 * Returns -ENODEV if device not found, -EAGAIN if the driver cannot currently
110 * satisfy the requested state-transition or -EINVAL for all other
111 * state-transition requests.
112 */
113int arche_platform_change_state(enum arche_platform_state state,
114 struct gb_timesync_svc *timesync_svc_pdata)
115{
116 struct arche_platform_drvdata *arche_pdata;
117 struct platform_device *pdev;
118 struct device_node *np;
119 int ret = -EAGAIN;
120 unsigned long flags;
121
122 np = of_find_compatible_node(NULL, NULL, "google,arche-platform");
123 if (!np) {
124 pr_err("google,arche-platform device node not found\n");
125 return -ENODEV;
126 }
127
128 pdev = of_find_device_by_node(np);
129 if (!pdev) {
130 pr_err("arche-platform device not found\n");
131 return -ENODEV;
132 }
133
134 arche_pdata = platform_get_drvdata(pdev);
135
136 mutex_lock(&arche_pdata->platform_state_mutex);
137 spin_lock_irqsave(&arche_pdata->wake_lock, flags);
138
139 if (arche_pdata->state == state) {
140 ret = 0;
141 goto exit;
142 }
143
144 switch (state) {
145 case ARCHE_PLATFORM_STATE_TIME_SYNC:
146 if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
147 ret = -EINVAL;
148 goto exit;
149 }
150 if (arche_pdata->wake_detect_state != WD_STATE_IDLE) {
151 dev_err(arche_pdata->dev,
152 "driver busy with wake/detect line ops\n");
153 goto exit;
154 }
155 device_for_each_child(arche_pdata->dev, NULL,
156 arche_apb_bootret_assert);
157 arche_pdata->wake_detect_state = WD_STATE_TIMESYNC;
158 break;
159 case ARCHE_PLATFORM_STATE_ACTIVE:
160 if (arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC) {
161 ret = -EINVAL;
162 goto exit;
163 }
164 device_for_each_child(arche_pdata->dev, NULL,
165 arche_apb_bootret_deassert);
166 arche_pdata->wake_detect_state = WD_STATE_IDLE;
167 break;
168 case ARCHE_PLATFORM_STATE_OFF:
169 case ARCHE_PLATFORM_STATE_STANDBY:
170 case ARCHE_PLATFORM_STATE_FW_FLASHING:
171 dev_err(arche_pdata->dev, "busy, request to retry later\n");
172 goto exit;
173 default:
174 ret = -EINVAL;
175 dev_err(arche_pdata->dev,
176 "invalid state transition request\n");
177 goto exit;
178 }
179 arche_pdata->timesync_svc_pdata = timesync_svc_pdata;
180 arche_platform_set_state(arche_pdata, state);
181 if (state == ARCHE_PLATFORM_STATE_ACTIVE)
182 wake_up(&arche_pdata->wq);
183
184 ret = 0;
185exit:
186 spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
187 mutex_unlock(&arche_pdata->platform_state_mutex);
188 of_node_put(np);
189 return ret;
190}
191EXPORT_SYMBOL_GPL(arche_platform_change_state);
192
/* Requires arche_pdata->wake_lock is held by calling context */
static void arche_platform_set_wake_detect_state(
				struct arche_platform_drvdata *arche_pdata,
				enum svc_wakedetect_state state)
{
	/* Trivial setter, kept so the locking rule is documented in one place */
	arche_pdata->wake_detect_state = state;
}
200
/* Drive the SVC reset line; 'onoff' is the raw GPIO level to set. */
static inline void svc_reset_onoff(unsigned int gpio, bool onoff)
{
	gpio_set_value(gpio, onoff);
}
205
/*
 * device_for_each_child() callback: cold boot one APB child.
 * Children are independent, so a failure is only logged and iteration
 * over the remaining children continues.
 */
static int apb_cold_boot(struct device *dev, void *data)
{
	int ret;

	ret = apb_ctrl_coldboot(dev);
	if (ret)
		dev_warn(dev, "failed to coldboot\n");

	/* Child nodes are independent, so do not exit coldboot operation */
	return 0;
}
217
/* device_for_each_child() callback: power off one APB child. */
static int apb_poweroff(struct device *dev, void *data)
{
	apb_ctrl_poweroff(dev);

	/*
	 * Enable HUB3613 into HUB mode.
	 * NOTE(review): the comment says "enable" but the argument is false,
	 * while the cold-boot path passes true with the same comment —
	 * confirm usb3613_hub_mode_ctrl() polarity.
	 */
	if (usb3613_hub_mode_ctrl(false))
		dev_warn(dev, "failed to control hub device\n");

	return 0;
}
228
/*
 * Re-arm the wake/detect line: switch the bidirectional GPIO back to input
 * and unmask its IRQ so events from the SVC can be received again.
 */
static void arche_platform_wd_irq_en(struct arche_platform_drvdata *arche_pdata)
{
	/* Enable interrupt here, to read event back from SVC */
	gpio_direction_input(arche_pdata->wake_detect_gpio);
	enable_irq(arche_pdata->wake_detect_irq);
}
235
/*
 * Threaded half of the wake/detect IRQ: runs the actual APB power cycle
 * after the hard handler has latched WD_STATE_COLDBOOT_TRIG. The sleeping
 * work (power off, cold boot, hub control) is done outside wake_lock.
 */
static irqreturn_t arche_platform_wd_irq_thread(int irq, void *devid)
{
	struct arche_platform_drvdata *arche_pdata = devid;
	unsigned long flags;

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
	if (arche_pdata->wake_detect_state != WD_STATE_COLDBOOT_TRIG) {
		/* Something is wrong */
		spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
		return IRQ_HANDLED;
	}

	arche_platform_set_wake_detect_state(arche_pdata,
					     WD_STATE_COLDBOOT_START);
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	/* It should complete power cycle, so first make sure it is poweroff */
	device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);

	/* Bring APB out of reset: cold boot sequence */
	device_for_each_child(arche_pdata->dev, NULL, apb_cold_boot);

	/* Enable HUB3613 into HUB mode. */
	if (usb3613_hub_mode_ctrl(true))
		dev_warn(arche_pdata->dev, "failed to control hub device\n");

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);
	arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	return IRQ_HANDLED;
}
268
/*
 * Hard wake/detect IRQ handler; fires on both edges.
 *
 * A falling edge from IDLE records the pulse start time; a rising edge
 * more than WD_COLDBOOT_PULSE_WIDTH_MS later requests a cold boot by
 * waking the IRQ thread. In TIMESYNC state the event is forwarded to the
 * timesync core instead of the boot state machine.
 */
static irqreturn_t arche_platform_wd_irq(int irq, void *devid)
{
	struct arche_platform_drvdata *arche_pdata = devid;
	unsigned long flags;

	spin_lock_irqsave(&arche_pdata->wake_lock, flags);

	if (arche_pdata->wake_detect_state == WD_STATE_TIMESYNC) {
		gb_timesync_irq(arche_pdata->timesync_svc_pdata);
		goto exit;
	}

	if (gpio_get_value(arche_pdata->wake_detect_gpio)) {
		/* wake/detect rising */

		/*
		 * If wake/detect line goes high after low, within less than
		 * 30msec, then standby boot sequence is initiated, which is not
		 * supported/implemented as of now. So ignore it.
		 */
		if (arche_pdata->wake_detect_state == WD_STATE_BOOT_INIT) {
			if (time_before(jiffies,
					arche_pdata->wake_detect_start +
					msecs_to_jiffies(WD_COLDBOOT_PULSE_WIDTH_MS))) {
				/* Short pulse: would-be standby boot, ignored */
				arche_platform_set_wake_detect_state(arche_pdata,
								     WD_STATE_IDLE);
			} else {
				/* Check we are not in middle of irq thread already */
				if (arche_pdata->wake_detect_state !=
						WD_STATE_COLDBOOT_START) {
					arche_platform_set_wake_detect_state(arche_pdata,
									     WD_STATE_COLDBOOT_TRIG);
					spin_unlock_irqrestore(
						&arche_pdata->wake_lock,
						flags);
					return IRQ_WAKE_THREAD;
				}
			}
		}
	} else {
		/* wake/detect falling */
		if (arche_pdata->wake_detect_state == WD_STATE_IDLE) {
			arche_pdata->wake_detect_start = jiffies;
			/*
			 * In the beginning, when wake/detect goes low (first
			 * time), we assume it is meant for coldboot and set
			 * the flag. If wake/detect line stays low beyond
			 * 30msec, then it is coldboot else fallback to
			 * standby boot.
			 */
			arche_platform_set_wake_detect_state(arche_pdata,
							     WD_STATE_BOOT_INIT);
		}
	}

exit:
	spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);

	return IRQ_HANDLED;
}
327
/*
 * Cold boot the SVC: hold it in reset, select the normal boot mode, enable
 * its reference clock, then release reset and mark the driver ACTIVE.
 *
 * Requires arche_pdata->platform_state_mutex to be held
 */
static int arche_platform_coldboot_seq(struct arche_platform_drvdata *arche_pdata)
{
	int ret;

	/* Already booted - nothing to do */
	if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
		return 0;

	dev_info(arche_pdata->dev, "Booting from cold boot state\n");

	/* Hold SVC in reset while reconfiguring boot mode and clock */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	/* sysboot = 0 here vs 1 in fw-flashing: selects normal boot */
	gpio_set_value(arche_pdata->svc_sysboot_gpio, 0);
	usleep_range(100, 200);

	ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
	if (ret) {
		dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
			ret);
		return ret;
	}

	/* bring SVC out of reset */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			!arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_ACTIVE);

	return 0;
}
361
/*
 * Reboot the SVC with the firmware-flashing boot mode selected
 * (sysboot = 1); mirrors arche_platform_coldboot_seq() otherwise.
 *
 * Requires arche_pdata->platform_state_mutex to be held
 */
static int arche_platform_fw_flashing_seq(struct arche_platform_drvdata *arche_pdata)
{
	int ret;

	/* Already in fw-flashing mode - nothing to do */
	if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
		return 0;

	dev_info(arche_pdata->dev, "Switching to FW flashing state\n");

	/* Hold SVC in reset while reconfiguring boot mode and clock */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	/* sysboot = 1 selects the flashing boot mode */
	gpio_set_value(arche_pdata->svc_sysboot_gpio, 1);

	usleep_range(100, 200);

	ret = clk_prepare_enable(arche_pdata->svc_ref_clk);
	if (ret) {
		dev_err(arche_pdata->dev, "failed to enable svc_ref_clk: %d\n",
			ret);
		return ret;
	}

	/* bring SVC out of reset */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			!arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_FW_FLASHING);

	return 0;
}
395
/*
 * Power the SVC off: quiesce the wake/detect machinery (unless coming from
 * fw-flashing, where it is already quiesced), gate the reference clock and
 * put the SVC back into reset.
 *
 * Requires arche_pdata->platform_state_mutex to be held
 */
static void arche_platform_poweroff_seq(struct arche_platform_drvdata *arche_pdata)
{
	unsigned long flags;

	if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
		return;

	/* If in fw_flashing mode, then no need to repeat things again */
	if (arche_pdata->state != ARCHE_PLATFORM_STATE_FW_FLASHING) {
		disable_irq(arche_pdata->wake_detect_irq);

		spin_lock_irqsave(&arche_pdata->wake_lock, flags);
		arche_platform_set_wake_detect_state(arche_pdata,
						     WD_STATE_IDLE);
		spin_unlock_irqrestore(&arche_pdata->wake_lock, flags);
	}

	clk_disable_unprepare(arche_pdata->svc_ref_clk);

	/* As part of exit, put APB back in reset state */
	svc_reset_onoff(arche_pdata->svc_reset_gpio,
			arche_pdata->is_reset_act_hi);

	arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
}
424
/*
 * sysfs "state" store handler: accepts "off", "active", "standby" and
 * "fw_flashing" and drives the matching power sequence. If a timesync
 * operation is in progress, it blocks (interruptibly) until the driver
 * leaves TIME_SYNC state before applying the request.
 */
static ssize_t state_store(struct device *dev,
			   struct device_attribute *attr, const char *buf, size_t count)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);
	int ret = 0;

retry:
	mutex_lock(&arche_pdata->platform_state_mutex);
	if (arche_pdata->state == ARCHE_PLATFORM_STATE_TIME_SYNC) {
		/* Drop the lock while waiting, then re-check from the top */
		mutex_unlock(&arche_pdata->platform_state_mutex);
		ret = wait_event_interruptible(
			arche_pdata->wq,
			arche_pdata->state != ARCHE_PLATFORM_STATE_TIME_SYNC);
		if (ret)
			return ret;
		goto retry;
	}

	if (sysfs_streq(buf, "off")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_OFF)
			goto exit;

		/* If SVC goes down, bring down APB's as well */
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);

		arche_platform_poweroff_seq(arche_pdata);

	} else if (sysfs_streq(buf, "active")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_ACTIVE)
			goto exit;

		/*
		 * First we want to make sure we power off everything
		 * and then activate back again.
		 */
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
		arche_platform_poweroff_seq(arche_pdata);

		arche_platform_wd_irq_en(arche_pdata);
		ret = arche_platform_coldboot_seq(arche_pdata);
		if (ret)
			goto exit;

	} else if (sysfs_streq(buf, "standby")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_STANDBY)
			goto exit;

		dev_warn(arche_pdata->dev, "standby state not supported\n");
	} else if (sysfs_streq(buf, "fw_flashing")) {
		if (arche_pdata->state == ARCHE_PLATFORM_STATE_FW_FLASHING)
			goto exit;

		/*
		 * Here we only control SVC.
		 *
		 * In case of FW_FLASHING mode we do not want to control
		 * APBs, as in case of V2, SPI bus is shared between both
		 * the APBs. So let user chose which APB he wants to flash.
		 */
		arche_platform_poweroff_seq(arche_pdata);

		ret = arche_platform_fw_flashing_seq(arche_pdata);
		if (ret)
			goto exit;
	} else {
		dev_err(arche_pdata->dev, "unknown state\n");
		ret = -EINVAL;
	}

exit:
	mutex_unlock(&arche_pdata->platform_state_mutex);
	return ret ? ret : count;
}
497
498static ssize_t state_show(struct device *dev,
499 struct device_attribute *attr, char *buf)
500{
501 struct arche_platform_drvdata *arche_pdata = dev_get_drvdata(dev);
502
503 switch (arche_pdata->state) {
504 case ARCHE_PLATFORM_STATE_OFF:
505 return sprintf(buf, "off\n");
506 case ARCHE_PLATFORM_STATE_ACTIVE:
507 return sprintf(buf, "active\n");
508 case ARCHE_PLATFORM_STATE_STANDBY:
509 return sprintf(buf, "standby\n");
510 case ARCHE_PLATFORM_STATE_FW_FLASHING:
511 return sprintf(buf, "fw_flashing\n");
512 case ARCHE_PLATFORM_STATE_TIME_SYNC:
513 return sprintf(buf, "time_sync\n");
514 default:
515 return sprintf(buf, "unknown state\n");
516 }
517}
518
519static DEVICE_ATTR_RW(state);
520
/*
 * System PM notifier: power the whole Arche assembly off before suspend
 * (vetoing suspend unless the driver is fully ACTIVE) and cold boot it
 * again after resume.
 */
static int arche_platform_pm_notifier(struct notifier_block *notifier,
				      unsigned long pm_event, void *unused)
{
	struct arche_platform_drvdata *arche_pdata =
		container_of(notifier, struct arche_platform_drvdata,
			     pm_notifier);
	int ret = NOTIFY_DONE;

	mutex_lock(&arche_pdata->platform_state_mutex);
	switch (pm_event) {
	case PM_SUSPEND_PREPARE:
		if (arche_pdata->state != ARCHE_PLATFORM_STATE_ACTIVE) {
			/* Refuse to suspend while not fully active */
			ret = NOTIFY_STOP;
			break;
		}
		device_for_each_child(arche_pdata->dev, NULL, apb_poweroff);
		arche_platform_poweroff_seq(arche_pdata);
		break;
	case PM_POST_SUSPEND:
		if (arche_pdata->state != ARCHE_PLATFORM_STATE_OFF)
			break;

		arche_platform_wd_irq_en(arche_pdata);
		arche_platform_coldboot_seq(arche_pdata);
		break;
	default:
		break;
	}
	mutex_unlock(&arche_pdata->platform_state_mutex);

	return ret;
}
553
554static int arche_platform_probe(struct platform_device *pdev)
555{
556 struct arche_platform_drvdata *arche_pdata;
557 struct device *dev = &pdev->dev;
558 struct device_node *np = dev->of_node;
559 int ret;
560
561 arche_pdata = devm_kzalloc(&pdev->dev, sizeof(*arche_pdata), GFP_KERNEL);
562 if (!arche_pdata)
563 return -ENOMEM;
564
565 /* setup svc reset gpio */
566 arche_pdata->is_reset_act_hi = of_property_read_bool(np,
567 "svc,reset-active-high");
568 arche_pdata->svc_reset_gpio = of_get_named_gpio(np, "svc,reset-gpio", 0);
569 if (arche_pdata->svc_reset_gpio < 0) {
570 dev_err(dev, "failed to get reset-gpio\n");
571 return arche_pdata->svc_reset_gpio;
572 }
573 ret = devm_gpio_request(dev, arche_pdata->svc_reset_gpio, "svc-reset");
574 if (ret) {
575 dev_err(dev, "failed to request svc-reset gpio:%d\n", ret);
576 return ret;
577 }
578 ret = gpio_direction_output(arche_pdata->svc_reset_gpio,
579 arche_pdata->is_reset_act_hi);
580 if (ret) {
581 dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
582 return ret;
583 }
584 arche_platform_set_state(arche_pdata, ARCHE_PLATFORM_STATE_OFF);
585
586 arche_pdata->svc_sysboot_gpio = of_get_named_gpio(np,
587 "svc,sysboot-gpio", 0);
588 if (arche_pdata->svc_sysboot_gpio < 0) {
589 dev_err(dev, "failed to get sysboot gpio\n");
590 return arche_pdata->svc_sysboot_gpio;
591 }
592 ret = devm_gpio_request(dev, arche_pdata->svc_sysboot_gpio, "sysboot0");
593 if (ret) {
594 dev_err(dev, "failed to request sysboot0 gpio:%d\n", ret);
595 return ret;
596 }
597 ret = gpio_direction_output(arche_pdata->svc_sysboot_gpio, 0);
598 if (ret) {
599 dev_err(dev, "failed to set svc-reset gpio dir:%d\n", ret);
600 return ret;
601 }
602
603 /* setup the clock request gpio first */
604 arche_pdata->svc_refclk_req = of_get_named_gpio(np,
605 "svc,refclk-req-gpio", 0);
606 if (arche_pdata->svc_refclk_req < 0) {
607 dev_err(dev, "failed to get svc clock-req gpio\n");
608 return arche_pdata->svc_refclk_req;
609 }
610 ret = devm_gpio_request(dev, arche_pdata->svc_refclk_req, "svc-clk-req");
611 if (ret) {
612 dev_err(dev, "failed to request svc-clk-req gpio: %d\n", ret);
613 return ret;
614 }
615 ret = gpio_direction_input(arche_pdata->svc_refclk_req);
616 if (ret) {
617 dev_err(dev, "failed to set svc-clk-req gpio dir :%d\n", ret);
618 return ret;
619 }
620
621 /* setup refclk2 to follow the pin */
622 arche_pdata->svc_ref_clk = devm_clk_get(dev, "svc_ref_clk");
623 if (IS_ERR(arche_pdata->svc_ref_clk)) {
624 ret = PTR_ERR(arche_pdata->svc_ref_clk);
625 dev_err(dev, "failed to get svc_ref_clk: %d\n", ret);
626 return ret;
627 }
628
629 platform_set_drvdata(pdev, arche_pdata);
630
631 arche_pdata->num_apbs = of_get_child_count(np);
632 dev_dbg(dev, "Number of APB's available - %d\n", arche_pdata->num_apbs);
633
634 arche_pdata->wake_detect_gpio = of_get_named_gpio(np, "svc,wake-detect-gpio", 0);
635 if (arche_pdata->wake_detect_gpio < 0) {
636 dev_err(dev, "failed to get wake detect gpio\n");
637 ret = arche_pdata->wake_detect_gpio;
638 return ret;
639 }
640
641 ret = devm_gpio_request(dev, arche_pdata->wake_detect_gpio, "wake detect");
642 if (ret) {
643 dev_err(dev, "Failed requesting wake_detect gpio %d\n",
644 arche_pdata->wake_detect_gpio);
645 return ret;
646 }
647
648 arche_platform_set_wake_detect_state(arche_pdata, WD_STATE_IDLE);
649
650 arche_pdata->dev = &pdev->dev;
651
652 spin_lock_init(&arche_pdata->wake_lock);
653 mutex_init(&arche_pdata->platform_state_mutex);
654 init_waitqueue_head(&arche_pdata->wq);
655 arche_pdata->wake_detect_irq =
656 gpio_to_irq(arche_pdata->wake_detect_gpio);
657
658 ret = devm_request_threaded_irq(dev, arche_pdata->wake_detect_irq,
659 arche_platform_wd_irq,
660 arche_platform_wd_irq_thread,
661 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
662 dev_name(dev), arche_pdata);
663 if (ret) {
664 dev_err(dev, "failed to request wake detect IRQ %d\n", ret);
665 return ret;
666 }
667 disable_irq(arche_pdata->wake_detect_irq);
668
669 ret = device_create_file(dev, &dev_attr_state);
670 if (ret) {
671 dev_err(dev, "failed to create state file in sysfs\n");
672 return ret;
673 }
674
675 ret = of_platform_populate(np, NULL, NULL, dev);
676 if (ret) {
677 dev_err(dev, "failed to populate child nodes %d\n", ret);
678 goto err_device_remove;
679 }
680
681 arche_pdata->pm_notifier.notifier_call = arche_platform_pm_notifier;
682 ret = register_pm_notifier(&arche_pdata->pm_notifier);
683
684 if (ret) {
685 dev_err(dev, "failed to register pm notifier %d\n", ret);
686 goto err_device_remove;
687 }
688
689 /* Register callback pointer */
690 arche_platform_change_state_cb = arche_platform_change_state;
691
692 /* Explicitly power off if requested */
693 if (!of_property_read_bool(pdev->dev.of_node, "arche,init-off")) {
694 mutex_lock(&arche_pdata->platform_state_mutex);
695 ret = arche_platform_coldboot_seq(arche_pdata);
696 if (ret) {
697 dev_err(dev, "Failed to cold boot svc %d\n", ret);
698 goto err_coldboot;
699 }
700 arche_platform_wd_irq_en(arche_pdata);
701 mutex_unlock(&arche_pdata->platform_state_mutex);
702 }
703
704 dev_info(dev, "Device registered successfully\n");
705 return 0;
706
707err_coldboot:
708 mutex_unlock(&arche_pdata->platform_state_mutex);
709err_device_remove:
710 device_remove_file(&pdev->dev, &dev_attr_state);
711 return ret;
712}
713
/* device_for_each_child() callback: unregister one APB child device. */
static int arche_remove_child(struct device *dev, void *unused)
{
	struct platform_device *pdev = to_platform_device(dev);

	platform_device_unregister(pdev);

	return 0;
}
722
/*
 * Platform-driver remove: undo probe in reverse order — PM notifier,
 * sysfs attribute, child devices — then power the SVC off and take the
 * HUB3613 out of hub mode.
 */
static int arche_platform_remove(struct platform_device *pdev)
{
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);

	unregister_pm_notifier(&arche_pdata->pm_notifier);
	device_remove_file(&pdev->dev, &dev_attr_state);
	device_for_each_child(&pdev->dev, NULL, arche_remove_child);
	arche_platform_poweroff_seq(arche_pdata);
	platform_set_drvdata(pdev, NULL);

	if (usb3613_hub_mode_ctrl(false))
		dev_warn(arche_pdata->dev, "failed to control hub device\n");
	/* TODO: Should we do anything more here ?? */
	return 0;
}
738
/*
 * System-suspend hook: currently a no-op placeholder.
 *
 * If timing profile permits, we may shutdown bridge
 * completely
 *
 * TODO: sequence ??
 *
 * Also, need to make sure we meet precondition for unipro suspend
 * Precondition: Definition ???
 */
static int arche_platform_suspend(struct device *dev)
{
	return 0;
}
752
/*
 * System-resume hook: currently a no-op placeholder.
 *
 * At least for ES2 we have to meet the delay requirement between
 * unipro switch and AP bridge init, depending on whether bridge is in
 * OFF state or standby state.
 *
 * Based on whether bridge is in standby or OFF state we may have to
 * assert multiple signals. Please refer to WDM spec, for more info.
 */
static int arche_platform_resume(struct device *dev)
{
	return 0;
}
766
/*
 * Shutdown hook: power the SVC off and take the HUB3613 out of hub mode
 * on system halt/reboot; the hub-control return value is ignored here.
 */
static void arche_platform_shutdown(struct platform_device *pdev)
{
	struct arche_platform_drvdata *arche_pdata = platform_get_drvdata(pdev);

	arche_platform_poweroff_seq(arche_pdata);

	usb3613_hub_mode_ctrl(false);
}
775
776static SIMPLE_DEV_PM_OPS(arche_platform_pm_ops,
777 arche_platform_suspend,
778 arche_platform_resume);
779
780static struct of_device_id arche_platform_of_match[] = {
781 { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
782 { },
783};
784
785static struct of_device_id arche_combined_id[] = {
786 { .compatible = "google,arche-platform", }, /* Use PID/VID of SVC device */
787 { .compatible = "usbffff,2", },
788 { },
789};
790MODULE_DEVICE_TABLE(of, arche_combined_id);
791
/* Platform-driver glue; PM ops are currently no-op placeholders. */
static struct platform_driver arche_platform_device_driver = {
	.probe		= arche_platform_probe,
	.remove		= arche_platform_remove,
	.shutdown	= arche_platform_shutdown,
	.driver		= {
		.name	= "arche-platform-ctrl",
		.pm	= &arche_platform_pm_ops,
		.of_match_table = arche_platform_of_match,
	}
};
802
803static int __init arche_init(void)
804{
805 int retval;
806
807 retval = platform_driver_register(&arche_platform_device_driver);
808 if (retval)
809 return retval;
810
811 retval = arche_apb_init();
812 if (retval)
813 platform_driver_unregister(&arche_platform_device_driver);
814
815 return retval;
816}
817module_init(arche_init);
818
/* Module exit: unregister the two drivers in reverse order of arche_init(). */
static void __exit arche_exit(void)
{
	arche_apb_exit();
	platform_driver_unregister(&arche_platform_device_driver);
}
module_exit(arche_exit);
825
826MODULE_LICENSE("GPL v2");
827MODULE_AUTHOR("Vaibhav Hiremath <vaibhav.hiremath@linaro.org>");
828MODULE_DESCRIPTION("Arche Platform Driver");
diff --git a/drivers/staging/greybus/arche_platform.h b/drivers/staging/greybus/arche_platform.h
new file mode 100644
index 000000000000..bd12345b82a2
--- /dev/null
+++ b/drivers/staging/greybus/arche_platform.h
@@ -0,0 +1,39 @@
/*
 * Arche Platform driver to enable Unipro link.
 *
 * Copyright 2015-2016 Google Inc.
 * Copyright 2015-2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#ifndef __ARCHE_PLATFORM_H
#define __ARCHE_PLATFORM_H

#include "timesync.h"

/* Operational states of the arche platform (SVC-side) state machine */
enum arche_platform_state {
	ARCHE_PLATFORM_STATE_OFF,
	ARCHE_PLATFORM_STATE_ACTIVE,
	ARCHE_PLATFORM_STATE_STANDBY,
	ARCHE_PLATFORM_STATE_FW_FLASHING,
	ARCHE_PLATFORM_STATE_TIME_SYNC,
};

/*
 * Request a state transition; only TIME_SYNC <-> ACTIVE transitions are
 * supported — see arche-platform.c for details and return values.
 */
int arche_platform_change_state(enum arche_platform_state state,
				struct gb_timesync_svc *pdata);

/* Callback pointer installed by the arche-platform driver at probe time */
extern int (*arche_platform_change_state_cb)(enum arche_platform_state state,
					     struct gb_timesync_svc *pdata);
/* Registration entry points for the companion APB control driver */
int __init arche_apb_init(void);
void __exit arche_apb_exit(void);

/* Operational states for the APB device */
int apb_ctrl_coldboot(struct device *dev);
int apb_ctrl_fw_flashing(struct device *dev);
int apb_ctrl_standby_boot(struct device *dev);
void apb_ctrl_poweroff(struct device *dev);
void apb_bootret_assert(struct device *dev);
void apb_bootret_deassert(struct device *dev);

#endif /* __ARCHE_PLATFORM_H */
diff --git a/drivers/staging/greybus/arpc.h b/drivers/staging/greybus/arpc.h
new file mode 100644
index 000000000000..7fbddfc40d83
--- /dev/null
+++ b/drivers/staging/greybus/arpc.h
@@ -0,0 +1,109 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2016 Google Inc. All rights reserved.
8 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License version 2 for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2016 Google Inc. All rights reserved.
22 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
35 * its contributors may be used to endorse or promote products
36 * derived from this software without specific prior written
37 * permission.
38 *
39 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
40 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
41 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
42 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
43 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
44 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
45 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
46 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
47 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
48 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
49 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50 */
51
52#ifndef __ARPC_H
53#define __ARPC_H
54
55/* APBridgeA RPC (ARPC) */
56
57enum arpc_result {
58 ARPC_SUCCESS = 0x00,
59 ARPC_NO_MEMORY = 0x01,
60 ARPC_INVALID = 0x02,
61 ARPC_TIMEOUT = 0x03,
62 ARPC_UNKNOWN_ERROR = 0xff,
63};
64
65struct arpc_request_message {
66 __le16 id; /* RPC unique id */
67 __le16 size; /* Size in bytes of header + payload */
68 __u8 type; /* RPC type */
69 __u8 data[0]; /* ARPC data */
70} __packed;
71
72struct arpc_response_message {
73 __le16 id; /* RPC unique id */
74 __u8 result; /* Result of RPC */
75} __packed;
76
77
78/* ARPC requests */
79#define ARPC_TYPE_CPORT_CONNECTED 0x01
80#define ARPC_TYPE_CPORT_QUIESCE 0x02
81#define ARPC_TYPE_CPORT_CLEAR 0x03
82#define ARPC_TYPE_CPORT_FLUSH 0x04
83#define ARPC_TYPE_CPORT_SHUTDOWN 0x05
84
85struct arpc_cport_connected_req {
86 __le16 cport_id;
87} __packed;
88
89struct arpc_cport_quiesce_req {
90 __le16 cport_id;
91 __le16 peer_space;
92 __le16 timeout;
93} __packed;
94
95struct arpc_cport_clear_req {
96 __le16 cport_id;
97} __packed;
98
99struct arpc_cport_flush_req {
100 __le16 cport_id;
101} __packed;
102
103struct arpc_cport_shutdown_req {
104 __le16 cport_id;
105 __le16 timeout;
106 __u8 phase;
107} __packed;
108
109#endif /* __ARPC_H */
diff --git a/drivers/staging/greybus/audio_apbridgea.c b/drivers/staging/greybus/audio_apbridgea.c
new file mode 100644
index 000000000000..1b4252d5d255
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.c
@@ -0,0 +1,207 @@
1/*
2 * Greybus Audio Device Class Protocol helpers
3 *
4 * Copyright 2015-2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include "greybus.h"
10#include "greybus_protocols.h"
11#include "audio_apbridgea.h"
12#include "audio_codec.h"
13
14int gb_audio_apbridgea_set_config(struct gb_connection *connection,
15 __u16 i2s_port, __u32 format, __u32 rate,
16 __u32 mclk_freq)
17{
18 struct audio_apbridgea_set_config_request req;
19
20 req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
21 req.hdr.i2s_port = cpu_to_le16(i2s_port);
22 req.format = cpu_to_le32(format);
23 req.rate = cpu_to_le32(rate);
24 req.mclk_freq = cpu_to_le32(mclk_freq);
25
26 return gb_hd_output(connection->hd, &req, sizeof(req),
27 GB_APB_REQUEST_AUDIO_CONTROL, true);
28}
29EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_config);
30
31int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
32 __u16 i2s_port, __u16 cportid,
33 __u8 direction)
34{
35 struct audio_apbridgea_register_cport_request req;
36 int ret;
37
38 req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
39 req.hdr.i2s_port = cpu_to_le16(i2s_port);
40 req.cport = cpu_to_le16(cportid);
41 req.direction = direction;
42
43 ret = gb_pm_runtime_get_sync(connection->bundle);
44 if (ret)
45 return ret;
46
47 return gb_hd_output(connection->hd, &req, sizeof(req),
48 GB_APB_REQUEST_AUDIO_CONTROL, true);
49}
50EXPORT_SYMBOL_GPL(gb_audio_apbridgea_register_cport);
51
52int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
53 __u16 i2s_port, __u16 cportid,
54 __u8 direction)
55{
56 struct audio_apbridgea_unregister_cport_request req;
57 int ret;
58
59 req.hdr.type = AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT;
60 req.hdr.i2s_port = cpu_to_le16(i2s_port);
61 req.cport = cpu_to_le16(cportid);
62 req.direction = direction;
63
64 ret = gb_hd_output(connection->hd, &req, sizeof(req),
65 GB_APB_REQUEST_AUDIO_CONTROL, true);
66
67 gb_pm_runtime_put_autosuspend(connection->bundle);
68
69 return ret;
70}
71EXPORT_SYMBOL_GPL(gb_audio_apbridgea_unregister_cport);
72
73int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
74 __u16 i2s_port, __u16 size)
75{
76 struct audio_apbridgea_set_tx_data_size_request req;
77
78 req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE;
79 req.hdr.i2s_port = cpu_to_le16(i2s_port);
80 req.size = cpu_to_le16(size);
81
82 return gb_hd_output(connection->hd, &req, sizeof(req),
83 GB_APB_REQUEST_AUDIO_CONTROL, true);
84}
85EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_tx_data_size);
86
87int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
88 __u16 i2s_port)
89{
90 struct audio_apbridgea_prepare_tx_request req;
91
92 req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_TX;
93 req.hdr.i2s_port = cpu_to_le16(i2s_port);
94
95 return gb_hd_output(connection->hd, &req, sizeof(req),
96 GB_APB_REQUEST_AUDIO_CONTROL, true);
97}
98EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_tx);
99
100int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
101 __u16 i2s_port, __u64 timestamp)
102{
103 struct audio_apbridgea_start_tx_request req;
104
105 req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_TX;
106 req.hdr.i2s_port = cpu_to_le16(i2s_port);
107 req.timestamp = cpu_to_le64(timestamp);
108
109 return gb_hd_output(connection->hd, &req, sizeof(req),
110 GB_APB_REQUEST_AUDIO_CONTROL, true);
111}
112EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_tx);
113
114int gb_audio_apbridgea_stop_tx(struct gb_connection *connection, __u16 i2s_port)
115{
116 struct audio_apbridgea_stop_tx_request req;
117
118 req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_TX;
119 req.hdr.i2s_port = cpu_to_le16(i2s_port);
120
121 return gb_hd_output(connection->hd, &req, sizeof(req),
122 GB_APB_REQUEST_AUDIO_CONTROL, true);
123}
124EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_tx);
125
126int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
127 __u16 i2s_port)
128{
129 struct audio_apbridgea_shutdown_tx_request req;
130
131 req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX;
132 req.hdr.i2s_port = cpu_to_le16(i2s_port);
133
134 return gb_hd_output(connection->hd, &req, sizeof(req),
135 GB_APB_REQUEST_AUDIO_CONTROL, true);
136}
137EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_tx);
138
139int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
140 __u16 i2s_port, __u16 size)
141{
142 struct audio_apbridgea_set_rx_data_size_request req;
143
144 req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE;
145 req.hdr.i2s_port = cpu_to_le16(i2s_port);
146 req.size = cpu_to_le16(size);
147
148 return gb_hd_output(connection->hd, &req, sizeof(req),
149 GB_APB_REQUEST_AUDIO_CONTROL, true);
150}
151EXPORT_SYMBOL_GPL(gb_audio_apbridgea_set_rx_data_size);
152
153int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
154 __u16 i2s_port)
155{
156 struct audio_apbridgea_prepare_rx_request req;
157
158 req.hdr.type = AUDIO_APBRIDGEA_TYPE_PREPARE_RX;
159 req.hdr.i2s_port = cpu_to_le16(i2s_port);
160
161 return gb_hd_output(connection->hd, &req, sizeof(req),
162 GB_APB_REQUEST_AUDIO_CONTROL, true);
163}
164EXPORT_SYMBOL_GPL(gb_audio_apbridgea_prepare_rx);
165
166int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
167 __u16 i2s_port)
168{
169 struct audio_apbridgea_start_rx_request req;
170
171 req.hdr.type = AUDIO_APBRIDGEA_TYPE_START_RX;
172 req.hdr.i2s_port = cpu_to_le16(i2s_port);
173
174 return gb_hd_output(connection->hd, &req, sizeof(req),
175 GB_APB_REQUEST_AUDIO_CONTROL, true);
176}
177EXPORT_SYMBOL_GPL(gb_audio_apbridgea_start_rx);
178
179int gb_audio_apbridgea_stop_rx(struct gb_connection *connection, __u16 i2s_port)
180{
181 struct audio_apbridgea_stop_rx_request req;
182
183 req.hdr.type = AUDIO_APBRIDGEA_TYPE_STOP_RX;
184 req.hdr.i2s_port = cpu_to_le16(i2s_port);
185
186 return gb_hd_output(connection->hd, &req, sizeof(req),
187 GB_APB_REQUEST_AUDIO_CONTROL, true);
188}
189EXPORT_SYMBOL_GPL(gb_audio_apbridgea_stop_rx);
190
191int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
192 __u16 i2s_port)
193{
194 struct audio_apbridgea_shutdown_rx_request req;
195
196 req.hdr.type = AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX;
197 req.hdr.i2s_port = cpu_to_le16(i2s_port);
198
199 return gb_hd_output(connection->hd, &req, sizeof(req),
200 GB_APB_REQUEST_AUDIO_CONTROL, true);
201}
202EXPORT_SYMBOL_GPL(gb_audio_apbridgea_shutdown_rx);
203
204MODULE_LICENSE("GPL v2");
205MODULE_ALIAS("greybus:audio-apbridgea");
206MODULE_DESCRIPTION("Greybus Special APBridgeA Audio Protocol library");
207MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
new file mode 100644
index 000000000000..b94cb05c89e4
--- /dev/null
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -0,0 +1,156 @@
1/**
2 * Copyright (c) 2015-2016 Google Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 * 1. Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * 3. Neither the name of the copyright holder nor the names of its
13 * contributors may be used to endorse or promote products derived from this
14 * software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
18 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
20 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
25 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
26 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28/*
29 * This is a special protocol for configuring communication over the
30 * I2S bus between the DSP on the MSM8994 and APBridgeA. Therefore,
31 * we can predefine several low-level attributes of the communication
32 * because we know that they are supported. In particular, the following
33 * assumptions are made:
34 * - there are two channels (i.e., stereo)
35 * - the low-level protocol is I2S as defined by Philips/NXP
36 * - the DSP on the MSM8994 is the clock master for MCLK, BCLK, and WCLK
37 * - WCLK changes on the falling edge of BCLK
38 * - WCLK low for left channel; high for right channel
39 * - TX data is sent on the falling edge of BCLK
40 * - RX data is received/latched on the rising edge of BCLK
41 */
42
43#ifndef __AUDIO_APBRIDGEA_H
44#define __AUDIO_APBRIDGEA_H
45
46#define AUDIO_APBRIDGEA_TYPE_SET_CONFIG 0x01
47#define AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT 0x02
48#define AUDIO_APBRIDGEA_TYPE_UNREGISTER_CPORT 0x03
49#define AUDIO_APBRIDGEA_TYPE_SET_TX_DATA_SIZE 0x04
50 /* 0x05 unused */
51#define AUDIO_APBRIDGEA_TYPE_PREPARE_TX 0x06
52#define AUDIO_APBRIDGEA_TYPE_START_TX 0x07
53#define AUDIO_APBRIDGEA_TYPE_STOP_TX 0x08
54#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_TX 0x09
55#define AUDIO_APBRIDGEA_TYPE_SET_RX_DATA_SIZE 0x0a
56 /* 0x0b unused */
57#define AUDIO_APBRIDGEA_TYPE_PREPARE_RX 0x0c
58#define AUDIO_APBRIDGEA_TYPE_START_RX 0x0d
59#define AUDIO_APBRIDGEA_TYPE_STOP_RX 0x0e
60#define AUDIO_APBRIDGEA_TYPE_SHUTDOWN_RX 0x0f
61
62#define AUDIO_APBRIDGEA_PCM_FMT_8 BIT(0)
63#define AUDIO_APBRIDGEA_PCM_FMT_16 BIT(1)
64#define AUDIO_APBRIDGEA_PCM_FMT_24 BIT(2)
65#define AUDIO_APBRIDGEA_PCM_FMT_32 BIT(3)
66#define AUDIO_APBRIDGEA_PCM_FMT_64 BIT(4)
67
68#define AUDIO_APBRIDGEA_PCM_RATE_5512 BIT(0)
69#define AUDIO_APBRIDGEA_PCM_RATE_8000 BIT(1)
70#define AUDIO_APBRIDGEA_PCM_RATE_11025 BIT(2)
71#define AUDIO_APBRIDGEA_PCM_RATE_16000 BIT(3)
72#define AUDIO_APBRIDGEA_PCM_RATE_22050 BIT(4)
73#define AUDIO_APBRIDGEA_PCM_RATE_32000 BIT(5)
74#define AUDIO_APBRIDGEA_PCM_RATE_44100 BIT(6)
75#define AUDIO_APBRIDGEA_PCM_RATE_48000 BIT(7)
76#define AUDIO_APBRIDGEA_PCM_RATE_64000 BIT(8)
77#define AUDIO_APBRIDGEA_PCM_RATE_88200 BIT(9)
78#define AUDIO_APBRIDGEA_PCM_RATE_96000 BIT(10)
79#define AUDIO_APBRIDGEA_PCM_RATE_176400 BIT(11)
80#define AUDIO_APBRIDGEA_PCM_RATE_192000 BIT(12)
81
82#define AUDIO_APBRIDGEA_DIRECTION_TX BIT(0)
83#define AUDIO_APBRIDGEA_DIRECTION_RX BIT(1)
84
85/* The I2S port is passed in the 'index' parameter of the USB request */
86/* The CPort is passed in the 'value' parameter of the USB request */
87
/*
 * Common header prefixed to every APBridgeA audio request: 'type' is one of
 * the AUDIO_APBRIDGEA_TYPE_* values and 'i2s_port' selects the bridge's
 * I2S port; type-specific payload follows in data[].
 */
struct audio_apbridgea_hdr {
	__u8 type;
	__le16 i2s_port;
	__u8 data[0];
} __packed;
93
/* SET_CONFIG payload: bitmask format/rate plus the MCLK frequency in Hz. */
struct audio_apbridgea_set_config_request {
	struct audio_apbridgea_hdr hdr;
	__le32 format; /* AUDIO_APBRIDGEA_PCM_FMT_* */
	__le32 rate; /* AUDIO_APBRIDGEA_PCM_RATE_* */
	__le32 mclk_freq; /* XXX Remove? */
} __packed;

/* REGISTER_CPORT payload; direction is AUDIO_APBRIDGEA_DIRECTION_*. */
struct audio_apbridgea_register_cport_request {
	struct audio_apbridgea_hdr hdr;
	__le16 cport;
	__u8 direction;
} __packed;

/* UNREGISTER_CPORT payload; direction is AUDIO_APBRIDGEA_DIRECTION_*. */
struct audio_apbridgea_unregister_cport_request {
	struct audio_apbridgea_hdr hdr;
	__le16 cport;
	__u8 direction;
} __packed;

/* SET_TX_DATA_SIZE payload: per-message TX payload size in bytes. */
struct audio_apbridgea_set_tx_data_size_request {
	struct audio_apbridgea_hdr hdr;
	__le16 size;
} __packed;
117
/*
 * The remaining requests carry either no payload beyond the common header
 * (prepare/stop/shutdown for each direction) or a single extra field
 * (start_tx timestamp, set_rx_data_size size).
 */
struct audio_apbridgea_prepare_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

struct audio_apbridgea_start_tx_request {
	struct audio_apbridgea_hdr hdr;
	__le64 timestamp;
} __packed;

struct audio_apbridgea_stop_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

struct audio_apbridgea_shutdown_tx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

/* Per-message RX payload size in bytes. */
struct audio_apbridgea_set_rx_data_size_request {
	struct audio_apbridgea_hdr hdr;
	__le16 size;
} __packed;

struct audio_apbridgea_prepare_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

struct audio_apbridgea_start_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

struct audio_apbridgea_stop_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;

struct audio_apbridgea_shutdown_rx_request {
	struct audio_apbridgea_hdr hdr;
} __packed;
155
156#endif /*__AUDIO_APBRIDGEA_H */
diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
new file mode 100644
index 000000000000..2f70295e0094
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.c
@@ -0,0 +1,1132 @@
1/*
2 * APBridge ALSA SoC dummy codec driver
3 * Copyright 2016 Google Inc.
4 * Copyright 2016 Linaro Ltd.
5 *
6 * Released under the GPLv2 only.
7 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/string.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <uapi/linux/input.h>

#include "audio_codec.h"
#include "audio_apbridgea.h"
#include "audio_manager.h"
18
19static struct gbaudio_codec_info *gbcodec;
20
21static struct gbaudio_data_connection *
22find_data(struct gbaudio_module_info *module, int id)
23{
24 struct gbaudio_data_connection *data;
25
26 list_for_each_entry(data, &module->data_list, list) {
27 if (id == data->id)
28 return data;
29 }
30 return NULL;
31}
32
33static struct gbaudio_stream_params *
34find_dai_stream_params(struct gbaudio_codec_info *codec, int id, int stream)
35{
36 struct gbaudio_codec_dai *dai;
37
38 list_for_each_entry(dai, &codec->dai_list, list) {
39 if (dai->id == id)
40 return &dai->params[stream];
41 }
42 return NULL;
43}
44
/*
 * Bring a module's TX (playback) path up to the PREPARE state.
 *
 * The per-stream state machine is advanced in order, skipping any step the
 * saved state says was already reached:
 *   < STARTUP  : register the data cport with APBridgeA
 *   < HWPARAMS : push cached PCM format/rate/channels to the module
 *   < PREPARE  : set TX data size (192 bytes) and activate TX
 * Each check compares against the state captured on entry, so a cold start
 * runs all three steps.  Caller holds codec->lock (see
 * gbaudio_module_update()).  Returns 0 or a negative errno; on error the
 * state reflects the last step that completed.
 */
static int gbaudio_module_enable_tx(struct gbaudio_codec_info *codec,
				    struct gbaudio_module_info *module, int id)
{
	int module_state, ret = 0;
	uint16_t data_cport, i2s_port, cportid;
	uint8_t sig_bits, channels;
	uint32_t format, rate;
	struct gbaudio_data_connection *data;
	struct gbaudio_stream_params *params;

	/* find the dai */
	data = find_data(module, id);
	if (!data) {
		dev_err(module->dev, "%d:DATA connection missing\n", id);
		return -ENODEV;
	}
	module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];

	/* parameters were cached by gbcodec_hw_params() */
	params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_PLAYBACK);
	if (!params) {
		dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
		return -EINVAL;
	}

	/* register cport */
	if (module_state < GBAUDIO_CODEC_STARTUP) {
		i2s_port = 0;	/* fixed for now */
		cportid = data->connection->hd_cport_id;
		ret = gb_audio_apbridgea_register_cport(data->connection,
						i2s_port, cportid,
						AUDIO_APBRIDGEA_DIRECTION_TX);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "reg_cport failed:%d\n", ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_PLAYBACK] =
			GBAUDIO_CODEC_STARTUP;
		dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
	}

	/* hw_params */
	if (module_state < GBAUDIO_CODEC_HWPARAMS) {
		format = params->format;
		channels = params->channels;
		rate = params->rate;
		sig_bits = params->sig_bits;
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
					  format, rate, channels, sig_bits);
		if (ret) {
			dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
					    ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_PLAYBACK] =
			GBAUDIO_CODEC_HWPARAMS;
		dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
	}

	/* prepare */
	if (module_state < GBAUDIO_CODEC_PREPARE) {
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_set_tx_data_size(module->mgmt_connection,
						   data_cport, 192);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "set_tx_data_size failed:%d\n",
					    ret);
			return ret;
		}
		ret = gb_audio_gb_activate_tx(module->mgmt_connection,
					      data_cport);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "activate_tx failed:%d\n", ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_PLAYBACK] =
			GBAUDIO_CODEC_PREPARE;
		dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
	}

	return 0;
}
130
/*
 * Tear a module's TX (playback) path down from its current state.
 *
 * Mirrors gbaudio_module_enable_tx() in reverse, with both checks made
 * against the state captured on entry:
 *   > HWPARAMS : deactivate TX on the module (state falls to HWPARAMS)
 *   > SHUTDOWN : unregister the data cport from APBridgeA (to SHUTDOWN)
 * Caller holds codec->lock.  Returns 0 or a negative errno.
 */
static int gbaudio_module_disable_tx(struct gbaudio_module_info *module, int id)
{
	int ret;
	uint16_t data_cport, cportid, i2s_port;
	int module_state;
	struct gbaudio_data_connection *data;

	/* find the dai */
	data = find_data(module, id);
	if (!data) {
		dev_err(module->dev, "%d:DATA connection missing\n", id);
		return -ENODEV;
	}
	module_state = data->state[SNDRV_PCM_STREAM_PLAYBACK];

	if (module_state > GBAUDIO_CODEC_HWPARAMS) {
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_deactivate_tx(module->mgmt_connection,
						data_cport);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "deactivate_tx failed:%d\n", ret);
			return ret;
		}
		dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
		data->state[SNDRV_PCM_STREAM_PLAYBACK] =
			GBAUDIO_CODEC_HWPARAMS;
	}

	if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
		i2s_port = 0;	/* fixed for now */
		cportid = data->connection->hd_cport_id;
		/* also drops the runtime PM ref taken at register time */
		ret = gb_audio_apbridgea_unregister_cport(data->connection,
						i2s_port, cportid,
						AUDIO_APBRIDGEA_DIRECTION_TX);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "unregister_cport failed:%d\n",
					    ret);
			return ret;
		}
		dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
		data->state[SNDRV_PCM_STREAM_PLAYBACK] =
			GBAUDIO_CODEC_SHUTDOWN;
	}

	return 0;
}
179
/*
 * Bring a module's RX (capture) path up to the PREPARE state.
 *
 * Identical structure to gbaudio_module_enable_tx() but operating on the
 * CAPTURE stream state and the RX direction/commands:
 *   < STARTUP  : register the data cport with APBridgeA (RX direction)
 *   < HWPARAMS : push cached PCM format/rate/channels to the module
 *   < PREPARE  : set RX data size (192 bytes) and activate RX
 * Caller holds codec->lock.  Returns 0 or a negative errno.
 */
static int gbaudio_module_enable_rx(struct gbaudio_codec_info *codec,
				    struct gbaudio_module_info *module, int id)
{
	int module_state, ret = 0;
	uint16_t data_cport, i2s_port, cportid;
	uint8_t sig_bits, channels;
	uint32_t format, rate;
	struct gbaudio_data_connection *data;
	struct gbaudio_stream_params *params;

	/* find the dai */
	data = find_data(module, id);
	if (!data) {
		dev_err(module->dev, "%d:DATA connection missing\n", id);
		return -ENODEV;
	}
	module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];

	/* parameters were cached by gbcodec_hw_params() */
	params = find_dai_stream_params(codec, id, SNDRV_PCM_STREAM_CAPTURE);
	if (!params) {
		dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
		return -EINVAL;
	}

	/* register cport */
	if (module_state < GBAUDIO_CODEC_STARTUP) {
		i2s_port = 0;	/* fixed for now */
		cportid = data->connection->hd_cport_id;
		ret = gb_audio_apbridgea_register_cport(data->connection,
						i2s_port, cportid,
						AUDIO_APBRIDGEA_DIRECTION_RX);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "reg_cport failed:%d\n", ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_CAPTURE] =
			GBAUDIO_CODEC_STARTUP;
		dev_dbg(module->dev, "Dynamic Register %d DAI\n", cportid);
	}

	/* hw_params */
	if (module_state < GBAUDIO_CODEC_HWPARAMS) {
		format = params->format;
		channels = params->channels;
		rate = params->rate;
		sig_bits = params->sig_bits;
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_set_pcm(module->mgmt_connection, data_cport,
					  format, rate, channels, sig_bits);
		if (ret) {
			dev_err_ratelimited(module->dev, "set_pcm failed:%d\n",
					    ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_CAPTURE] =
			GBAUDIO_CODEC_HWPARAMS;
		dev_dbg(module->dev, "Dynamic hw_params %d DAI\n", data_cport);
	}

	/* prepare */
	if (module_state < GBAUDIO_CODEC_PREPARE) {
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_set_rx_data_size(module->mgmt_connection,
						   data_cport, 192);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "set_rx_data_size failed:%d\n",
					    ret);
			return ret;
		}
		ret = gb_audio_gb_activate_rx(module->mgmt_connection,
					      data_cport);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "activate_rx failed:%d\n", ret);
			return ret;
		}
		data->state[SNDRV_PCM_STREAM_CAPTURE] =
			GBAUDIO_CODEC_PREPARE;
		dev_dbg(module->dev, "Dynamic prepare %d DAI\n", data_cport);
	}

	return 0;
}
265
/*
 * Tear a module's RX (capture) path down from its current state.
 *
 * Mirrors gbaudio_module_enable_rx() in reverse, with both checks made
 * against the state captured on entry:
 *   > HWPARAMS : deactivate RX on the module (state falls to HWPARAMS)
 *   > SHUTDOWN : unregister the data cport from APBridgeA (to SHUTDOWN)
 * Caller holds codec->lock.  Returns 0 or a negative errno.
 */
static int gbaudio_module_disable_rx(struct gbaudio_module_info *module, int id)
{
	int ret;
	uint16_t data_cport, cportid, i2s_port;
	int module_state;
	struct gbaudio_data_connection *data;

	/* find the dai */
	data = find_data(module, id);
	if (!data) {
		dev_err(module->dev, "%d:DATA connection missing\n", id);
		return -ENODEV;
	}
	module_state = data->state[SNDRV_PCM_STREAM_CAPTURE];

	if (module_state > GBAUDIO_CODEC_HWPARAMS) {
		data_cport = data->connection->intf_cport_id;
		ret = gb_audio_gb_deactivate_rx(module->mgmt_connection,
						data_cport);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "deactivate_rx failed:%d\n", ret);
			return ret;
		}
		dev_dbg(module->dev, "Dynamic deactivate %d DAI\n", data_cport);
		data->state[SNDRV_PCM_STREAM_CAPTURE] =
			GBAUDIO_CODEC_HWPARAMS;
	}

	if (module_state > GBAUDIO_CODEC_SHUTDOWN) {
		i2s_port = 0;	/* fixed for now */
		cportid = data->connection->hd_cport_id;
		/* also drops the runtime PM ref taken at register time */
		ret = gb_audio_apbridgea_unregister_cport(data->connection,
						i2s_port, cportid,
						AUDIO_APBRIDGEA_DIRECTION_RX);
		if (ret) {
			dev_err_ratelimited(module->dev,
					    "unregister_cport failed:%d\n",
					    ret);
			return ret;
		}
		dev_dbg(module->dev, "Dynamic Unregister %d DAI\n", cportid);
		data->state[SNDRV_PCM_STREAM_CAPTURE] =
			GBAUDIO_CODEC_SHUTDOWN;
	}

	return 0;
}
314
315int gbaudio_module_update(struct gbaudio_codec_info *codec,
316 struct snd_soc_dapm_widget *w,
317 struct gbaudio_module_info *module, int enable)
318{
319 int dai_id, ret;
320 char intf_name[NAME_SIZE], dir[NAME_SIZE];
321
322 dev_dbg(module->dev, "%s:Module update %s sequence\n", w->name,
323 enable ? "Enable":"Disable");
324
325 if ((w->id != snd_soc_dapm_aif_in) && (w->id != snd_soc_dapm_aif_out)){
326 dev_dbg(codec->dev, "No action required for %s\n", w->name);
327 return 0;
328 }
329
330 /* parse dai_id from AIF widget's stream_name */
331 ret = sscanf(w->sname, "%s %d %s", intf_name, &dai_id, dir);
332 if (ret < 3) {
333 dev_err(codec->dev, "Error while parsing dai_id for %s\n",
334 w->name);
335 return -EINVAL;
336 }
337
338 mutex_lock(&codec->lock);
339 if (w->id == snd_soc_dapm_aif_in) {
340 if (enable)
341 ret = gbaudio_module_enable_tx(codec, module, dai_id);
342 else
343 ret = gbaudio_module_disable_tx(module, dai_id);
344 } else if (w->id == snd_soc_dapm_aif_out) {
345 if (enable)
346 ret = gbaudio_module_enable_rx(codec, module, dai_id);
347 else
348 ret = gbaudio_module_disable_rx(module, dai_id);
349 }
350
351 mutex_unlock(&codec->lock);
352
353 return ret;
354}
355EXPORT_SYMBOL(gbaudio_module_update);
356
357/*
358 * codec DAI ops
359 */
360static int gbcodec_startup(struct snd_pcm_substream *substream,
361 struct snd_soc_dai *dai)
362{
363 struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
364 struct gbaudio_stream_params *params;
365
366 mutex_lock(&codec->lock);
367
368 if (list_empty(&codec->module_list)) {
369 dev_err(codec->dev, "No codec module available\n");
370 mutex_unlock(&codec->lock);
371 return -ENODEV;
372 }
373
374 params = find_dai_stream_params(codec, dai->id, substream->stream);
375 if (!params) {
376 dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
377 mutex_unlock(&codec->lock);
378 return -EINVAL;
379 }
380 params->state = GBAUDIO_CODEC_STARTUP;
381 mutex_unlock(&codec->lock);
382 /* to prevent suspend in case of active audio */
383 pm_stay_awake(dai->dev);
384
385 return 0;
386}
387
388static void gbcodec_shutdown(struct snd_pcm_substream *substream,
389 struct snd_soc_dai *dai)
390{
391 struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
392 struct gbaudio_stream_params *params;
393
394 mutex_lock(&codec->lock);
395
396 if (list_empty(&codec->module_list))
397 dev_info(codec->dev, "No codec module available during shutdown\n");
398
399 params = find_dai_stream_params(codec, dai->id, substream->stream);
400 if (!params) {
401 dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
402 mutex_unlock(&codec->lock);
403 return;
404 }
405 params->state = GBAUDIO_CODEC_SHUTDOWN;
406 mutex_unlock(&codec->lock);
407 pm_relax(dai->dev);
408 return;
409}
410
411static int gbcodec_hw_params(struct snd_pcm_substream *substream,
412 struct snd_pcm_hw_params *hwparams,
413 struct snd_soc_dai *dai)
414{
415 int ret;
416 uint8_t sig_bits, channels;
417 uint32_t format, rate;
418 struct gbaudio_module_info *module;
419 struct gbaudio_data_connection *data;
420 struct gb_bundle *bundle;
421 struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
422 struct gbaudio_stream_params *params;
423
424 mutex_lock(&codec->lock);
425
426 if (list_empty(&codec->module_list)) {
427 dev_err(codec->dev, "No codec module available\n");
428 mutex_unlock(&codec->lock);
429 return -ENODEV;
430 }
431
432 /*
433 * assuming, currently only 48000 Hz, 16BIT_LE, stereo
434 * is supported, validate params before configuring codec
435 */
436 if (params_channels(hwparams) != 2) {
437 dev_err(dai->dev, "Invalid channel count:%d\n",
438 params_channels(hwparams));
439 mutex_unlock(&codec->lock);
440 return -EINVAL;
441 }
442 channels = params_channels(hwparams);
443
444 if (params_rate(hwparams) != 48000) {
445 dev_err(dai->dev, "Invalid sampling rate:%d\n",
446 params_rate(hwparams));
447 mutex_unlock(&codec->lock);
448 return -EINVAL;
449 }
450 rate = GB_AUDIO_PCM_RATE_48000;
451
452 if (params_format(hwparams) != SNDRV_PCM_FORMAT_S16_LE) {
453 dev_err(dai->dev, "Invalid format:%d\n",
454 params_format(hwparams));
455 mutex_unlock(&codec->lock);
456 return -EINVAL;
457 }
458 format = GB_AUDIO_PCM_FMT_S16_LE;
459
460 /* find the data connection */
461 list_for_each_entry(module, &codec->module_list, list) {
462 data = find_data(module, dai->id);
463 if (data)
464 break;
465 }
466
467 if (!data) {
468 dev_err(dai->dev, "DATA connection missing\n");
469 mutex_unlock(&codec->lock);
470 return -EINVAL;
471 }
472
473 params = find_dai_stream_params(codec, dai->id, substream->stream);
474 if (!params) {
475 dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
476 mutex_unlock(&codec->lock);
477 return -EINVAL;
478 }
479
480 bundle = to_gb_bundle(module->dev);
481 ret = gb_pm_runtime_get_sync(bundle);
482 if (ret) {
483 mutex_unlock(&codec->lock);
484 return ret;
485 }
486
487 ret = gb_audio_apbridgea_set_config(data->connection, 0,
488 AUDIO_APBRIDGEA_PCM_FMT_16,
489 AUDIO_APBRIDGEA_PCM_RATE_48000,
490 6144000);
491 if (ret) {
492 dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
493 ret);
494 mutex_unlock(&codec->lock);
495 return ret;
496 }
497
498 gb_pm_runtime_put_noidle(bundle);
499
500 params->state = GBAUDIO_CODEC_HWPARAMS;
501 params->format = format;
502 params->rate = rate;
503 params->channels = channels;
504 params->sig_bits = sig_bits;
505
506 mutex_unlock(&codec->lock);
507 return 0;
508}
509
510static int gbcodec_prepare(struct snd_pcm_substream *substream,
511 struct snd_soc_dai *dai)
512{
513 int ret;
514 struct gbaudio_module_info *module;
515 struct gbaudio_data_connection *data;
516 struct gb_bundle *bundle;
517 struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
518 struct gbaudio_stream_params *params;
519
520 mutex_lock(&codec->lock);
521
522 if (list_empty(&codec->module_list)) {
523 dev_err(codec->dev, "No codec module available\n");
524 mutex_unlock(&codec->lock);
525 return -ENODEV;
526 }
527
528 list_for_each_entry(module, &codec->module_list, list) {
529 /* find the dai */
530 data = find_data(module, dai->id);
531 if (data)
532 break;
533 }
534 if (!data) {
535 dev_err(dai->dev, "DATA connection missing\n");
536 mutex_unlock(&codec->lock);
537 return -ENODEV;
538 }
539
540 params = find_dai_stream_params(codec, dai->id, substream->stream);
541 if (!params) {
542 dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
543 mutex_unlock(&codec->lock);
544 return -EINVAL;
545 }
546
547 bundle = to_gb_bundle(module->dev);
548 ret = gb_pm_runtime_get_sync(bundle);
549 if (ret) {
550 mutex_unlock(&codec->lock);
551 return ret;
552 }
553
554 switch (substream->stream) {
555 case SNDRV_PCM_STREAM_PLAYBACK:
556 ret = gb_audio_apbridgea_set_tx_data_size(data->connection, 0,
557 192);
558 break;
559 case SNDRV_PCM_STREAM_CAPTURE:
560 ret = gb_audio_apbridgea_set_rx_data_size(data->connection, 0,
561 192);
562 break;
563 }
564 if (ret) {
565 mutex_unlock(&codec->lock);
566 dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
567 ret);
568 return ret;
569 }
570
571 gb_pm_runtime_put_noidle(bundle);
572
573 params->state = GBAUDIO_CODEC_PREPARE;
574 mutex_unlock(&codec->lock);
575 return 0;
576}
577
/*
 * Digital mute DAI callback.
 *
 * Unmute starts the stream (APBridgeA prepare + start for the direction);
 * mute stops it (stop + shutdown).  When no module is attached, muting
 * still "succeeds" — the stream is just marked STOP so teardown can
 * proceed — while unmuting fails with -ENODEV.  Returns 0 or a negative
 * errno.
 */
static int gbcodec_mute_stream(struct snd_soc_dai *dai, int mute, int stream)
{
	int ret;
	struct gbaudio_data_connection *data;
	struct gbaudio_module_info *module;
	struct gb_bundle *bundle;
	struct gbaudio_codec_info *codec = dev_get_drvdata(dai->dev);
	struct gbaudio_stream_params *params;


	dev_dbg(dai->dev, "Mute:%d, Direction:%s\n", mute,
		stream ? "CAPTURE":"PLAYBACK");

	mutex_lock(&codec->lock);

	params = find_dai_stream_params(codec, dai->id, stream);
	if (!params) {
		dev_err(codec->dev, "Failed to fetch dai_stream pointer\n");
		mutex_unlock(&codec->lock);
		return -EINVAL;
	}

	/* module already gone: allow mute (mark STOP), refuse unmute */
	if (list_empty(&codec->module_list)) {
		dev_err(codec->dev, "No codec module available\n");
		if (mute) {
			params->state = GBAUDIO_CODEC_STOP;
			ret = 0;
		} else {
			ret = -ENODEV;
		}
		mutex_unlock(&codec->lock);
		return ret;
	}

	list_for_each_entry(module, &codec->module_list, list) {
		/* find the dai */
		data = find_data(module, dai->id);
		if (data)
			break;
	}
	if (!data) {
		dev_err(dai->dev, "%s:%s DATA connection missing\n",
			dai->name, module->name);
		mutex_unlock(&codec->lock);
		return -ENODEV;
	}

	bundle = to_gb_bundle(module->dev);
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret) {
		mutex_unlock(&codec->lock);
		return ret;
	}

	/* four-way dispatch on (mute, direction); stream 0 == playback */
	if (!mute && !stream) {/* start playback */
		ret = gb_audio_apbridgea_prepare_tx(data->connection,
						    0);
		if (!ret)
			ret = gb_audio_apbridgea_start_tx(data->connection,
							  0, 0);
		params->state = GBAUDIO_CODEC_START;
	} else if (!mute && stream) {/* start capture */
		ret = gb_audio_apbridgea_prepare_rx(data->connection,
						    0);
		if (!ret)
			ret = gb_audio_apbridgea_start_rx(data->connection,
							  0);
		params->state = GBAUDIO_CODEC_START;
	} else if (mute && !stream) {/* stop playback */
		ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
		if (!ret)
			ret = gb_audio_apbridgea_shutdown_tx(data->connection,
							     0);
		params->state = GBAUDIO_CODEC_STOP;
	} else if (mute && stream) {/* stop capture */
		ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
		if (!ret)
			ret = gb_audio_apbridgea_shutdown_rx(data->connection,
							     0);
		params->state = GBAUDIO_CODEC_STOP;
	} else
		ret = -EINVAL;
	if (ret)
		dev_err_ratelimited(dai->dev,
				    "%s:Error during %s %s stream:%d\n",
				    module->name, mute ? "Mute" : "Unmute",
				    stream ? "Capture" : "Playback", ret);

	gb_pm_runtime_put_noidle(bundle);
	mutex_unlock(&codec->lock);
	return ret;
}
670
/* DAI callbacks registered with the ASoC core for the dummy codec DAI. */
static struct snd_soc_dai_ops gbcodec_dai_ops = {
	.startup = gbcodec_startup,
	.shutdown = gbcodec_shutdown,
	.hw_params = gbcodec_hw_params,
	.prepare = gbcodec_prepare,
	.mute_stream = gbcodec_mute_stream,
};
678
/*
 * Single DAI exposed by the dummy codec: I2S port 0, fixed 48 kHz S16_LE,
 * 1-2 channels, for both playback and capture (matches the validation in
 * gbcodec_hw_params()).
 */
static struct snd_soc_dai_driver gbaudio_dai[] = {
	{
		.name = "apb-i2s0",
		.id = 0,
		.playback = {
			.stream_name = "I2S 0 Playback",
			.rates = SNDRV_PCM_RATE_48000,
			.formats = SNDRV_PCM_FORMAT_S16_LE,
			.rate_max = 48000,
			.rate_min = 48000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.capture = {
			.stream_name = "I2S 0 Capture",
			.rates = SNDRV_PCM_RATE_48000,
			.formats = SNDRV_PCM_FORMAT_S16_LE,
			.rate_max = 48000,
			.rate_min = 48000,
			.channels_min = 1,
			.channels_max = 2,
		},
		.ops = &gbcodec_dai_ops,
	},
};
704
705static int gbaudio_init_jack(struct gbaudio_module_info *module,
706 struct snd_soc_codec *codec)
707{
708 int ret;
709
710 if (!module->jack_mask)
711 return 0;
712
713 snprintf(module->jack_name, NAME_SIZE, "GB %d Headset Jack",
714 module->dev_id);
715 ret = snd_soc_jack_new(codec, module->jack_name, module->jack_mask,
716 &module->headset_jack);
717 if (ret) {
718 dev_err(module->dev, "Failed to create new jack\n");
719 return ret;
720 }
721
722 if (!module->button_mask)
723 return 0;
724
725 snprintf(module->button_name, NAME_SIZE, "GB %d Button Jack",
726 module->dev_id);
727 ret = snd_soc_jack_new(codec, module->button_name, module->button_mask,
728 &module->button_jack);
729 if (ret) {
730 dev_err(module->dev, "Failed to create button jack\n");
731 return ret;
732 }
733
734 /*
735 * Currently, max 4 buttons are supported with following key mapping
736 * BTN_0 = KEY_MEDIA
737 * BTN_1 = KEY_VOICECOMMAND
738 * BTN_2 = KEY_VOLUMEUP
739 * BTN_3 = KEY_VOLUMEDOWN
740 */
741
742 if (module->button_mask & SND_JACK_BTN_0) {
743 ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_0,
744 KEY_MEDIA);
745 if (ret) {
746 dev_err(module->dev, "Failed to set BTN_0\n");
747 return ret;
748 }
749 }
750
751 if (module->button_mask & SND_JACK_BTN_1) {
752 ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_1,
753 KEY_VOICECOMMAND);
754 if (ret) {
755 dev_err(module->dev, "Failed to set BTN_1\n");
756 return ret;
757 }
758 }
759
760 if (module->button_mask & SND_JACK_BTN_2) {
761 ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_2,
762 KEY_VOLUMEUP);
763 if (ret) {
764 dev_err(module->dev, "Failed to set BTN_2\n");
765 return ret;
766 }
767 }
768
769 if (module->button_mask & SND_JACK_BTN_3) {
770 ret = snd_jack_set_key(module->button_jack.jack, SND_JACK_BTN_3,
771 KEY_VOLUMEDOWN);
772 if (ret) {
773 dev_err(module->dev, "Failed to set BTN_0\n");
774 return ret;
775 }
776 }
777
778 /* FIXME
779 * verify if this is really required
780 set_bit(INPUT_PROP_NO_DUMMY_RELEASE,
781 module->button_jack.jack->input_dev->propbit);
782 */
783
784 return 0;
785}
786
/*
 * Register a hot-plugged Greybus audio module with the dummy codec:
 * create its jacks, add its topology-provided controls/widgets/routes,
 * and — when the sound card is already instantiated — instantiate the
 * new DAPM widgets and register the jack devices immediately.
 *
 * Returns 0 on success or a negative errno; -EAGAIN if the codec
 * platform driver has not been probed yet, -EINVAL if the module tries
 * to add its own DAIs (only the static gbaudio_dai table is supported).
 */
int gbaudio_register_module(struct gbaudio_module_info *module)
{
	int ret;
	struct snd_soc_codec *codec;
	struct snd_card *card;
	struct snd_soc_jack *jack = NULL;

	if (!gbcodec) {
		dev_err(module->dev, "GB Codec not yet probed\n");
		return -EAGAIN;
	}

	codec = gbcodec->codec;
	card = codec->card->snd_card;

	/* keep the card's control list stable while controls are added */
	down_write(&card->controls_rwsem);

	if (module->num_dais) {
		dev_err(gbcodec->dev,
			"%d:DAIs not supported via gbcodec driver\n",
			module->num_dais);
		up_write(&card->controls_rwsem);
		return -EINVAL;
	}

	ret = gbaudio_init_jack(module, codec);
	if (ret) {
		up_write(&card->controls_rwsem);
		return ret;
	}

	if (module->dapm_widgets)
		snd_soc_dapm_new_controls(&codec->dapm, module->dapm_widgets,
					  module->num_dapm_widgets);
	if (module->controls)
		snd_soc_add_codec_controls(codec, module->controls,
					   module->num_controls);
	if (module->dapm_routes)
		snd_soc_dapm_add_routes(&codec->dapm, module->dapm_routes,
					module->num_dapm_routes);

	/* card already instantiated, create widgets here only */
	if (codec->card->instantiated) {
		snd_soc_dapm_link_component_dai_widgets(codec->card,
							&codec->dapm);
#ifdef CONFIG_SND_JACK
		/* register jack devices for this module from codec->jack_list */
		list_for_each_entry(jack, &codec->jack_list, list) {
			if ((jack == &module->headset_jack)
			    || (jack == &module->button_jack))
				snd_device_register(codec->card->snd_card,
						    jack->jack);
		}
#endif
	}

	/* publish the module so the DAI callbacks can find it */
	mutex_lock(&gbcodec->lock);
	list_add(&module->list, &gbcodec->module_list);
	mutex_unlock(&gbcodec->lock);

	/* ret is still 0 here (from gbaudio_init_jack) unless overwritten */
	if (codec->card->instantiated)
		ret = snd_soc_dapm_new_widgets(&codec->dapm);
	dev_dbg(codec->dev, "Registered %s module\n", module->name);

	up_write(&card->controls_rwsem);
	return ret;
}
EXPORT_SYMBOL(gbaudio_register_module);
855
856static void gbaudio_codec_clean_data_tx(struct gbaudio_data_connection *data)
857{
858 uint16_t i2s_port, cportid;
859 int ret;
860
861 if (list_is_singular(&gbcodec->module_list)) {
862 ret = gb_audio_apbridgea_stop_tx(data->connection, 0);
863 if (ret)
864 return;
865 ret = gb_audio_apbridgea_shutdown_tx(data->connection,
866 0);
867 if (ret)
868 return;
869 }
870 i2s_port = 0; /* fixed for now */
871 cportid = data->connection->hd_cport_id;
872 ret = gb_audio_apbridgea_unregister_cport(data->connection,
873 i2s_port, cportid,
874 AUDIO_APBRIDGEA_DIRECTION_TX);
875 data->state[0] = GBAUDIO_CODEC_SHUTDOWN;
876}
877
878static void gbaudio_codec_clean_data_rx(struct gbaudio_data_connection *data)
879{
880 uint16_t i2s_port, cportid;
881 int ret;
882
883 if (list_is_singular(&gbcodec->module_list)) {
884 ret = gb_audio_apbridgea_stop_rx(data->connection, 0);
885 if (ret)
886 return;
887 ret = gb_audio_apbridgea_shutdown_rx(data->connection,
888 0);
889 if (ret)
890 return;
891 }
892 i2s_port = 0; /* fixed for now */
893 cportid = data->connection->hd_cport_id;
894 ret = gb_audio_apbridgea_unregister_cport(data->connection,
895 i2s_port, cportid,
896 AUDIO_APBRIDGEA_DIRECTION_RX);
897 data->state[1] = GBAUDIO_CODEC_SHUTDOWN;
898}
899
900
901static void gbaudio_codec_cleanup(struct gbaudio_module_info *module)
902{
903 struct gbaudio_data_connection *data;
904 int pb_state, cap_state;
905
906 dev_dbg(gbcodec->dev, "%s: removed, cleanup APBridge\n", module->name);
907 list_for_each_entry(data, &module->data_list, list) {
908 pb_state = data->state[0];
909 cap_state = data->state[1];
910
911 if (pb_state > GBAUDIO_CODEC_SHUTDOWN)
912 gbaudio_codec_clean_data_tx(data);
913
914 if (cap_state > GBAUDIO_CODEC_SHUTDOWN)
915 gbaudio_codec_clean_data_rx(data);
916
917 }
918}
919
/*
 * Undo gbaudio_register_module(): quiesce any active APBridge streams,
 * unlink the module from the codec's module list, report jack removal
 * to userspace and free the jack devices, then remove the module's
 * routes, controls and widgets from the codec.
 */
void gbaudio_unregister_module(struct gbaudio_module_info *module)
{
	struct snd_soc_codec *codec = gbcodec->codec;
	struct snd_card *card = codec->card->snd_card;
	struct snd_soc_jack *jack, *next_j;
	int mask;

	dev_dbg(codec->dev, "Unregister %s module\n", module->name);

	down_write(&card->controls_rwsem);
	mutex_lock(&gbcodec->lock);
	/* stop/shutdown streams before the module's connections go away */
	gbaudio_codec_cleanup(module);
	list_del(&module->list);
	dev_dbg(codec->dev, "Process Unregister %s module\n", module->name);
	mutex_unlock(&gbcodec->lock);

#ifdef CONFIG_SND_JACK
	/* free jack devices for this module from codec->jack_list */
	list_for_each_entry_safe(jack, next_j, &codec->jack_list, list) {
		if (jack == &module->headset_jack)
			mask = GBCODEC_JACK_MASK;
		else if (jack == &module->button_jack)
			mask = GBCODEC_JACK_BUTTON_MASK;
		else
			mask = 0;
		if (mask) {
			/* report unplug so userspace sees a removal event */
			dev_dbg(module->dev, "Report %s removal\n",
				jack->jack->id);
			snd_soc_jack_report(jack, 0, mask);
			snd_device_free(codec->card->snd_card, jack->jack);
			list_del(&jack->list);
		}
	}
#endif

	if (module->dapm_routes) {
		dev_dbg(codec->dev, "Removing %d routes\n",
			module->num_dapm_routes);
		snd_soc_dapm_del_routes(&codec->dapm, module->dapm_routes,
					module->num_dapm_routes);
	}
	if (module->controls) {
		dev_dbg(codec->dev, "Removing %d controls\n",
			module->num_controls);
		snd_soc_remove_codec_controls(codec, module->controls,
					      module->num_controls);
	}
	if (module->dapm_widgets) {
		dev_dbg(codec->dev, "Removing %d widgets\n",
			module->num_dapm_widgets);
		snd_soc_dapm_free_controls(&codec->dapm, module->dapm_widgets,
					   module->num_dapm_widgets);
	}

	dev_dbg(codec->dev, "Unregistered %s module\n", module->name);

	up_write(&card->controls_rwsem);
}
EXPORT_SYMBOL(gbaudio_unregister_module);
979
980/*
981 * codec driver ops
982 */
983static int gbcodec_probe(struct snd_soc_codec *codec)
984{
985 int i;
986 struct gbaudio_codec_info *info;
987 struct gbaudio_codec_dai *dai;
988
989 info = devm_kzalloc(codec->dev, sizeof(*info), GFP_KERNEL);
990 if (!info)
991 return -ENOMEM;
992
993 info->dev = codec->dev;
994 INIT_LIST_HEAD(&info->module_list);
995 mutex_init(&info->lock);
996 INIT_LIST_HEAD(&info->dai_list);
997
998 /* init dai_list used to maintain runtime stream info */
999 for (i = 0; i < ARRAY_SIZE(gbaudio_dai); i++) {
1000 dai = devm_kzalloc(codec->dev, sizeof(*dai), GFP_KERNEL);
1001 if (!dai)
1002 return -ENOMEM;
1003 dai->id = gbaudio_dai[i].id;
1004 list_add(&dai->list, &info->dai_list);
1005 }
1006
1007 info->codec = codec;
1008 snd_soc_codec_set_drvdata(codec, info);
1009 gbcodec = info;
1010
1011 device_init_wakeup(codec->dev, 1);
1012 return 0;
1013}
1014
/*
 * snd_soc codec remove callback.  All probe-time allocations are
 * devm-managed, so nothing is undone here.
 * NOTE(review): the global 'gbcodec' pointer is left pointing at the
 * devm memory after removal — confirm no late callers can race this.
 */
static int gbcodec_remove(struct snd_soc_codec *codec)
{
	/* Empty function for now */
	return 0;
}
1020
/*
 * Backing store for the dummy codec's register cache, accessed via
 * gbcodec_read()/gbcodec_write() and indexed by enum gbcodec_reg_index.
 * NOTE(review): these defaults duplicate gbcodec_reg_defaults[] in
 * audio_codec.h — the two tables must stay in sync.
 */
static u8 gbcodec_reg[GBCODEC_REG_COUNT] = {
	[GBCODEC_CTL_REG] = GBCODEC_CTL_REG_DEFAULT,
	[GBCODEC_MUTE_REG] = GBCODEC_MUTE_REG_DEFAULT,
	[GBCODEC_PB_LVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
	[GBCODEC_PB_RVOL_REG] = GBCODEC_PB_VOL_REG_DEFAULT,
	[GBCODEC_CAP_LVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
	[GBCODEC_CAP_RVOL_REG] = GBCODEC_CAP_VOL_REG_DEFAULT,
	[GBCODEC_APB1_MUX_REG] = GBCODEC_APB1_MUX_REG_DEFAULT,
	[GBCODEC_APB2_MUX_REG] = GBCODEC_APB2_MUX_REG_DEFAULT,
};
1031
1032static int gbcodec_write(struct snd_soc_codec *codec, unsigned int reg,
1033 unsigned int value)
1034{
1035 int ret = 0;
1036
1037 if (reg == SND_SOC_NOPM)
1038 return 0;
1039
1040 BUG_ON(reg >= GBCODEC_REG_COUNT);
1041
1042 gbcodec_reg[reg] = value;
1043 dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, value);
1044
1045 return ret;
1046}
1047
1048static unsigned int gbcodec_read(struct snd_soc_codec *codec,
1049 unsigned int reg)
1050{
1051 unsigned int val = 0;
1052
1053 if (reg == SND_SOC_NOPM)
1054 return 0;
1055
1056 BUG_ON(reg >= GBCODEC_REG_COUNT);
1057
1058 val = gbcodec_reg[reg];
1059 dev_dbg(codec->dev, "reg[%d] = 0x%x\n", reg, val);
1060
1061 return val;
1062}
1063
/*
 * Dummy codec driver ops.  Register I/O is served entirely from the
 * in-memory gbcodec_reg[] cache via gbcodec_read()/gbcodec_write();
 * cache defaults come from gbcodec_reg_defaults (audio_codec.h).
 */
static struct snd_soc_codec_driver soc_codec_dev_gbaudio = {
	.probe = gbcodec_probe,
	.remove = gbcodec_remove,

	.read = gbcodec_read,
	.write = gbcodec_write,

	.reg_cache_size = GBCODEC_REG_COUNT,
	.reg_cache_default = gbcodec_reg_defaults,
	.reg_word_size = 1,

	.idle_bias_off = true,
	.ignore_pmdown_time = 1,
};
1078
#ifdef CONFIG_PM
/* System-sleep hooks: no state to save/restore yet, just trace calls. */
static int gbaudio_codec_suspend(struct device *dev)
{
	dev_dbg(dev, "%s: suspend\n", __func__);
	return 0;
}

static int gbaudio_codec_resume(struct device *dev)
{
	dev_dbg(dev, "%s: resume\n", __func__);
	return 0;
}

static const struct dev_pm_ops gbaudio_codec_pm_ops = {
	.suspend = gbaudio_codec_suspend,
	.resume = gbaudio_codec_resume,
};
#endif
1097
/*
 * Platform-driver probe: register the dummy codec and its static DAI
 * table with the ASoC core.
 */
static int gbaudio_codec_probe(struct platform_device *pdev)
{
	return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_gbaudio,
				      gbaudio_dai, ARRAY_SIZE(gbaudio_dai));
}
1103
/* Platform-driver remove: unregister the codec from the ASoC core. */
static int gbaudio_codec_remove(struct platform_device *pdev)
{
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}
1109
1110static const struct of_device_id greybus_asoc_machine_of_match[] = {
1111 { .compatible = "toshiba,apb-dummy-codec", },
1112 {},
1113};
1114
1115static struct platform_driver gbaudio_codec_driver = {
1116 .driver = {
1117 .name = "apb-dummy-codec",
1118 .owner = THIS_MODULE,
1119#ifdef CONFIG_PM
1120 .pm = &gbaudio_codec_pm_ops,
1121#endif
1122 .of_match_table = greybus_asoc_machine_of_match,
1123 },
1124 .probe = gbaudio_codec_probe,
1125 .remove = gbaudio_codec_remove,
1126};
1127module_platform_driver(gbaudio_codec_driver);
1128
1129MODULE_DESCRIPTION("APBridge ALSA SoC dummy codec driver");
1130MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
1131MODULE_LICENSE("GPL v2");
1132MODULE_ALIAS("platform:apb-dummy-codec");
diff --git a/drivers/staging/greybus/audio_codec.h b/drivers/staging/greybus/audio_codec.h
new file mode 100644
index 000000000000..0a864592560f
--- /dev/null
+++ b/drivers/staging/greybus/audio_codec.h
@@ -0,0 +1,283 @@
1/*
2 * Greybus audio driver
3 * Copyright 2015 Google Inc.
4 * Copyright 2015 Linaro Ltd.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#ifndef __LINUX_GBAUDIO_CODEC_H
10#define __LINUX_GBAUDIO_CODEC_H
11
12#include <sound/soc.h>
13#include <sound/jack.h>
14
15#include "greybus.h"
16#include "greybus_protocols.h"
17
18#define NAME_SIZE 32
19#define MAX_DAIS 2 /* APB1, APB2 */
20
21enum {
22 APB1_PCM = 0,
23 APB2_PCM,
24 NUM_CODEC_DAIS,
25};
26
27enum gbcodec_reg_index {
28 GBCODEC_CTL_REG,
29 GBCODEC_MUTE_REG,
30 GBCODEC_PB_LVOL_REG,
31 GBCODEC_PB_RVOL_REG,
32 GBCODEC_CAP_LVOL_REG,
33 GBCODEC_CAP_RVOL_REG,
34 GBCODEC_APB1_MUX_REG,
35 GBCODEC_APB2_MUX_REG,
36 GBCODEC_REG_COUNT
37};
38
39/* device_type should be same as defined in audio.h (Android media layer) */
40enum {
41 GBAUDIO_DEVICE_NONE = 0x0,
42 /* reserved bits */
43 GBAUDIO_DEVICE_BIT_IN = 0x80000000,
44 GBAUDIO_DEVICE_BIT_DEFAULT = 0x40000000,
45 /* output devices */
46 GBAUDIO_DEVICE_OUT_SPEAKER = 0x2,
47 GBAUDIO_DEVICE_OUT_WIRED_HEADSET = 0x4,
48 GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE = 0x8,
49 /* input devices */
50 GBAUDIO_DEVICE_IN_BUILTIN_MIC = GBAUDIO_DEVICE_BIT_IN | 0x4,
51 GBAUDIO_DEVICE_IN_WIRED_HEADSET = GBAUDIO_DEVICE_BIT_IN | 0x10,
52};
53
54/* bit 0-SPK, 1-HP, 2-DAC,
55 * 4-MIC, 5-HSMIC, 6-MIC2
56 */
57#define GBCODEC_CTL_REG_DEFAULT 0x00
58
59/* bit 0,1 - APB1-PB-L/R
60 * bit 2,3 - APB2-PB-L/R
61 * bit 4,5 - APB1-Cap-L/R
62 * bit 6,7 - APB2-Cap-L/R
63 */
64#define GBCODEC_MUTE_REG_DEFAULT 0x00
65
66/* 0-127 steps */
67#define GBCODEC_PB_VOL_REG_DEFAULT 0x00
68#define GBCODEC_CAP_VOL_REG_DEFAULT 0x00
69
70/* bit 0,1,2 - PB stereo, left, right
71 * bit 8,9,10 - Cap stereo, left, right
72 */
73#define GBCODEC_APB1_MUX_REG_DEFAULT 0x00
74#define GBCODEC_APB2_MUX_REG_DEFAULT 0x00
75
76#define GBCODEC_JACK_MASK 0x0000FFFF
77#define GBCODEC_JACK_BUTTON_MASK 0xFFFF0000
78
/*
 * Default values for the dummy codec's register cache, indexed by
 * enum gbcodec_reg_index (order must match that enum).
 * NOTE(review): a static const array defined in a header gets a private
 * copy in every including translation unit — consider moving the
 * definition into a single .c file.
 */
static const u8 gbcodec_reg_defaults[GBCODEC_REG_COUNT] = {
	GBCODEC_CTL_REG_DEFAULT,
	GBCODEC_MUTE_REG_DEFAULT,
	GBCODEC_PB_VOL_REG_DEFAULT,
	GBCODEC_PB_VOL_REG_DEFAULT,
	GBCODEC_CAP_VOL_REG_DEFAULT,
	GBCODEC_CAP_VOL_REG_DEFAULT,
	GBCODEC_APB1_MUX_REG_DEFAULT,
	GBCODEC_APB2_MUX_REG_DEFAULT,
};
89
90enum gbaudio_codec_state {
91 GBAUDIO_CODEC_SHUTDOWN = 0,
92 GBAUDIO_CODEC_STARTUP,
93 GBAUDIO_CODEC_HWPARAMS,
94 GBAUDIO_CODEC_PREPARE,
95 GBAUDIO_CODEC_START,
96 GBAUDIO_CODEC_STOP,
97};
98
99struct gbaudio_stream_params {
100 int state;
101 uint8_t sig_bits, channels;
102 uint32_t format, rate;
103};
104
105struct gbaudio_codec_dai {
106 int id;
107 /* runtime params for playback/capture streams */
108 struct gbaudio_stream_params params[2];
109 struct list_head list;
110};
111
112struct gbaudio_codec_info {
113 struct device *dev;
114 struct snd_soc_codec *codec;
115 struct list_head module_list;
116 /* to maintain runtime stream params for each DAI */
117 struct list_head dai_list;
118 struct mutex lock;
119 u8 reg[GBCODEC_REG_COUNT];
120};
121
122struct gbaudio_widget {
123 __u8 id;
124 const char *name;
125 struct list_head list;
126};
127
128struct gbaudio_control {
129 __u8 id;
130 char *name;
131 char *wname;
132 const char * const *texts;
133 int items;
134 struct list_head list;
135};
136
137struct gbaudio_data_connection {
138 int id;
139 __le16 data_cport;
140 struct gb_connection *connection;
141 struct list_head list;
142 /* maintain runtime state for playback/capture stream */
143 int state[2];
144};
145
146/* stream direction */
147#define GB_PLAYBACK BIT(0)
148#define GB_CAPTURE BIT(1)
149
150enum gbaudio_module_state {
151 GBAUDIO_MODULE_OFF = 0,
152 GBAUDIO_MODULE_ON,
153};
154
155struct gbaudio_module_info {
156 /* module info */
157 struct device *dev;
158 int dev_id; /* check if it should be bundle_id/hd_cport_id */
159 int vid;
160 int pid;
161 int slot;
162 int type;
163 int set_uevent;
164 char vstr[NAME_SIZE];
165 char pstr[NAME_SIZE];
166 struct list_head list;
167 /* need to share this info to above user space */
168 int manager_id;
169 char name[NAME_SIZE];
170 unsigned int ip_devices;
171 unsigned int op_devices;
172
173 /* jack related */
174 char jack_name[NAME_SIZE];
175 char button_name[NAME_SIZE];
176 int jack_type;
177 int jack_mask;
178 int button_mask;
179 int button_status;
180 struct snd_soc_jack headset_jack;
181 struct snd_soc_jack button_jack;
182
183 /* connection info */
184 struct gb_connection *mgmt_connection;
185 size_t num_data_connections;
186 struct list_head data_list;
187
188 /* topology related */
189 int num_dais;
190 int num_controls;
191 int num_dapm_widgets;
192 int num_dapm_routes;
193 unsigned long dai_offset;
194 unsigned long widget_offset;
195 unsigned long control_offset;
196 unsigned long route_offset;
197 struct snd_kcontrol_new *controls;
198 struct snd_soc_dapm_widget *dapm_widgets;
199 struct snd_soc_dapm_route *dapm_routes;
200 struct snd_soc_dai_driver *dais;
201
202 struct list_head widget_list;
203 struct list_head ctl_list;
204 struct list_head widget_ctl_list;
205
206 struct gb_audio_topology *topology;
207};
208
209int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
210 struct gb_audio_topology *tplg_data);
211void gbaudio_tplg_release(struct gbaudio_module_info *module);
212
213int gbaudio_module_update(struct gbaudio_codec_info *codec,
214 struct snd_soc_dapm_widget *w,
215 struct gbaudio_module_info *module,
216 int enable);
217int gbaudio_register_module(struct gbaudio_module_info *module);
218void gbaudio_unregister_module(struct gbaudio_module_info *module);
219
220/* protocol related */
221extern int gb_audio_gb_get_topology(struct gb_connection *connection,
222 struct gb_audio_topology **topology);
223extern int gb_audio_gb_get_control(struct gb_connection *connection,
224 uint8_t control_id, uint8_t index,
225 struct gb_audio_ctl_elem_value *value);
226extern int gb_audio_gb_set_control(struct gb_connection *connection,
227 uint8_t control_id, uint8_t index,
228 struct gb_audio_ctl_elem_value *value);
229extern int gb_audio_gb_enable_widget(struct gb_connection *connection,
230 uint8_t widget_id);
231extern int gb_audio_gb_disable_widget(struct gb_connection *connection,
232 uint8_t widget_id);
233extern int gb_audio_gb_get_pcm(struct gb_connection *connection,
234 uint16_t data_cport, uint32_t *format,
235 uint32_t *rate, uint8_t *channels,
236 uint8_t *sig_bits);
237extern int gb_audio_gb_set_pcm(struct gb_connection *connection,
238 uint16_t data_cport, uint32_t format,
239 uint32_t rate, uint8_t channels,
240 uint8_t sig_bits);
241extern int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
242 uint16_t data_cport, uint16_t size);
243extern int gb_audio_gb_activate_tx(struct gb_connection *connection,
244 uint16_t data_cport);
245extern int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
246 uint16_t data_cport);
247extern int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
248 uint16_t data_cport, uint16_t size);
249extern int gb_audio_gb_activate_rx(struct gb_connection *connection,
250 uint16_t data_cport);
251extern int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
252 uint16_t data_cport);
253extern int gb_audio_apbridgea_set_config(struct gb_connection *connection,
254 __u16 i2s_port, __u32 format,
255 __u32 rate, __u32 mclk_freq);
256extern int gb_audio_apbridgea_register_cport(struct gb_connection *connection,
257 __u16 i2s_port, __u16 cportid,
258 __u8 direction);
259extern int gb_audio_apbridgea_unregister_cport(struct gb_connection *connection,
260 __u16 i2s_port, __u16 cportid,
261 __u8 direction);
262extern int gb_audio_apbridgea_set_tx_data_size(struct gb_connection *connection,
263 __u16 i2s_port, __u16 size);
264extern int gb_audio_apbridgea_prepare_tx(struct gb_connection *connection,
265 __u16 i2s_port);
266extern int gb_audio_apbridgea_start_tx(struct gb_connection *connection,
267 __u16 i2s_port, __u64 timestamp);
268extern int gb_audio_apbridgea_stop_tx(struct gb_connection *connection,
269 __u16 i2s_port);
270extern int gb_audio_apbridgea_shutdown_tx(struct gb_connection *connection,
271 __u16 i2s_port);
272extern int gb_audio_apbridgea_set_rx_data_size(struct gb_connection *connection,
273 __u16 i2s_port, __u16 size);
274extern int gb_audio_apbridgea_prepare_rx(struct gb_connection *connection,
275 __u16 i2s_port);
276extern int gb_audio_apbridgea_start_rx(struct gb_connection *connection,
277 __u16 i2s_port);
278extern int gb_audio_apbridgea_stop_rx(struct gb_connection *connection,
279 __u16 i2s_port);
280extern int gb_audio_apbridgea_shutdown_rx(struct gb_connection *connection,
281 __u16 i2s_port);
282
283#endif /* __LINUX_GBAUDIO_CODEC_H */
diff --git a/drivers/staging/greybus/audio_gb.c b/drivers/staging/greybus/audio_gb.c
new file mode 100644
index 000000000000..a2f1c92e7445
--- /dev/null
+++ b/drivers/staging/greybus/audio_gb.c
@@ -0,0 +1,228 @@
1/*
2 * Greybus Audio Device Class Protocol helpers
3 *
4 * Copyright 2015-2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include "greybus.h"
10#include "greybus_protocols.h"
11#include "operation.h"
12#include "audio_codec.h"
13
14/* TODO: Split into separate calls */
15int gb_audio_gb_get_topology(struct gb_connection *connection,
16 struct gb_audio_topology **topology)
17{
18 struct gb_audio_get_topology_size_response size_resp;
19 struct gb_audio_topology *topo;
20 uint16_t size;
21 int ret;
22
23 ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE,
24 NULL, 0, &size_resp, sizeof(size_resp));
25 if (ret)
26 return ret;
27
28 size = le16_to_cpu(size_resp.size);
29 if (size < sizeof(*topo))
30 return -ENODATA;
31
32 topo = kzalloc(size, GFP_KERNEL);
33 if (!topo)
34 return -ENOMEM;
35
36 ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_TOPOLOGY, NULL, 0,
37 topo, size);
38 if (ret) {
39 kfree(topo);
40 return ret;
41 }
42
43 *topology = topo;
44
45 return 0;
46}
47EXPORT_SYMBOL_GPL(gb_audio_gb_get_topology);
48
49int gb_audio_gb_get_control(struct gb_connection *connection,
50 uint8_t control_id, uint8_t index,
51 struct gb_audio_ctl_elem_value *value)
52{
53 struct gb_audio_get_control_request req;
54 struct gb_audio_get_control_response resp;
55 int ret;
56
57 req.control_id = control_id;
58 req.index = index;
59
60 ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_CONTROL,
61 &req, sizeof(req), &resp, sizeof(resp));
62 if (ret)
63 return ret;
64
65 memcpy(value, &resp.value, sizeof(*value));
66
67 return 0;
68}
69EXPORT_SYMBOL_GPL(gb_audio_gb_get_control);
70
71int gb_audio_gb_set_control(struct gb_connection *connection,
72 uint8_t control_id, uint8_t index,
73 struct gb_audio_ctl_elem_value *value)
74{
75 struct gb_audio_set_control_request req;
76
77 req.control_id = control_id;
78 req.index = index;
79 memcpy(&req.value, value, sizeof(req.value));
80
81 return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_CONTROL,
82 &req, sizeof(req), NULL, 0);
83}
84EXPORT_SYMBOL_GPL(gb_audio_gb_set_control);
85
86int gb_audio_gb_enable_widget(struct gb_connection *connection,
87 uint8_t widget_id)
88{
89 struct gb_audio_enable_widget_request req;
90
91 req.widget_id = widget_id;
92
93 return gb_operation_sync(connection, GB_AUDIO_TYPE_ENABLE_WIDGET,
94 &req, sizeof(req), NULL, 0);
95}
96EXPORT_SYMBOL_GPL(gb_audio_gb_enable_widget);
97
98int gb_audio_gb_disable_widget(struct gb_connection *connection,
99 uint8_t widget_id)
100{
101 struct gb_audio_disable_widget_request req;
102
103 req.widget_id = widget_id;
104
105 return gb_operation_sync(connection, GB_AUDIO_TYPE_DISABLE_WIDGET,
106 &req, sizeof(req), NULL, 0);
107}
108EXPORT_SYMBOL_GPL(gb_audio_gb_disable_widget);
109
110int gb_audio_gb_get_pcm(struct gb_connection *connection, uint16_t data_cport,
111 uint32_t *format, uint32_t *rate, uint8_t *channels,
112 uint8_t *sig_bits)
113{
114 struct gb_audio_get_pcm_request req;
115 struct gb_audio_get_pcm_response resp;
116 int ret;
117
118 req.data_cport = cpu_to_le16(data_cport);
119
120 ret = gb_operation_sync(connection, GB_AUDIO_TYPE_GET_PCM,
121 &req, sizeof(req), &resp, sizeof(resp));
122 if (ret)
123 return ret;
124
125 *format = le32_to_cpu(resp.format);
126 *rate = le32_to_cpu(resp.rate);
127 *channels = resp.channels;
128 *sig_bits = resp.sig_bits;
129
130 return 0;
131}
132EXPORT_SYMBOL_GPL(gb_audio_gb_get_pcm);
133
134int gb_audio_gb_set_pcm(struct gb_connection *connection, uint16_t data_cport,
135 uint32_t format, uint32_t rate, uint8_t channels,
136 uint8_t sig_bits)
137{
138 struct gb_audio_set_pcm_request req;
139
140 req.data_cport = cpu_to_le16(data_cport);
141 req.format = cpu_to_le32(format);
142 req.rate = cpu_to_le32(rate);
143 req.channels = channels;
144 req.sig_bits = sig_bits;
145
146 return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_PCM,
147 &req, sizeof(req), NULL, 0);
148}
149EXPORT_SYMBOL_GPL(gb_audio_gb_set_pcm);
150
151int gb_audio_gb_set_tx_data_size(struct gb_connection *connection,
152 uint16_t data_cport, uint16_t size)
153{
154 struct gb_audio_set_tx_data_size_request req;
155
156 req.data_cport = cpu_to_le16(data_cport);
157 req.size = cpu_to_le16(size);
158
159 return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_TX_DATA_SIZE,
160 &req, sizeof(req), NULL, 0);
161}
162EXPORT_SYMBOL_GPL(gb_audio_gb_set_tx_data_size);
163
164int gb_audio_gb_activate_tx(struct gb_connection *connection,
165 uint16_t data_cport)
166{
167 struct gb_audio_activate_tx_request req;
168
169 req.data_cport = cpu_to_le16(data_cport);
170
171 return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_TX,
172 &req, sizeof(req), NULL, 0);
173}
174EXPORT_SYMBOL_GPL(gb_audio_gb_activate_tx);
175
176int gb_audio_gb_deactivate_tx(struct gb_connection *connection,
177 uint16_t data_cport)
178{
179 struct gb_audio_deactivate_tx_request req;
180
181 req.data_cport = cpu_to_le16(data_cport);
182
183 return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_TX,
184 &req, sizeof(req), NULL, 0);
185}
186EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_tx);
187
188int gb_audio_gb_set_rx_data_size(struct gb_connection *connection,
189 uint16_t data_cport, uint16_t size)
190{
191 struct gb_audio_set_rx_data_size_request req;
192
193 req.data_cport = cpu_to_le16(data_cport);
194 req.size = cpu_to_le16(size);
195
196 return gb_operation_sync(connection, GB_AUDIO_TYPE_SET_RX_DATA_SIZE,
197 &req, sizeof(req), NULL, 0);
198}
199EXPORT_SYMBOL_GPL(gb_audio_gb_set_rx_data_size);
200
201int gb_audio_gb_activate_rx(struct gb_connection *connection,
202 uint16_t data_cport)
203{
204 struct gb_audio_activate_rx_request req;
205
206 req.data_cport = cpu_to_le16(data_cport);
207
208 return gb_operation_sync(connection, GB_AUDIO_TYPE_ACTIVATE_RX,
209 &req, sizeof(req), NULL, 0);
210}
211EXPORT_SYMBOL_GPL(gb_audio_gb_activate_rx);
212
213int gb_audio_gb_deactivate_rx(struct gb_connection *connection,
214 uint16_t data_cport)
215{
216 struct gb_audio_deactivate_rx_request req;
217
218 req.data_cport = cpu_to_le16(data_cport);
219
220 return gb_operation_sync(connection, GB_AUDIO_TYPE_DEACTIVATE_RX,
221 &req, sizeof(req), NULL, 0);
222}
223EXPORT_SYMBOL_GPL(gb_audio_gb_deactivate_rx);
224
225MODULE_LICENSE("GPL v2");
226MODULE_ALIAS("greybus:audio-gb");
227MODULE_DESCRIPTION("Greybus Audio Device Class Protocol library");
228MODULE_AUTHOR("Mark Greer <mgreer@animalcreek.com>");
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
new file mode 100644
index 000000000000..aa6508b44fab
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.c
@@ -0,0 +1,184 @@
1/*
2 * Greybus operations
3 *
4 * Copyright 2015-2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/string.h>
10#include <linux/sysfs.h>
11#include <linux/module.h>
12#include <linux/init.h>
13#include <linux/rwlock.h>
14#include <linux/idr.h>
15
16#include "audio_manager.h"
17#include "audio_manager_private.h"
18
19static struct kset *manager_kset;
20
21static LIST_HEAD(modules_list);
22static DECLARE_RWSEM(modules_rwsem);
23static DEFINE_IDA(module_id);
24
25/* helpers */
26static struct gb_audio_manager_module *gb_audio_manager_get_locked(int id)
27{
28 struct gb_audio_manager_module *module;
29
30 if (id < 0)
31 return NULL;
32
33 list_for_each_entry(module, &modules_list, list) {
34 if (module->id == id)
35 return module;
36 }
37
38 return NULL;
39}
40
41/* public API */
42int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
43{
44 struct gb_audio_manager_module *module;
45 int id;
46 int err;
47
48 id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
49 err = gb_audio_manager_module_create(&module, manager_kset,
50 id, desc);
51 if (err) {
52 ida_simple_remove(&module_id, id);
53 return err;
54 }
55
56 /* Add it to the list */
57 down_write(&modules_rwsem);
58 list_add_tail(&module->list, &modules_list);
59 up_write(&modules_rwsem);
60
61 return module->id;
62}
63EXPORT_SYMBOL_GPL(gb_audio_manager_add);
64
65int gb_audio_manager_remove(int id)
66{
67 struct gb_audio_manager_module *module;
68
69 down_write(&modules_rwsem);
70
71 module = gb_audio_manager_get_locked(id);
72 if (!module) {
73 up_write(&modules_rwsem);
74 return -EINVAL;
75 }
76 list_del(&module->list);
77 kobject_put(&module->kobj);
78 up_write(&modules_rwsem);
79 ida_simple_remove(&module_id, id);
80 return 0;
81}
82EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
83
84void gb_audio_manager_remove_all(void)
85{
86 struct gb_audio_manager_module *module, *next;
87 int is_empty = 1;
88
89 down_write(&modules_rwsem);
90
91 list_for_each_entry_safe(module, next, &modules_list, list) {
92 list_del(&module->list);
93 kobject_put(&module->kobj);
94 ida_simple_remove(&module_id, module->id);
95 }
96
97 is_empty = list_empty(&modules_list);
98
99 up_write(&modules_rwsem);
100
101 if (!is_empty)
102 pr_warn("Not all nodes were deleted\n");
103}
104EXPORT_SYMBOL_GPL(gb_audio_manager_remove_all);
105
106struct gb_audio_manager_module *gb_audio_manager_get_module(int id)
107{
108 struct gb_audio_manager_module *module;
109
110 down_read(&modules_rwsem);
111 module = gb_audio_manager_get_locked(id);
112 kobject_get(&module->kobj);
113 up_read(&modules_rwsem);
114 return module;
115}
116EXPORT_SYMBOL_GPL(gb_audio_manager_get_module);
117
/* Drop the reference taken by gb_audio_manager_get_module(). */
void gb_audio_manager_put_module(struct gb_audio_manager_module *module)
{
	kobject_put(&module->kobj);
}
EXPORT_SYMBOL_GPL(gb_audio_manager_put_module);
123
124int gb_audio_manager_dump_module(int id)
125{
126 struct gb_audio_manager_module *module;
127
128 down_read(&modules_rwsem);
129 module = gb_audio_manager_get_locked(id);
130 up_read(&modules_rwsem);
131
132 if (!module)
133 return -EINVAL;
134
135 gb_audio_manager_module_dump(module);
136 return 0;
137}
138EXPORT_SYMBOL_GPL(gb_audio_manager_dump_module);
139
140void gb_audio_manager_dump_all(void)
141{
142 struct gb_audio_manager_module *module;
143 int count = 0;
144
145 down_read(&modules_rwsem);
146 list_for_each_entry(module, &modules_list, list) {
147 gb_audio_manager_module_dump(module);
148 count++;
149 }
150 up_read(&modules_rwsem);
151
152 pr_info("Number of connected modules: %d\n", count);
153}
154EXPORT_SYMBOL_GPL(gb_audio_manager_dump_all);
155
156/*
157 * module init/deinit
158 */
/*
 * Create the "gb_audio_manager" kset under /sys/kernel.  Optional sysfs
 * control files are added when GB_AUDIO_MANAGER_SYSFS is defined.
 */
static int __init manager_init(void)
{
	manager_kset = kset_create_and_add(GB_AUDIO_MANAGER_NAME, NULL,
					   kernel_kobj);
	if (!manager_kset)
		return -ENOMEM;

#ifdef GB_AUDIO_MANAGER_SYSFS
	gb_audio_manager_sysfs_init(&manager_kset->kobj);
#endif

	return 0;
}
172
static void __exit manager_exit(void)
{
	/* drop any modules never explicitly removed, then the kset itself */
	gb_audio_manager_remove_all();
	kset_unregister(manager_kset);
	ida_destroy(&module_id);
}
179
180module_init(manager_init);
181module_exit(manager_exit);
182
183MODULE_LICENSE("GPL");
184MODULE_AUTHOR("Svetlin Ankov <ankov_svetlin@projectara.com>");
diff --git a/drivers/staging/greybus/audio_manager.h b/drivers/staging/greybus/audio_manager.h
new file mode 100644
index 000000000000..c4ca09754a6a
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager.h
@@ -0,0 +1,83 @@
/*
 * Greybus operations
 *
 * Copyright 2015-2016 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#ifndef _GB_AUDIO_MANAGER_H_
#define _GB_AUDIO_MANAGER_H_

#include <linux/kobject.h>
#include <linux/list.h>

#define GB_AUDIO_MANAGER_NAME "gb_audio_manager"
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN 64
/* GB_AUDIO_MANAGER_MODULE_NAME_LEN - 1 as a string, for sscanf() widths. */
#define GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "63"

/* Caller-supplied description of one audio module. */
struct gb_audio_manager_module_descriptor {
	char name[GB_AUDIO_MANAGER_MODULE_NAME_LEN];
	int slot;
	int vid;
	int pid;
	int cport;
	unsigned int ip_devices;
	unsigned int op_devices;
};

/* A registered module: refcounted via kobj, linked into the manager list. */
struct gb_audio_manager_module {
	struct kobject kobj;
	struct list_head list;
	int id;
	struct gb_audio_manager_module_descriptor desc;
};

/*
 * Creates a new gb_audio_manager_module from the specified descriptor
 * (the descriptor is copied; the caller retains ownership of @desc).
 *
 * Returns a negative result on error, or the id of the newly created module.
 *
 */
int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc);

/*
 * Removes a connected gb_audio_manager_module for the specified ID.
 *
 * Returns zero on success, or a negative value on error.
 */
int gb_audio_manager_remove(int id);

/*
 * Removes all connected gb_audio_modules.
 */
void gb_audio_manager_remove_all(void);

/*
 * Retrieves the gb_audio_manager_module for the specified id with its
 * reference count incremented, or NULL if there is no module with the
 * specified ID. The caller must release it with
 * gb_audio_manager_put_module().
 */
struct gb_audio_manager_module *gb_audio_manager_get_module(int id);

/*
 * Decreases the refcount of the module, obtained by the get function.
 * Modules are removed via gb_audio_manager_remove()
 */
void gb_audio_manager_put_module(struct gb_audio_manager_module *module);

/*
 * Dumps the module for the specified id.
 * Returns 0 on success, or a negative value if the id is unknown.
 */
int gb_audio_manager_dump_module(int id);

/*
 * Dumps all connected modules.
 */
void gb_audio_manager_dump_all(void);

#endif /* _GB_AUDIO_MANAGER_H_ */
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
new file mode 100644
index 000000000000..a10e96ad79c1
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -0,0 +1,258 @@
1/*
2 * Greybus operations
3 *
4 * Copyright 2015-2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/slab.h>
10
11#include "audio_manager.h"
12#include "audio_manager_private.h"
13
/* Map a generic attribute / kobject back to our wrapper structures. */
#define to_gb_audio_module_attr(x) \
	container_of(x, struct gb_audio_manager_module_attribute, attr)
#define to_gb_audio_module(x) \
	container_of(x, struct gb_audio_manager_module, kobj)

/*
 * Module-specific sysfs attribute: show/store callbacks receive the
 * owning module directly instead of a raw kobject.
 */
struct gb_audio_manager_module_attribute {
	struct attribute attr;
	ssize_t (*show)(struct gb_audio_manager_module *module,
			struct gb_audio_manager_module_attribute *attr,
			char *buf);
	ssize_t (*store)(struct gb_audio_manager_module *module,
			 struct gb_audio_manager_module_attribute *attr,
			 const char *buf, size_t count);
};
28
29static ssize_t gb_audio_module_attr_show(
30 struct kobject *kobj, struct attribute *attr, char *buf)
31{
32 struct gb_audio_manager_module_attribute *attribute;
33 struct gb_audio_manager_module *module;
34
35 attribute = to_gb_audio_module_attr(attr);
36 module = to_gb_audio_module(kobj);
37
38 if (!attribute->show)
39 return -EIO;
40
41 return attribute->show(module, attribute, buf);
42}
43
44static ssize_t gb_audio_module_attr_store(struct kobject *kobj,
45 struct attribute *attr,
46 const char *buf, size_t len)
47{
48 struct gb_audio_manager_module_attribute *attribute;
49 struct gb_audio_manager_module *module;
50
51 attribute = to_gb_audio_module_attr(attr);
52 module = to_gb_audio_module(kobj);
53
54 if (!attribute->store)
55 return -EIO;
56
57 return attribute->store(module, attribute, buf, len);
58}
59
/* Dispatch table wiring kobject sysfs I/O to the wrappers above. */
static const struct sysfs_ops gb_audio_module_sysfs_ops = {
	.show = gb_audio_module_attr_show,
	.store = gb_audio_module_attr_store,
};
64
/* kobject release: runs once the last reference to the module is put. */
static void gb_audio_module_release(struct kobject *kobj)
{
	struct gb_audio_manager_module *module = to_gb_audio_module(kobj);

	pr_info("Destroying audio module #%d\n", module->id);
	/* TODO -> delete from list */
	kfree(module);
}
73
74static ssize_t gb_audio_module_name_show(
75 struct gb_audio_manager_module *module,
76 struct gb_audio_manager_module_attribute *attr, char *buf)
77{
78 return sprintf(buf, "%s", module->desc.name);
79}
80
81static struct gb_audio_manager_module_attribute gb_audio_module_name_attribute =
82 __ATTR(name, 0664, gb_audio_module_name_show, NULL);
83
84static ssize_t gb_audio_module_slot_show(
85 struct gb_audio_manager_module *module,
86 struct gb_audio_manager_module_attribute *attr, char *buf)
87{
88 return sprintf(buf, "%d", module->desc.slot);
89}
90
91static struct gb_audio_manager_module_attribute gb_audio_module_slot_attribute =
92 __ATTR(slot, 0664, gb_audio_module_slot_show, NULL);
93
94static ssize_t gb_audio_module_vid_show(
95 struct gb_audio_manager_module *module,
96 struct gb_audio_manager_module_attribute *attr, char *buf)
97{
98 return sprintf(buf, "%d", module->desc.vid);
99}
100
101static struct gb_audio_manager_module_attribute gb_audio_module_vid_attribute =
102 __ATTR(vid, 0664, gb_audio_module_vid_show, NULL);
103
104static ssize_t gb_audio_module_pid_show(
105 struct gb_audio_manager_module *module,
106 struct gb_audio_manager_module_attribute *attr, char *buf)
107{
108 return sprintf(buf, "%d", module->desc.pid);
109}
110
111static struct gb_audio_manager_module_attribute gb_audio_module_pid_attribute =
112 __ATTR(pid, 0664, gb_audio_module_pid_show, NULL);
113
114static ssize_t gb_audio_module_cport_show(
115 struct gb_audio_manager_module *module,
116 struct gb_audio_manager_module_attribute *attr, char *buf)
117{
118 return sprintf(buf, "%d", module->desc.cport);
119}
120
121static struct gb_audio_manager_module_attribute
122 gb_audio_module_cport_attribute =
123 __ATTR(cport, 0664, gb_audio_module_cport_show, NULL);
124
125static ssize_t gb_audio_module_ip_devices_show(
126 struct gb_audio_manager_module *module,
127 struct gb_audio_manager_module_attribute *attr, char *buf)
128{
129 return sprintf(buf, "0x%X", module->desc.ip_devices);
130}
131
132static struct gb_audio_manager_module_attribute
133 gb_audio_module_ip_devices_attribute =
134 __ATTR(ip_devices, 0664, gb_audio_module_ip_devices_show, NULL);
135
136static ssize_t gb_audio_module_op_devices_show(
137 struct gb_audio_manager_module *module,
138 struct gb_audio_manager_module_attribute *attr, char *buf)
139{
140 return sprintf(buf, "0x%X", module->desc.op_devices);
141}
142
143static struct gb_audio_manager_module_attribute
144 gb_audio_module_op_devices_attribute =
145 __ATTR(op_devices, 0664, gb_audio_module_op_devices_show, NULL);
146
/* Attributes created by default for every module kobject. */
static struct attribute *gb_audio_module_default_attrs[] = {
	&gb_audio_module_name_attribute.attr,
	&gb_audio_module_slot_attribute.attr,
	&gb_audio_module_vid_attribute.attr,
	&gb_audio_module_pid_attribute.attr,
	&gb_audio_module_cport_attribute.attr,
	&gb_audio_module_ip_devices_attribute.attr,
	&gb_audio_module_op_devices_attribute.attr,
	NULL, /* need to NULL terminate the list of attributes */
};
157
/* kobject type for audio modules; release() frees the module. */
static struct kobj_type gb_audio_module_type = {
	.sysfs_ops = &gb_audio_module_sysfs_ops,
	.release = gb_audio_module_release,
	.default_attrs = gb_audio_module_default_attrs,
};
163
164static void send_add_uevent(struct gb_audio_manager_module *module)
165{
166 char name_string[128];
167 char slot_string[64];
168 char vid_string[64];
169 char pid_string[64];
170 char cport_string[64];
171 char ip_devices_string[64];
172 char op_devices_string[64];
173
174 char *envp[] = {
175 name_string,
176 slot_string,
177 vid_string,
178 pid_string,
179 cport_string,
180 ip_devices_string,
181 op_devices_string,
182 NULL
183 };
184
185 snprintf(name_string, 128, "NAME=%s", module->desc.name);
186 snprintf(slot_string, 64, "SLOT=%d", module->desc.slot);
187 snprintf(vid_string, 64, "VID=%d", module->desc.vid);
188 snprintf(pid_string, 64, "PID=%d", module->desc.pid);
189 snprintf(cport_string, 64, "CPORT=%d", module->desc.cport);
190 snprintf(ip_devices_string, 64, "I/P DEVICES=0x%X",
191 module->desc.ip_devices);
192 snprintf(op_devices_string, 64, "O/P DEVICES=0x%X",
193 module->desc.op_devices);
194
195 kobject_uevent_env(&module->kobj, KOBJ_ADD, envp);
196}
197
/*
 * Allocate and register a new module kobject under @manager_kset,
 * named after its numeric @id, with @desc copied into it.
 *
 * On success the module is returned through @module and a KOBJ_ADD
 * uevent describing it has been sent. Returns 0 or a negative errno.
 */
int gb_audio_manager_module_create(
	struct gb_audio_manager_module **module,
	struct kset *manager_kset,
	int id, struct gb_audio_manager_module_descriptor *desc)
{
	int err;
	struct gb_audio_manager_module *m;

	/* GFP_ATOMIC: presumably reachable from atomic context --
	 * TODO confirm; otherwise GFP_KERNEL would suffice. */
	m = kzalloc(sizeof(*m), GFP_ATOMIC);
	if (!m)
		return -ENOMEM;

	/* Initialize the node */
	INIT_LIST_HEAD(&m->list);

	/* Set the module id */
	m->id = id;

	/* Copy the provided descriptor */
	memcpy(&m->desc, desc, sizeof(*desc));

	/* set the kset */
	m->kobj.kset = manager_kset;

	/*
	 * Initialize and add the kobject to the kernel. All the default files
	 * will be created here. As we have already specified a kset for this
	 * kobject, we don't have to set a parent for the kobject, the kobject
	 * will be placed beneath that kset automatically.
	 */
	err = kobject_init_and_add(&m->kobj, &gb_audio_module_type, NULL, "%d",
				   id);
	if (err) {
		pr_err("failed initializing kobject for audio module #%d\n",
		       id);
		/* kobject_put(), not kfree(): init took a reference. */
		kobject_put(&m->kobj);
		return err;
	}

	/*
	 * Notify the object was created
	 */
	send_add_uevent(m);

	*module = m;
	pr_info("Created audio module #%d\n", id);
	return 0;
}
246
/* Log a one-line summary of the module's descriptor fields. */
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module)
{
	pr_info("audio module #%d name=%s slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X o/p devices=0x%X\n",
		module->id,
		module->desc.name,
		module->desc.slot,
		module->desc.vid,
		module->desc.pid,
		module->desc.cport,
		module->desc.ip_devices,
		module->desc.op_devices);
}
diff --git a/drivers/staging/greybus/audio_manager_private.h b/drivers/staging/greybus/audio_manager_private.h
new file mode 100644
index 000000000000..079ce953c256
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_private.h
@@ -0,0 +1,28 @@
/*
 * Greybus operations
 *
 * Copyright 2015-2016 Google Inc.
 *
 * Released under the GPLv2 only.
 */

#ifndef _GB_AUDIO_MANAGER_PRIVATE_H_
#define _GB_AUDIO_MANAGER_PRIVATE_H_

#include <linux/kobject.h>

#include "audio_manager.h"

/*
 * Allocate, initialize and register a module kobject under
 * @manager_kset; the new module is returned through @module.
 */
int gb_audio_manager_module_create(
	struct gb_audio_manager_module **module,
	struct kset *manager_kset,
	int id, struct gb_audio_manager_module_descriptor *desc);

/* module destroyed via kobject_put */

/* Log a one-line summary of @module to the kernel log. */
void gb_audio_manager_module_dump(struct gb_audio_manager_module *module);

/* sysfs control: create the add/remove/dump files under @kobj. */
void gb_audio_manager_sysfs_init(struct kobject *kobj);

#endif /* _GB_AUDIO_MANAGER_PRIVATE_H_ */
diff --git a/drivers/staging/greybus/audio_manager_sysfs.c b/drivers/staging/greybus/audio_manager_sysfs.c
new file mode 100644
index 000000000000..d8bf8591ff9e
--- /dev/null
+++ b/drivers/staging/greybus/audio_manager_sysfs.c
@@ -0,0 +1,102 @@
1/*
2 * Greybus operations
3 *
4 * Copyright 2015-2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/string.h>
10#include <linux/sysfs.h>
11
12#include "audio_manager.h"
13#include "audio_manager_private.h"
14
15static ssize_t manager_sysfs_add_store(
16 struct kobject *kobj, struct kobj_attribute *attr,
17 const char *buf, size_t count)
18{
19 struct gb_audio_manager_module_descriptor desc = { {0} };
20
21 int num = sscanf(buf,
22 "name=%" GB_AUDIO_MANAGER_MODULE_NAME_LEN_SSCANF "s "
23 "slot=%d vid=%d pid=%d cport=%d i/p devices=0x%X"
24 "o/p devices=0x%X",
25 desc.name, &desc.slot, &desc.vid, &desc.pid,
26 &desc.cport, &desc.ip_devices, &desc.op_devices);
27
28 if (num != 7)
29 return -EINVAL;
30
31 num = gb_audio_manager_add(&desc);
32 if (num < 0)
33 return -EINVAL;
34
35 return count;
36}
37
38static struct kobj_attribute manager_add_attribute =
39 __ATTR(add, 0664, NULL, manager_sysfs_add_store);
40
41static ssize_t manager_sysfs_remove_store(
42 struct kobject *kobj, struct kobj_attribute *attr,
43 const char *buf, size_t count)
44{
45 int id;
46
47 int num = sscanf(buf, "%d", &id);
48
49 if (num != 1)
50 return -EINVAL;
51
52 num = gb_audio_manager_remove(id);
53 if (num)
54 return num;
55
56 return count;
57}
58
59static struct kobj_attribute manager_remove_attribute =
60 __ATTR(remove, 0664, NULL, manager_sysfs_remove_store);
61
62static ssize_t manager_sysfs_dump_store(
63 struct kobject *kobj, struct kobj_attribute *attr,
64 const char *buf, size_t count)
65{
66 int id;
67
68 int num = sscanf(buf, "%d", &id);
69
70 if (num == 1) {
71 num = gb_audio_manager_dump_module(id);
72 if (num)
73 return num;
74 } else if (!strncmp("all", buf, 3))
75 gb_audio_manager_dump_all();
76 else
77 return -EINVAL;
78
79 return count;
80}
81
82static struct kobj_attribute manager_dump_attribute =
83 __ATTR(dump, 0664, NULL, manager_sysfs_dump_store);
84
85static void manager_sysfs_init_attribute(
86 struct kobject *kobj, struct kobj_attribute *kattr)
87{
88 int err;
89
90 err = sysfs_create_file(kobj, &kattr->attr);
91 if (err) {
92 pr_warn("creating the sysfs entry for %s failed: %d\n",
93 kattr->attr.name, err);
94 }
95}
96
/* Create the add/remove/dump control files under @kobj. */
void gb_audio_manager_sysfs_init(struct kobject *kobj)
{
	manager_sysfs_init_attribute(kobj, &manager_add_attribute);
	manager_sysfs_init_attribute(kobj, &manager_remove_attribute);
	manager_sysfs_init_attribute(kobj, &manager_dump_attribute);
}
diff --git a/drivers/staging/greybus/audio_module.c b/drivers/staging/greybus/audio_module.c
new file mode 100644
index 000000000000..ae1c0fa85752
--- /dev/null
+++ b/drivers/staging/greybus/audio_module.c
@@ -0,0 +1,482 @@
1/*
2 * Greybus audio driver
3 * Copyright 2015 Google Inc.
4 * Copyright 2015 Linaro Ltd.
5 *
6 * Released under the GPLv2 only.
7 */
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <sound/soc.h>
11#include <sound/pcm_params.h>
12
13#include "audio_codec.h"
14#include "audio_apbridgea.h"
15#include "audio_manager.h"
16
17/*
18 * gb_snd management functions
19 */
20
/*
 * Handle a jack event request from the module.
 *
 * Reports insertion/removal state changes to ALSA through the module's
 * headset jack; on removal, any buttons still reported as pressed are
 * released first. Returns 0 on success, -EINVAL for events that
 * cannot be reported.
 */
static int gbaudio_request_jack(struct gbaudio_module_info *module,
				struct gb_audio_jack_event_request *req)
{
	int report;
	struct snd_jack *jack = module->headset_jack.jack;
	struct snd_jack *btn_jack = module->button_jack.jack;

	/* No jack registered yet: nothing to report the event against. */
	if (!jack) {
		dev_err_ratelimited(module->dev,
				    "Invalid jack event received:type: %u, event: %u\n",
				    req->jack_attribute, req->event);
		return -EINVAL;
	}

	dev_warn_ratelimited(module->dev,
			     "Jack Event received: type: %u, event: %u\n",
			     req->jack_attribute, req->event);

	if (req->event == GB_AUDIO_JACK_EVENT_REMOVAL) {
		module->jack_type = 0;
		/* Release any pending button state before the jack itself. */
		if (btn_jack && module->button_status) {
			snd_soc_jack_report(&module->button_jack, 0,
					    module->button_mask);
			module->button_status = 0;
		}
		snd_soc_jack_report(&module->headset_jack, 0,
				    module->jack_mask);
		return 0;
	}

	/* Only attributes covered by the configured mask are reportable. */
	report = req->jack_attribute & module->jack_mask;
	if (!report) {
		dev_err_ratelimited(module->dev,
				    "Invalid jack event received:type: %u, event: %u\n",
				    req->jack_attribute, req->event);
		return -EINVAL;
	}

	if (module->jack_type)
		dev_warn_ratelimited(module->dev,
				     "Modifying jack from %d to %d\n",
				     module->jack_type, report);

	module->jack_type = report;
	snd_soc_jack_report(&module->headset_jack, report, module->jack_mask);

	return 0;
}
69
/*
 * Handle a button press/release event from the module.
 *
 * Maps the protocol button id (1..4) onto SND_JACK_BTN_0..3, masks it
 * against the configured button_mask, and reports the updated pressed
 * set to ALSA. Returns 0 on success, -EINVAL for bogus events.
 */
static int gbaudio_request_button(struct gbaudio_module_info *module,
				  struct gb_audio_button_event_request *req)
{
	int soc_button_id, report;
	struct snd_jack *btn_jack = module->button_jack.jack;

	if (!btn_jack) {
		dev_err_ratelimited(module->dev,
				    "Invalid button event received:type: %u, event: %u\n",
				    req->button_id, req->event);
		return -EINVAL;
	}

	dev_warn_ratelimited(module->dev,
			     "Button Event received: id: %u, event: %u\n",
			     req->button_id, req->event);

	/* currently supports 4 buttons only */
	if (!module->jack_type) {
		/* A button event without an inserted jack makes no sense. */
		dev_err_ratelimited(module->dev,
				    "Jack not present. Bogus event!!\n");
		return -EINVAL;
	}

	report = module->button_status & module->button_mask;
	soc_button_id = 0;

	switch (req->button_id) {
	case 1:
		soc_button_id = SND_JACK_BTN_0 & module->button_mask;
		break;

	case 2:
		soc_button_id = SND_JACK_BTN_1 & module->button_mask;
		break;

	case 3:
		soc_button_id = SND_JACK_BTN_2 & module->button_mask;
		break;

	case 4:
		soc_button_id = SND_JACK_BTN_3 & module->button_mask;
		break;
	}

	/* Unknown id, or a button not enabled in the configured mask. */
	if (!soc_button_id) {
		dev_err_ratelimited(module->dev,
				    "Invalid button request received\n");
		return -EINVAL;
	}

	if (req->event == GB_AUDIO_BUTTON_EVENT_PRESS)
		report = report | soc_button_id;
	else
		report = report & ~soc_button_id;

	module->button_status = report;

	snd_soc_jack_report(&module->button_jack, report, module->button_mask);

	return 0;
}
132
/* Streaming events are only logged for now; no state is updated. */
static int gbaudio_request_stream(struct gbaudio_module_info *module,
				  struct gb_audio_streaming_event_request *req)
{
	dev_warn(module->dev, "Audio Event received: cport: %u, event: %u\n",
		 req->data_cport, req->event);

	return 0;
}
141
/*
 * Request handler for the management connection: dispatch incoming
 * audio events (streaming / jack / button) by operation type.
 */
static int gbaudio_codec_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gbaudio_module_info *module =
		greybus_get_drvdata(connection->bundle);
	struct gb_operation_msg_hdr *header = op->request->header;
	struct gb_audio_streaming_event_request *stream_req;
	struct gb_audio_jack_event_request *jack_req;
	struct gb_audio_button_event_request *button_req;
	int ret;

	switch (header->type) {
	case GB_AUDIO_TYPE_STREAMING_EVENT:
		stream_req = op->request->payload;
		ret = gbaudio_request_stream(module, stream_req);
		break;

	case GB_AUDIO_TYPE_JACK_EVENT:
		jack_req = op->request->payload;
		ret = gbaudio_request_jack(module, jack_req);
		break;

	case GB_AUDIO_TYPE_BUTTON_EVENT:
		button_req = op->request->payload;
		ret = gbaudio_request_button(module, button_req);
		break;

	default:
		dev_err_ratelimited(&connection->bundle->dev,
				    "Invalid Audio Event received\n");
		return -EINVAL;
	}

	return ret;
}
177
/*
 * Create the (single) management connection for the bundle; at most
 * one management cport is allowed per module.
 */
static int gb_audio_add_mgmt_connection(struct gbaudio_module_info *gbmodule,
					struct greybus_descriptor_cport *cport_desc,
					struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	/* Management Cport */
	if (gbmodule->mgmt_connection) {
		dev_err(&bundle->dev,
			"Can't have multiple Management connections\n");
		return -ENODEV;
	}

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gbaudio_codec_request_handler);
	if (IS_ERR(connection))
		return PTR_ERR(connection);

	greybus_set_drvdata(bundle, gbmodule);
	gbmodule->mgmt_connection = connection;

	return 0;
}
201
202static int gb_audio_add_data_connection(struct gbaudio_module_info *gbmodule,
203 struct greybus_descriptor_cport *cport_desc,
204 struct gb_bundle *bundle)
205{
206 struct gb_connection *connection;
207 struct gbaudio_data_connection *dai;
208
209 dai = devm_kzalloc(gbmodule->dev, sizeof(*dai), GFP_KERNEL);
210 if (!dai) {
211 dev_err(gbmodule->dev, "DAI Malloc failure\n");
212 return -ENOMEM;
213 }
214
215 connection = gb_connection_create_offloaded(bundle,
216 le16_to_cpu(cport_desc->id),
217 GB_CONNECTION_FLAG_CSD);
218 if (IS_ERR(connection)) {
219 devm_kfree(gbmodule->dev, dai);
220 return PTR_ERR(connection);
221 }
222
223 greybus_set_drvdata(bundle, gbmodule);
224 dai->id = 0;
225 dai->data_cport = connection->intf_cport_id;
226 dai->connection = connection;
227 list_add(&dai->list, &gbmodule->data_list);
228
229 return 0;
230}
231
/*
 * This is the basic hook get things initialized and registered w/ gb
 */

/*
 * Probe: create the management connection plus one data connection per
 * remaining cport, fetch and parse the audio topology, enable all
 * connections, register the module with the codec and finally announce
 * it to the audio manager. Failures unwind through the goto ladder at
 * the end, each label undoing one successful stage.
 */
static int gb_audio_probe(struct gb_bundle *bundle,
			  const struct greybus_bundle_id *id)
{
	struct device *dev = &bundle->dev;
	struct gbaudio_module_info *gbmodule;
	struct greybus_descriptor_cport *cport_desc;
	struct gb_audio_manager_module_descriptor desc;
	struct gbaudio_data_connection *dai, *_dai;
	int ret, i;
	struct gb_audio_topology *topology;

	/* There should be at least one Management and one Data cport */
	if (bundle->num_cports < 2)
		return -ENODEV;

	/*
	 * There can be only one Management connection and any number of data
	 * connections.
	 */
	gbmodule = devm_kzalloc(dev, sizeof(*gbmodule), GFP_KERNEL);
	if (!gbmodule)
		return -ENOMEM;

	gbmodule->num_data_connections = bundle->num_cports - 1;
	INIT_LIST_HEAD(&gbmodule->data_list);
	INIT_LIST_HEAD(&gbmodule->widget_list);
	INIT_LIST_HEAD(&gbmodule->ctl_list);
	INIT_LIST_HEAD(&gbmodule->widget_ctl_list);
	gbmodule->dev = dev;
	snprintf(gbmodule->name, NAME_SIZE, "%s.%s", dev->driver->name,
		 dev_name(dev));
	greybus_set_drvdata(bundle, gbmodule);

	/* Create all connections */
	for (i = 0; i < bundle->num_cports; i++) {
		cport_desc = &bundle->cport_desc[i];

		switch (cport_desc->protocol_id) {
		case GREYBUS_PROTOCOL_AUDIO_MGMT:
			ret = gb_audio_add_mgmt_connection(gbmodule, cport_desc,
							   bundle);
			if (ret)
				goto destroy_connections;
			break;
		case GREYBUS_PROTOCOL_AUDIO_DATA:
			ret = gb_audio_add_data_connection(gbmodule, cport_desc,
							   bundle);
			if (ret)
				goto destroy_connections;
			break;
		default:
			dev_err(dev, "Unsupported protocol: 0x%02x\n",
				cport_desc->protocol_id);
			ret = -ENODEV;
			goto destroy_connections;
		}
	}

	/* There must be a management cport */
	if (!gbmodule->mgmt_connection) {
		ret = -EINVAL;
		dev_err(dev, "Missing management connection\n");
		goto destroy_connections;
	}

	/* Initialize management connection */
	ret = gb_connection_enable(gbmodule->mgmt_connection);
	if (ret) {
		dev_err(dev, "%d: Error while enabling mgmt connection\n", ret);
		goto destroy_connections;
	}
	gbmodule->dev_id = gbmodule->mgmt_connection->intf->interface_id;

	/*
	 * FIXME: malloc for topology happens via audio_gb driver
	 * should be done within codec driver itself
	 */
	ret = gb_audio_gb_get_topology(gbmodule->mgmt_connection, &topology);
	if (ret) {
		dev_err(dev, "%d:Error while fetching topology\n", ret);
		goto disable_connection;
	}

	/* process topology data */
	ret = gbaudio_tplg_parse_data(gbmodule, topology);
	if (ret) {
		dev_err(dev, "%d:Error while parsing topology data\n",
			ret);
		goto free_topology;
	}
	gbmodule->topology = topology;

	/* Initialize data connections */
	list_for_each_entry(dai, &gbmodule->data_list, list) {
		ret = gb_connection_enable(dai->connection);
		if (ret) {
			dev_err(dev,
				"%d:Error while enabling %d:data connection\n",
				ret, dai->data_cport);
			goto disable_data_connection;
		}
	}

	/* register module with gbcodec */
	ret = gbaudio_register_module(gbmodule);
	if (ret)
		goto disable_data_connection;

	/* inform above layer for uevent */
	dev_dbg(dev, "Inform set_event:%d to above layer\n", 1);
	/* prepare for the audio manager */
	strlcpy(desc.name, gbmodule->name, GB_AUDIO_MANAGER_MODULE_NAME_LEN);
	desc.slot = 1; /* todo */
	desc.vid = 2; /* todo */
	desc.pid = 3; /* todo */
	desc.cport = gbmodule->dev_id;
	desc.op_devices = gbmodule->op_devices;
	desc.ip_devices = gbmodule->ip_devices;
	gbmodule->manager_id = gb_audio_manager_add(&desc);

	dev_dbg(dev, "Add GB Audio device:%s\n", gbmodule->name);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

disable_data_connection:
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list)
		gb_connection_disable(dai->connection);
	gbaudio_tplg_release(gbmodule);
	gbmodule->topology = NULL;

free_topology:
	kfree(topology);

disable_connection:
	gb_connection_disable(gbmodule->mgmt_connection);

destroy_connections:
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
		gb_connection_destroy(dai->connection);
		list_del(&dai->list);
		devm_kfree(dev, dai);
	}

	if (gbmodule->mgmt_connection)
		gb_connection_destroy(gbmodule->mgmt_connection);

	devm_kfree(dev, gbmodule);

	return ret;
}
388
/* Teardown: mirror of gb_audio_probe(), roughly in reverse order. */
static void gb_audio_disconnect(struct gb_bundle *bundle)
{
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai, *_dai;

	gb_pm_runtime_get_sync(bundle);

	/* cleanup module related resources first */
	gbaudio_unregister_module(gbmodule);

	/* inform uevent to above layers */
	gb_audio_manager_remove(gbmodule->manager_id);

	gbaudio_tplg_release(gbmodule);
	kfree(gbmodule->topology);
	gbmodule->topology = NULL;
	gb_connection_disable(gbmodule->mgmt_connection);
	list_for_each_entry_safe(dai, _dai, &gbmodule->data_list, list) {
		gb_connection_disable(dai->connection);
		gb_connection_destroy(dai->connection);
		list_del(&dai->list);
		devm_kfree(gbmodule->dev, dai);
	}
	gb_connection_destroy(gbmodule->mgmt_connection);
	gbmodule->mgmt_connection = NULL;

	devm_kfree(&bundle->dev, gbmodule);
}
417
/* Bind to any bundle advertising the audio device class. */
static const struct greybus_bundle_id gb_audio_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_AUDIO) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_audio_id_table);
423
#ifdef CONFIG_PM
/* Runtime suspend: disable every data connection, then management. */
static int gb_audio_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai;

	list_for_each_entry(dai, &gbmodule->data_list, list)
		gb_connection_disable(dai->connection);

	gb_connection_disable(gbmodule->mgmt_connection);

	return 0;
}

/*
 * Runtime resume: re-enable management first, then each data
 * connection. NOTE(review): a failure mid-list returns with earlier
 * connections left enabled -- confirm the core tolerates that.
 */
static int gb_audio_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gbaudio_module_info *gbmodule = greybus_get_drvdata(bundle);
	struct gbaudio_data_connection *dai;
	int ret;

	ret = gb_connection_enable(gbmodule->mgmt_connection);
	if (ret) {
		dev_err(dev, "%d:Error while enabling mgmt connection\n", ret);
		return ret;
	}

	list_for_each_entry(dai, &gbmodule->data_list, list) {
		ret = gb_connection_enable(dai->connection);
		if (ret) {
			dev_err(dev,
				"%d:Error while enabling %d:data connection\n",
				ret, dai->data_cport);
			return ret;
		}
	}

	return 0;
}
#endif
465
/* Runtime-PM only; suspend/resume quiesce and restore all connections. */
static const struct dev_pm_ops gb_audio_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_audio_suspend, gb_audio_resume, NULL)
};

static struct greybus_driver gb_audio_driver = {
	.name = "gb-audio",
	.probe = gb_audio_probe,
	.disconnect = gb_audio_disconnect,
	.id_table = gb_audio_id_table,
	.driver.pm = &gb_audio_pm_ops,
};
module_greybus_driver(gb_audio_driver);

MODULE_DESCRIPTION("Greybus Audio module driver");
MODULE_AUTHOR("Vaibhav Agarwal <vaibhav.agarwal@linaro.org>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:gbaudio-module");
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
new file mode 100644
index 000000000000..5eef5367896c
--- /dev/null
+++ b/drivers/staging/greybus/audio_topology.c
@@ -0,0 +1,1442 @@
1/*
2 * Greybus audio driver
3 * Copyright 2015-2016 Google Inc.
4 * Copyright 2015-2016 Linaro Ltd.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include "audio_codec.h"
10#include "greybus_protocols.h"
11
/* Sentinel meaning "no such control id / enum index". */
#define GBAUDIO_INVALID_ID 0xFF

/* mixer control */
struct gb_mixer_control {
	int min, max;
	unsigned int reg, rreg, shift, rshift, invert;
};

/* Per-kcontrol private data, stored in kcontrol->private_value. */
struct gbaudio_ctl_pvt {
	unsigned int ctl_id;		/* control id on the remote module */
	unsigned int data_cport;	/* cport id copied from the topology */
	unsigned int access;		/* element access flags */
	unsigned int vcount;		/* number of values (1 or 2) */
	struct gb_audio_ctl_elem_info *info;
};
27
28static struct gbaudio_module_info *find_gb_module(
29 struct gbaudio_codec_info *codec,
30 char const *name)
31{
32 int dev_id, ret;
33 char begin[NAME_SIZE];
34 struct gbaudio_module_info *module;
35
36 if (!name)
37 return NULL;
38
39 ret = sscanf(name, "%s %d", begin, &dev_id);
40 dev_dbg(codec->dev, "%s:Find module#%d\n", __func__, dev_id);
41
42 mutex_lock(&codec->lock);
43 list_for_each_entry(module, &codec->module_list, list) {
44 if (module->dev_id == dev_id) {
45 mutex_unlock(&codec->lock);
46 return module;
47 }
48 }
49 mutex_unlock(&codec->lock);
50 dev_warn(codec->dev, "%s: module#%d missing in codec list\n", name,
51 dev_id);
52 return NULL;
53}
54
55static const char *gbaudio_map_controlid(struct gbaudio_module_info *module,
56 __u8 control_id, __u8 index)
57{
58 struct gbaudio_control *control;
59
60 if (control_id == GBAUDIO_INVALID_ID)
61 return NULL;
62
63 list_for_each_entry(control, &module->ctl_list, list) {
64 if (control->id == control_id) {
65 if (index == GBAUDIO_INVALID_ID)
66 return control->name;
67 if (index >= control->items)
68 return NULL;
69 return control->texts[index];
70 }
71 }
72 list_for_each_entry(control, &module->widget_ctl_list, list) {
73 if (control->id == control_id) {
74 if (index == GBAUDIO_INVALID_ID)
75 return control->name;
76 if (index >= control->items)
77 return NULL;
78 return control->texts[index];
79 }
80 }
81 return NULL;
82}
83
84static int gbaudio_map_controlname(struct gbaudio_module_info *module,
85 const char *name)
86{
87 struct gbaudio_control *control;
88
89 list_for_each_entry(control, &module->ctl_list, list) {
90 if (!strncmp(control->name, name, NAME_SIZE))
91 return control->id;
92 }
93
94 dev_warn(module->dev, "%s: missing in modules controls list\n", name);
95
96 return -EINVAL;
97}
98
99static int gbaudio_map_wcontrolname(struct gbaudio_module_info *module,
100 const char *name)
101{
102 struct gbaudio_control *control;
103
104 list_for_each_entry(control, &module->widget_ctl_list, list) {
105 if (!strncmp(control->wname, name, NAME_SIZE))
106 return control->id;
107 }
108 dev_warn(module->dev, "%s: missing in modules controls list\n", name);
109
110 return -EINVAL;
111}
112
113static int gbaudio_map_widgetname(struct gbaudio_module_info *module,
114 const char *name)
115{
116 struct gbaudio_widget *widget;
117 list_for_each_entry(widget, &module->widget_list, list) {
118 if (!strncmp(widget->name, name, NAME_SIZE))
119 return widget->id;
120 }
121 dev_warn(module->dev, "%s: missing in modules widgets list\n", name);
122
123 return -EINVAL;
124}
125
126static const char *gbaudio_map_widgetid(struct gbaudio_module_info *module,
127 __u8 widget_id)
128{
129 struct gbaudio_widget *widget;
130
131 list_for_each_entry(widget, &module->widget_list, list) {
132 if (widget->id == widget_id)
133 return widget->name;
134 }
135 return NULL;
136}
137
138static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
139 struct gb_audio_enumerated *gbenum)
140{
141 const char **strings;
142 int i;
143 __u8 *data;
144
145 strings = devm_kzalloc(gb->dev, sizeof(char *) * gbenum->items,
146 GFP_KERNEL);
147 data = gbenum->names;
148
149 for (i = 0; i < gbenum->items; i++) {
150 strings[i] = (const char *)data;
151 while (*data != '\0')
152 data++;
153 data++;
154 }
155
156 return strings;
157}
158
159static int gbcodec_mixer_ctl_info(struct snd_kcontrol *kcontrol,
160 struct snd_ctl_elem_info *uinfo)
161{
162 unsigned int max;
163 const char *name;
164 struct gbaudio_ctl_pvt *data;
165 struct gb_audio_ctl_elem_info *info;
166 struct gbaudio_module_info *module;
167 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
168 struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
169
170 dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
171 data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
172 info = (struct gb_audio_ctl_elem_info *)data->info;
173
174 if (!info) {
175 dev_err(module->dev, "NULL info for %s\n", uinfo->id.name);
176 return -EINVAL;
177 }
178
179 /* update uinfo */
180 uinfo->access = data->access;
181 uinfo->count = data->vcount;
182 uinfo->type = (snd_ctl_elem_type_t)info->type;
183
184 switch (info->type) {
185 case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
186 case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
187 uinfo->value.integer.min = info->value.integer.min;
188 uinfo->value.integer.max = info->value.integer.max;
189 break;
190 case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
191 max = info->value.enumerated.items;
192 uinfo->value.enumerated.items = max;
193 if (uinfo->value.enumerated.item > max - 1)
194 uinfo->value.enumerated.item = max - 1;
195 module = find_gb_module(gbcodec, kcontrol->id.name);
196 if (!module)
197 return -EINVAL;
198 name = gbaudio_map_controlid(module, data->ctl_id,
199 uinfo->value.enumerated.item);
200 strlcpy(uinfo->value.enumerated.name, name, NAME_SIZE);
201 break;
202 default:
203 dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
204 info->type, kcontrol->id.name);
205 break;
206 }
207 return 0;
208}
209
/*
 * ALSA "get" callback for a module mixer control: read the current
 * value from the module over the management connection and copy it
 * into @ucontrol according to the control's type.
 */
static int gbcodec_mixer_ctl_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret;
	struct gb_audio_ctl_elem_info *info;
	struct gbaudio_ctl_pvt *data;
	struct gb_audio_ctl_elem_value gbvalue;
	struct gbaudio_module_info *module;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
	struct gb_bundle *bundle;

	dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
	/* the control name embeds the module's dev_id ("GB <id> ...") */
	module = find_gb_module(gb, kcontrol->id.name);
	if (!module)
		return -EINVAL;

	data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
	info = (struct gb_audio_ctl_elem_info *)data->info;
	bundle = to_gb_bundle(module->dev);

	/* keep the bundle awake for the remote operation */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
				      GB_AUDIO_INVALID_INDEX, &gbvalue);

	gb_pm_runtime_put_autosuspend(bundle);

	if (ret) {
		dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
				    __func__, kcontrol->id.name);
		return ret;
	}

	/* update ucontrol */
	switch (info->type) {
	case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
	case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
		ucontrol->value.integer.value[0] =
			gbvalue.value.integer_value[0];
		if (data->vcount == 2)
			ucontrol->value.integer.value[1] =
				gbvalue.value.integer_value[1];
		break;
	case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
		ucontrol->value.enumerated.item[0] =
			gbvalue.value.enumerated_item[0];
		if (data->vcount == 2)
			ucontrol->value.enumerated.item[1] =
				gbvalue.value.enumerated_item[1];
		break;
	default:
		dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
			info->type, kcontrol->id.name);
		ret = -EINVAL;
		break;
	}
	return ret;
}
271
272static int gbcodec_mixer_ctl_put(struct snd_kcontrol *kcontrol,
273 struct snd_ctl_elem_value *ucontrol)
274{
275 int ret = 0;
276 struct gb_audio_ctl_elem_info *info;
277 struct gbaudio_ctl_pvt *data;
278 struct gb_audio_ctl_elem_value gbvalue;
279 struct gbaudio_module_info *module;
280 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
281 struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
282 struct gb_bundle *bundle;
283
284 dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
285 module = find_gb_module(gb, kcontrol->id.name);
286 if (!module)
287 return -EINVAL;
288
289 data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
290 info = (struct gb_audio_ctl_elem_info *)data->info;
291 bundle = to_gb_bundle(module->dev);
292
293 /* update ucontrol */
294 switch (info->type) {
295 case GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN:
296 case GB_AUDIO_CTL_ELEM_TYPE_INTEGER:
297 gbvalue.value.integer_value[0] =
298 ucontrol->value.integer.value[0];
299 if (data->vcount == 2)
300 gbvalue.value.integer_value[1] =
301 ucontrol->value.integer.value[1];
302 break;
303 case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
304 gbvalue.value.enumerated_item[0] =
305 ucontrol->value.enumerated.item[0];
306 if (data->vcount == 2)
307 gbvalue.value.enumerated_item[1] =
308 ucontrol->value.enumerated.item[1];
309 break;
310 default:
311 dev_err(codec->dev, "Invalid type: %d for %s:kcontrol\n",
312 info->type, kcontrol->id.name);
313 ret = -EINVAL;
314 break;
315 }
316
317 if (ret)
318 return ret;
319
320 ret = gb_pm_runtime_get_sync(bundle);
321 if (ret)
322 return ret;
323
324 ret = gb_audio_gb_set_control(module->mgmt_connection, data->ctl_id,
325 GB_AUDIO_INVALID_INDEX, &gbvalue);
326
327 gb_pm_runtime_put_autosuspend(bundle);
328
329 if (ret) {
330 dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
331 __func__, kcontrol->id.name);
332 }
333
334 return ret;
335}
336
/* Mixer-interface kcontrol template wired to the gbcodec callbacks;
 * 'data' (a struct gbaudio_ctl_pvt *) goes into private_value. */
#define SOC_MIXER_GB(xname, kcount, data) \
{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
	.count = kcount, .info = gbcodec_mixer_ctl_info, \
	.get = gbcodec_mixer_ctl_get, .put = gbcodec_mixer_ctl_put, \
	.private_value = (unsigned long)data }
342
343/*
344 * although below callback functions seems redundant to above functions.
345 * same are kept to allow provision for different handling in case
346 * of DAPM related sequencing, etc.
347 */
/*
 * "info" callback for DAPM mixer controls.  Reports BOOLEAN for on/off
 * style controls (max == 1 and the name does not contain " Volume"),
 * INTEGER otherwise, and rebases a negative platform minimum to 0 by
 * widening the reported maximum.
 */
static int gbcodec_mixer_dapm_ctl_info(struct snd_kcontrol *kcontrol,
		     struct snd_ctl_elem_info *uinfo)
{
	int platform_max, platform_min;
	struct gbaudio_ctl_pvt *data;
	struct gb_audio_ctl_elem_info *info;
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct snd_soc_codec *codec = widget->codec;

	dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
	data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
	info = (struct gb_audio_ctl_elem_info *)data->info;

	/* update uinfo */
	platform_max = info->value.integer.max;
	platform_min = info->value.integer.min;

	/* a 0/1 control that is not a volume is presented as a switch */
	if (platform_max == 1 &&
	    !strnstr(kcontrol->id.name, " Volume", NAME_SIZE))
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;

	uinfo->count = data->vcount;
	uinfo->value.integer.min = 0;
	/* shift a negative range up so userspace always sees [0, max] */
	if (info->value.integer.min < 0 &&
	    (uinfo->type == SNDRV_CTL_ELEM_TYPE_INTEGER))
		uinfo->value.integer.max = platform_max - platform_min;
	else
		uinfo->value.integer.max = platform_max;

	return 0;
}
382
/*
 * "get" callback for DAPM mixer controls: fetch the current value from
 * the module.  Only mono is supported; a stereo request is served but
 * logged as unsupported (only value[0] is returned).
 */
static int gbcodec_mixer_dapm_ctl_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret;
	struct gb_audio_ctl_elem_info *info;
	struct gbaudio_ctl_pvt *data;
	struct gb_audio_ctl_elem_value gbvalue;
	struct gbaudio_module_info *module;
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct snd_soc_codec *codec = widget->codec;
	struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
	struct gb_bundle *bundle;

	dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
	module = find_gb_module(gb, kcontrol->id.name);
	if (!module)
		return -EINVAL;

	data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
	info = (struct gb_audio_ctl_elem_info *)data->info;
	bundle = to_gb_bundle(module->dev);

	if (data->vcount == 2)
		dev_warn(widget->dapm->dev,
			 "GB: Control '%s' is stereo, which is not supported\n",
			 kcontrol->id.name);

	/* keep the bundle awake for the remote operation */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_audio_gb_get_control(module->mgmt_connection, data->ctl_id,
				      GB_AUDIO_INVALID_INDEX, &gbvalue);

	gb_pm_runtime_put_autosuspend(bundle);

	if (ret) {
		dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
				    __func__, kcontrol->id.name);
		return ret;
	}
	/* update ucontrol */
	ucontrol->value.integer.value[0] = gbvalue.value.integer_value[0];

	return ret;
}
430
431static int gbcodec_mixer_dapm_ctl_put(struct snd_kcontrol *kcontrol,
432 struct snd_ctl_elem_value *ucontrol)
433{
434 int ret, wi, max, connect;
435 unsigned int mask, val;
436 struct gb_audio_ctl_elem_info *info;
437 struct gbaudio_ctl_pvt *data;
438 struct gb_audio_ctl_elem_value gbvalue;
439 struct gbaudio_module_info *module;
440 struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
441 struct snd_soc_dapm_widget *widget = wlist->widgets[0];
442 struct snd_soc_codec *codec = widget->codec;
443 struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
444 struct gb_bundle *bundle;
445
446 dev_dbg(codec->dev, "Entered %s:%s\n", __func__, kcontrol->id.name);
447 module = find_gb_module(gb, kcontrol->id.name);
448 if (!module)
449 return -EINVAL;
450
451 data = (struct gbaudio_ctl_pvt *)kcontrol->private_value;
452 info = (struct gb_audio_ctl_elem_info *)data->info;
453 bundle = to_gb_bundle(module->dev);
454
455 if (data->vcount == 2)
456 dev_warn(widget->dapm->dev,
457 "GB: Control '%s' is stereo, which is not supported\n",
458 kcontrol->id.name);
459
460 max = info->value.integer.max;
461 mask = (1 << fls(max)) - 1;
462 val = (ucontrol->value.integer.value[0] & mask);
463 connect = !!val;
464
465 /* update ucontrol */
466 if (gbvalue.value.integer_value[0] != val) {
467 for (wi = 0; wi < wlist->num_widgets; wi++) {
468 widget = wlist->widgets[wi];
469
470 widget->value = val;
471 widget->dapm->update = NULL;
472 snd_soc_dapm_mixer_update_power(widget, kcontrol,
473 connect);
474 }
475 gbvalue.value.integer_value[0] =
476 ucontrol->value.integer.value[0];
477
478 ret = gb_pm_runtime_get_sync(bundle);
479 if (ret)
480 return ret;
481
482 ret = gb_audio_gb_set_control(module->mgmt_connection,
483 data->ctl_id,
484 GB_AUDIO_INVALID_INDEX, &gbvalue);
485
486 gb_pm_runtime_put_autosuspend(bundle);
487
488 if (ret) {
489 dev_err_ratelimited(codec->dev,
490 "%d:Error in %s for %s\n", ret,
491 __func__, kcontrol->id.name);
492 }
493 }
494
495 return ret;
496}
497
/* DAPM mixer kcontrol template wired to the gbcodec DAPM callbacks;
 * 'data' (a struct gbaudio_ctl_pvt *) goes into private_value. */
#define SOC_DAPM_MIXER_GB(xname, kcount, data) \
{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
	.count = kcount, .info = gbcodec_mixer_dapm_ctl_info, \
	.get = gbcodec_mixer_dapm_ctl_get, .put = gbcodec_mixer_dapm_ctl_put, \
	.private_value = (unsigned long)data}
503
/* DAPM event stub for the speaker widget; intentionally a no-op. */
static int gbcodec_event_spk(struct snd_soc_dapm_widget *w,
					struct snd_kcontrol *k, int event)
{
	/* Ensure GB speaker is connected */

	return 0;
}
511
/* DAPM event stub for the headphone widget; intentionally a no-op. */
static int gbcodec_event_hp(struct snd_soc_dapm_widget *w,
					struct snd_kcontrol *k, int event)
{
	/* Ensure GB module supports jack slot */

	return 0;
}
519
/* DAPM event stub for the internal-mic widget; intentionally a no-op. */
static int gbcodec_event_int_mic(struct snd_soc_dapm_widget *w,
					struct snd_kcontrol *k, int event)
{
	/* Ensure GB module supports jack slot */

	return 0;
}
527
528static int gbaudio_validate_kcontrol_count(struct gb_audio_widget *w)
529{
530 int ret = 0;
531
532 switch (w->type) {
533 case snd_soc_dapm_spk:
534 case snd_soc_dapm_hp:
535 case snd_soc_dapm_mic:
536 case snd_soc_dapm_output:
537 case snd_soc_dapm_input:
538 if (w->ncontrols)
539 ret = -EINVAL;
540 break;
541 case snd_soc_dapm_switch:
542 case snd_soc_dapm_mux:
543 if (w->ncontrols != 1)
544 ret = -EINVAL;
545 break;
546 default:
547 break;
548 }
549
550 return ret;
551}
552
/*
 * "get" callback for enumerated controls: resolve the control id from
 * the kcontrol name and fetch the current enum item(s) from the module.
 * Different shift_l/shift_r in the soc_enum indicates a two-channel
 * enum, in which case item[1] is filled too.
 */
static int gbcodec_enum_ctl_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret, ctl_id;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	struct gb_audio_ctl_elem_value gbvalue;
	struct gbaudio_module_info *module;
	struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
	struct gb_bundle *bundle;

	module = find_gb_module(gb, kcontrol->id.name);
	if (!module)
		return -EINVAL;

	ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
	if (ctl_id < 0)
		return -EINVAL;

	bundle = to_gb_bundle(module->dev);

	/* keep the bundle awake for the remote operation */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
				      GB_AUDIO_INVALID_INDEX, &gbvalue);

	gb_pm_runtime_put_autosuspend(bundle);

	if (ret) {
		dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
				    __func__, kcontrol->id.name);
		return ret;
	}

	ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
	if (e->shift_l != e->shift_r)
		ucontrol->value.enumerated.item[1] =
			gbvalue.value.enumerated_item[1];

	return 0;
}
596
597static int gbcodec_enum_ctl_put(struct snd_kcontrol *kcontrol,
598 struct snd_ctl_elem_value *ucontrol)
599{
600 int ret, ctl_id;
601 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
602 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
603 struct gb_audio_ctl_elem_value gbvalue;
604 struct gbaudio_module_info *module;
605 struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
606 struct gb_bundle *bundle;
607
608 module = find_gb_module(gb, kcontrol->id.name);
609 if (!module)
610 return -EINVAL;
611
612 ctl_id = gbaudio_map_controlname(module, kcontrol->id.name);
613 if (ctl_id < 0)
614 return -EINVAL;
615
616 if (ucontrol->value.enumerated.item[0] > e->max - 1)
617 return -EINVAL;
618 gbvalue.value.enumerated_item[0] = ucontrol->value.enumerated.item[0];
619
620 if (e->shift_l != e->shift_r) {
621 if (ucontrol->value.enumerated.item[1] > e->max - 1)
622 return -EINVAL;
623 gbvalue.value.enumerated_item[1] =
624 ucontrol->value.enumerated.item[1];
625 }
626
627 bundle = to_gb_bundle(module->dev);
628
629 ret = gb_pm_runtime_get_sync(bundle);
630 if (ret)
631 return ret;
632
633 ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
634 GB_AUDIO_INVALID_INDEX, &gbvalue);
635
636 gb_pm_runtime_put_autosuspend(bundle);
637
638 if (ret) {
639 dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
640 __func__, kcontrol->id.name);
641 }
642
643 return ret;
644}
645
/*
 * Build a non-DAPM enumerated kcontrol from a greybus control
 * descriptor.  The soc_enum and its text array are device-managed.
 * NOTE(review): gb_generate_enum_strings()'s result is dereferenced in
 * the debug loop without a NULL check - relies on the allocation
 * succeeding.
 */
static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
					 struct snd_kcontrol_new *kctl,
					 struct gb_audio_control *ctl)
{
	struct soc_enum *gbe;
	struct gb_audio_enumerated *gb_enum;
	int i;

	gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
	if (!gbe)
		return -ENOMEM;

	gb_enum = &ctl->info.value.enumerated;

	/* since count=1, and reg is dummy */
	gbe->max = gb_enum->items;
	gbe->texts = gb_generate_enum_strings(gb, gb_enum);

	/* debug enum info */
	dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
		 gb_enum->names_length);
	for (i = 0; i < gb_enum->items; i++)
		dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);

	*kctl = (struct snd_kcontrol_new)
		SOC_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_ctl_get,
			     gbcodec_enum_ctl_put);
	return 0;
}
675
676static int gbaudio_tplg_create_kcontrol(struct gbaudio_module_info *gb,
677 struct snd_kcontrol_new *kctl,
678 struct gb_audio_control *ctl)
679{
680 int ret = 0;
681 struct gbaudio_ctl_pvt *ctldata;
682
683 switch (ctl->iface) {
684 case SNDRV_CTL_ELEM_IFACE_MIXER:
685 switch (ctl->info.type) {
686 case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
687 ret = gbaudio_tplg_create_enum_kctl(gb, kctl, ctl);
688 break;
689 default:
690 ctldata = devm_kzalloc(gb->dev,
691 sizeof(struct gbaudio_ctl_pvt),
692 GFP_KERNEL);
693 if (!ctldata)
694 return -ENOMEM;
695 ctldata->ctl_id = ctl->id;
696 ctldata->data_cport = ctl->data_cport;
697 ctldata->access = ctl->access;
698 ctldata->vcount = ctl->count_values;
699 ctldata->info = &ctl->info;
700 *kctl = (struct snd_kcontrol_new)
701 SOC_MIXER_GB(ctl->name, ctl->count, ctldata);
702 ctldata = NULL;
703 break;
704 }
705 break;
706 default:
707 return -EINVAL;
708 }
709
710 dev_dbg(gb->dev, "%s:%d control created\n", ctl->name, ctl->id);
711 return ret;
712}
713
/*
 * "get" callback for DAPM enumerated (mux) controls: resolve the
 * widget-control id from the kcontrol name and fetch the current enum
 * item(s) from the module.
 */
static int gbcodec_enum_dapm_ctl_get(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret, ctl_id;
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct gbaudio_module_info *module;
	struct gb_audio_ctl_elem_value gbvalue;
	struct snd_soc_codec *codec = widget->codec;
	struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	struct gb_bundle *bundle;

	module = find_gb_module(gb, kcontrol->id.name);
	if (!module)
		return -EINVAL;

	ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
	if (ctl_id < 0)
		return -EINVAL;

	bundle = to_gb_bundle(module->dev);

	/* keep the bundle awake for the remote operation */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
				      GB_AUDIO_INVALID_INDEX, &gbvalue);

	gb_pm_runtime_put_autosuspend(bundle);

	if (ret) {
		dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
				    __func__, kcontrol->id.name);
		return ret;
	}

	ucontrol->value.enumerated.item[0] = gbvalue.value.enumerated_item[0];
	/* different shifts indicate a two-channel enum */
	if (e->shift_l != e->shift_r)
		ucontrol->value.enumerated.item[1] =
			gbvalue.value.enumerated_item[1];

	return 0;
}
759
/*
 * "put" callback for DAPM enumerated (mux) controls: fetch the current
 * value, and if the requested item(s) differ, push the new value to the
 * module and update DAPM power state of the affected widgets.
 *
 * Returns 1 when a change was applied, 0 otherwise (ALSA "changed"
 * convention), or a negative errno for invalid input / PM failure.
 * NOTE(review): a gb_audio_gb_set_control() failure is only logged;
 * 'change' (1) is still returned - confirm this is intentional.
 */
static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int ret, wi, ctl_id;
	unsigned int val, mux, change;
	unsigned int mask;
	struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
	struct snd_soc_dapm_widget *widget = wlist->widgets[0];
	struct gb_audio_ctl_elem_value gbvalue;
	struct gbaudio_module_info *module;
	struct snd_soc_codec *codec = widget->codec;
	struct gbaudio_codec_info *gb = snd_soc_codec_get_drvdata(codec);
	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
	struct gb_bundle *bundle;

	if (ucontrol->value.enumerated.item[0] > e->max - 1)
		return -EINVAL;

	module = find_gb_module(gb, kcontrol->id.name);
	if (!module)
		return -EINVAL;

	ctl_id = gbaudio_map_wcontrolname(module, kcontrol->id.name);
	if (ctl_id < 0)
		return -EINVAL;

	change = 0;
	bundle = to_gb_bundle(module->dev);

	/* read the current value so we only write on a real change */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	ret = gb_audio_gb_get_control(module->mgmt_connection, ctl_id,
				      GB_AUDIO_INVALID_INDEX, &gbvalue);

	gb_pm_runtime_put_autosuspend(bundle);

	if (ret) {
		dev_err_ratelimited(codec->dev, "%d:Error in %s for %s\n", ret,
				    __func__, kcontrol->id.name);
		return ret;
	}

	mux = ucontrol->value.enumerated.item[0];
	val = mux << e->shift_l;
	mask = e->mask << e->shift_l;

	if (gbvalue.value.enumerated_item[0] !=
	    ucontrol->value.enumerated.item[0]) {
		change = 1;
		gbvalue.value.enumerated_item[0] =
			ucontrol->value.enumerated.item[0];
	}

	/* different shifts indicate a two-channel enum */
	if (e->shift_l != e->shift_r) {
		if (ucontrol->value.enumerated.item[1] > e->max - 1)
			return -EINVAL;
		val |= ucontrol->value.enumerated.item[1] << e->shift_r;
		mask |= e->mask << e->shift_r;
		if (gbvalue.value.enumerated_item[1] !=
		    ucontrol->value.enumerated.item[1]) {
			change = 1;
			gbvalue.value.enumerated_item[1] =
				ucontrol->value.enumerated.item[1];
		}
	}

	if (change) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (ret)
			return ret;

		ret = gb_audio_gb_set_control(module->mgmt_connection, ctl_id,
					      GB_AUDIO_INVALID_INDEX, &gbvalue);

		gb_pm_runtime_put_autosuspend(bundle);

		if (ret) {
			dev_err_ratelimited(codec->dev,
					    "%d:Error in %s for %s\n", ret,
					    __func__, kcontrol->id.name);
		}
		for (wi = 0; wi < wlist->num_widgets; wi++) {
			widget = wlist->widgets[wi];

			widget->value = val;
			widget->dapm->update = NULL;
			snd_soc_dapm_mux_update_power(widget, kcontrol, mux, e);
		}
	}

	return change;
}
854
/*
 * Build a DAPM enumerated (mux) kcontrol from a greybus control
 * descriptor.  Mirrors gbaudio_tplg_create_enum_kctl() but produces a
 * SOC_DAPM_ENUM_EXT control.
 * NOTE(review): as in the kctl variant, gb_generate_enum_strings()'s
 * result is dereferenced in the debug loop without a NULL check.
 */
static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
					struct snd_kcontrol_new *kctl,
					struct gb_audio_control *ctl)
{
	struct soc_enum *gbe;
	struct gb_audio_enumerated *gb_enum;
	int i;

	gbe = devm_kzalloc(gb->dev, sizeof(*gbe), GFP_KERNEL);
	if (!gbe)
		return -ENOMEM;

	gb_enum = &ctl->info.value.enumerated;

	/* since count=1, and reg is dummy */
	gbe->max = gb_enum->items;
	gbe->texts = gb_generate_enum_strings(gb, gb_enum);

	/* debug enum info */
	dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gb_enum->items,
		 gb_enum->names_length);
	for (i = 0; i < gb_enum->items; i++)
		dev_dbg(gb->dev, "src[%d]: %s\n", i, gbe->texts[i]);

	*kctl = (struct snd_kcontrol_new)
		SOC_DAPM_ENUM_EXT(ctl->name, *gbe, gbcodec_enum_dapm_ctl_get,
				  gbcodec_enum_dapm_ctl_put);
	return 0;
}
884
885static int gbaudio_tplg_create_mixer_ctl(struct gbaudio_module_info *gb,
886 struct snd_kcontrol_new *kctl,
887 struct gb_audio_control *ctl)
888{
889 struct gbaudio_ctl_pvt *ctldata;
890
891 ctldata = devm_kzalloc(gb->dev, sizeof(struct gbaudio_ctl_pvt),
892 GFP_KERNEL);
893 if (!ctldata)
894 return -ENOMEM;
895 ctldata->ctl_id = ctl->id;
896 ctldata->data_cport = ctl->data_cport;
897 ctldata->access = ctl->access;
898 ctldata->vcount = ctl->count_values;
899 ctldata->info = &ctl->info;
900 *kctl = (struct snd_kcontrol_new)
901 SOC_DAPM_MIXER_GB(ctl->name, ctl->count, ctldata);
902
903 return 0;
904}
905
906static int gbaudio_tplg_create_wcontrol(struct gbaudio_module_info *gb,
907 struct snd_kcontrol_new *kctl,
908 struct gb_audio_control *ctl)
909{
910 int ret;
911
912 switch (ctl->iface) {
913 case SNDRV_CTL_ELEM_IFACE_MIXER:
914 switch (ctl->info.type) {
915 case GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED:
916 ret = gbaudio_tplg_create_enum_ctl(gb, kctl, ctl);
917 break;
918 default:
919 ret = gbaudio_tplg_create_mixer_ctl(gb, kctl, ctl);
920 break;
921 }
922 break;
923 default:
924 return -EINVAL;
925
926 }
927
928 dev_dbg(gb->dev, "%s:%d DAPM control created, ret:%d\n", ctl->name,
929 ctl->id, ret);
930 return ret;
931}
932
/*
 * Shared DAPM event handler: on pre-power-up enable the widget on the
 * module (and update the module state), on post-power-down disable it.
 * The widget id is recovered from the widget's name.
 */
static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *kcontrol, int event)
{
	int wid;
	int ret;
	struct snd_soc_codec *codec = w->codec;
	struct gbaudio_codec_info *gbcodec = snd_soc_codec_get_drvdata(codec);
	struct gbaudio_module_info *module;
	struct gb_bundle *bundle;

	dev_dbg(codec->dev, "%s %s %d\n", __func__, w->name, event);

	/* Find relevant module */
	module = find_gb_module(gbcodec, w->name);
	if (!module)
		return -EINVAL;

	/* map name to widget id */
	wid = gbaudio_map_widgetname(module, w->name);
	if (wid < 0) {
		dev_err(codec->dev, "Invalid widget name:%s\n", w->name);
		return -EINVAL;
	}

	bundle = to_gb_bundle(module->dev);

	/* keep the bundle awake for the remote operation */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		ret = gb_audio_gb_enable_widget(module->mgmt_connection, wid);
		if (!ret)
			ret = gbaudio_module_update(gbcodec, w, module, 1);
		break;
	case SND_SOC_DAPM_POST_PMD:
		ret = gb_audio_gb_disable_widget(module->mgmt_connection, wid);
		if (!ret)
			ret = gbaudio_module_update(gbcodec, w, module, 0);
		break;
	}
	if (ret)
		dev_err_ratelimited(codec->dev,
				    "%d: widget, event:%d failed:%d\n", wid,
				    event, ret);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
984
/*
 * Build one snd_soc_dapm_widget (plus its kcontrols) from a greybus
 * widget descriptor, and report via @w_size how many bytes of the
 * variable-length descriptor were consumed so the caller can walk to
 * the next one.  Created controls are tracked on
 * module->widget_ctl_list; on error that list is emptied again.
 *
 * NOTE(review): 'widget_kctls' is only allocated when w->ncontrols is
 * non-zero; the switch/mux cases that use it are guaranteed exactly one
 * control by gbaudio_validate_kcontrol_count() above.
 */
static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
				      struct snd_soc_dapm_widget *dw,
				      struct gb_audio_widget *w, int *w_size)
{
	int i, ret, csize;
	struct snd_kcontrol_new *widget_kctls;
	struct gb_audio_control *curr;
	struct gbaudio_control *control, *_control;
	size_t size;
	char temp_name[NAME_SIZE];

	ret = gbaudio_validate_kcontrol_count(w);
	if (ret) {
		dev_err(module->dev, "Inavlid kcontrol count=%d for %s\n",
			w->ncontrols, w->name);
		return ret;
	}

	/* allocate memory for kcontrol */
	if (w->ncontrols) {
		size = sizeof(struct snd_kcontrol_new) * w->ncontrols;
		widget_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
		if (!widget_kctls)
			return -ENOMEM;
	}

	*w_size = sizeof(struct gb_audio_widget);

	/* create relevant kcontrols */
	curr = w->ctl;
	for (i = 0; i < w->ncontrols; i++) {
		ret = gbaudio_tplg_create_wcontrol(module, &widget_kctls[i],
						   curr);
		if (ret) {
			dev_err(module->dev,
				"%s:%d type widget_ctl not supported\n",
				curr->name, curr->iface);
			goto error;
		}
		control = devm_kzalloc(module->dev,
				       sizeof(struct gbaudio_control),
				       GFP_KERNEL);
		if (!control) {
			ret = -ENOMEM;
			goto error;
		}
		control->id = curr->id;
		control->name = curr->name;
		control->wname = w->name;

		if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
			/* enumerated controls are variable-length; compute
			 * the actual descriptor size from the name blob */
			struct gb_audio_enumerated *gbenum =
				&curr->info.value.enumerated;

			csize = offsetof(struct gb_audio_control, info);
			csize += offsetof(struct gb_audio_ctl_elem_info, value);
			csize += offsetof(struct gb_audio_enumerated, names);
			csize += gbenum->names_length;
			control->texts = (const char * const *)
				gb_generate_enum_strings(module, gbenum);
			control->items = gbenum->items;
		} else
			csize = sizeof(struct gb_audio_control);
		*w_size += csize;
		curr = (void *)curr + csize;
		list_add(&control->list, &module->widget_ctl_list);
		dev_dbg(module->dev, "%s: control of type %d created\n",
			widget_kctls[i].name, widget_kctls[i].iface);
	}

	/* Prefix dev_id to widget control_name */
	strlcpy(temp_name, w->name, NAME_SIZE);
	snprintf(w->name, NAME_SIZE, "GB %d %s", module->dev_id, temp_name);

	/* instantiate the DAPM widget matching the descriptor type */
	switch (w->type) {
	case snd_soc_dapm_spk:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk);
		module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
		break;
	case snd_soc_dapm_hp:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_HP(w->name, gbcodec_event_hp);
		module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
					| GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
		module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
		break;
	case snd_soc_dapm_mic:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic);
		module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
		break;
	case snd_soc_dapm_output:
		*dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name);
		break;
	case snd_soc_dapm_input:
		*dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name);
		break;
	case snd_soc_dapm_switch:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0,
					    widget_kctls, gbaudio_widget_event,
					    SND_SOC_DAPM_PRE_PMU |
					    SND_SOC_DAPM_POST_PMD);
		break;
	case snd_soc_dapm_pga:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0,
					   gbaudio_widget_event,
					   SND_SOC_DAPM_PRE_PMU |
					   SND_SOC_DAPM_POST_PMD);
		break;
	case snd_soc_dapm_mixer:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL,
					   0, gbaudio_widget_event,
					   SND_SOC_DAPM_PRE_PMU |
					   SND_SOC_DAPM_POST_PMD);
		break;
	case snd_soc_dapm_mux:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0,
					 widget_kctls, gbaudio_widget_event,
					 SND_SOC_DAPM_PRE_PMU |
					 SND_SOC_DAPM_POST_PMD);
		break;
	case snd_soc_dapm_aif_in:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0,
					      SND_SOC_NOPM,
					      0, 0, gbaudio_widget_event,
					      SND_SOC_DAPM_PRE_PMU |
					      SND_SOC_DAPM_POST_PMD);
		break;
	case snd_soc_dapm_aif_out:
		*dw = (struct snd_soc_dapm_widget)
			SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0,
					       SND_SOC_NOPM,
					       0, 0, gbaudio_widget_event,
					       SND_SOC_DAPM_PRE_PMU |
					       SND_SOC_DAPM_POST_PMD);
		break;
	default:
		ret = -EINVAL;
		goto error;
	}

	dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
		dw->id);
	return 0;
error:
	/* drop every control queued on the list during this call */
	list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
				 list) {
		list_del(&control->list);
		devm_kfree(module->dev, control);
	}
	return ret;
}
1143
/*
 * Parse the packed control array from the topology blob and build the
 * corresponding snd_kcontrol_new array for the codec.
 *
 * @controls points into the topology data area. Each gb_audio_control
 * record is fixed-size unless it describes an enumerated control, in
 * which case its name strings follow the record in-line and the walk
 * must advance by the computed csize instead.
 *
 * Returns 0 on success or a negative errno; on failure all entries
 * added to module->ctl_list so far are unwound and freed.
 */
static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
				   struct gb_audio_control *controls)
{
	int i, csize, ret;
	struct snd_kcontrol_new *dapm_kctls;
	struct gb_audio_control *curr;
	struct gbaudio_control *control, *_control;
	size_t size;
	char temp_name[NAME_SIZE];

	size = sizeof(struct snd_kcontrol_new) * module->num_controls;
	dapm_kctls = devm_kzalloc(module->dev, size, GFP_KERNEL);
	if (!dapm_kctls)
		return -ENOMEM;

	curr = controls;
	for (i = 0; i < module->num_controls; i++) {
		ret = gbaudio_tplg_create_kcontrol(module, &dapm_kctls[i],
						   curr);
		if (ret) {
			dev_err(module->dev, "%s:%d type not supported\n",
				curr->name, curr->iface);
			goto error;
		}
		/* Bookkeeping entry used later to map control ids to names */
		control = devm_kzalloc(module->dev, sizeof(struct
							   gbaudio_control),
				       GFP_KERNEL);
		if (!control) {
			ret = -ENOMEM;
			goto error;
		}
		control->id = curr->id;
		/* Prefix dev_id to widget_name */
		/* NOTE(review): long names may be truncated by the added
		 * "GB %d " prefix since NAME_SIZE is unchanged. */
		strlcpy(temp_name, curr->name, NAME_SIZE);
		snprintf(curr->name, NAME_SIZE, "GB %d %s", module->dev_id,
			 temp_name);
		control->name = curr->name;
		if (curr->info.type == GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED) {
			struct gb_audio_enumerated *gbenum =
				&curr->info.value.enumerated;

			/*
			 * Enumerated records carry their name strings
			 * in-line; include them when computing the size
			 * of this record.
			 */
			csize = offsetof(struct gb_audio_control, info);
			csize += offsetof(struct gb_audio_ctl_elem_info, value);
			csize += offsetof(struct gb_audio_enumerated, names);
			csize += gbenum->names_length;
			control->texts = (const char * const *)
				gb_generate_enum_strings(module, gbenum);
			control->items = gbenum->items;
		} else
			csize = sizeof(struct gb_audio_control);

		list_add(&control->list, &module->ctl_list);
		dev_dbg(module->dev, "%d:%s created of type %d\n", curr->id,
			curr->name, curr->info.type);
		/* Step over this variable-length record */
		curr = (void *)curr + csize;
	}
	module->controls = dapm_kctls;

	return 0;
error:
	list_for_each_entry_safe(control, _control, &module->ctl_list,
				 list) {
		list_del(&control->list);
		devm_kfree(module->dev, control);
	}
	devm_kfree(module->dev, dapm_kctls);
	return ret;
}
1212
/*
 * Parse the packed widget array from the topology blob and build the
 * snd_soc_dapm_widget array for the codec.
 *
 * Each gb_audio_widget record is variable-length; its actual size is
 * returned by gbaudio_tplg_create_widget() through @w_size and used to
 * advance to the next record.
 *
 * Returns 0 on success or a negative errno; on failure all entries
 * added to module->widget_list so far are unwound and freed.
 */
static int gbaudio_tplg_process_widgets(struct gbaudio_module_info *module,
				 struct gb_audio_widget *widgets)
{
	int i, ret, w_size;
	struct snd_soc_dapm_widget *dapm_widgets;
	struct gb_audio_widget *curr;
	struct gbaudio_widget *widget, *_widget;
	size_t size;

	size = sizeof(struct snd_soc_dapm_widget) * module->num_dapm_widgets;
	dapm_widgets = devm_kzalloc(module->dev, size, GFP_KERNEL);
	if (!dapm_widgets)
		return -ENOMEM;

	curr = widgets;
	for (i = 0; i < module->num_dapm_widgets; i++) {
		ret = gbaudio_tplg_create_widget(module, &dapm_widgets[i],
						 curr, &w_size);
		if (ret) {
			dev_err(module->dev, "%s:%d type not supported\n",
				curr->name, curr->type);
			goto error;
		}
		/* Bookkeeping entry used later to map widget ids to names */
		widget = devm_kzalloc(module->dev, sizeof(struct
							  gbaudio_widget),
				      GFP_KERNEL);
		if (!widget) {
			ret = -ENOMEM;
			goto error;
		}
		widget->id = curr->id;
		widget->name = curr->name;
		list_add(&widget->list, &module->widget_list);
		/* w_size was filled in by gbaudio_tplg_create_widget() */
		curr = (void *)curr + w_size;
	}
	module->dapm_widgets = dapm_widgets;

	return 0;

error:
	list_for_each_entry_safe(widget, _widget, &module->widget_list,
				 list) {
		list_del(&widget->list);
		devm_kfree(module->dev, widget);
	}
	devm_kfree(module->dev, dapm_widgets);
	return ret;
}
1261
/*
 * Parse the route array from the topology blob and build the
 * snd_soc_dapm_route array for the codec.
 *
 * Unlike controls and widgets, gb_audio_route records are fixed-size,
 * so the walk is a plain array iteration. Widget/control ids from the
 * wire are translated back to the (prefixed) names registered earlier
 * by the kcontrol/widget parsing passes.
 *
 * Returns 0 on success or -EINVAL/-ENOMEM on failure; on failure the
 * route array is freed again.
 */
static int gbaudio_tplg_process_routes(struct gbaudio_module_info *module,
				struct gb_audio_route *routes)
{
	int i, ret;
	struct snd_soc_dapm_route *dapm_routes;
	struct gb_audio_route *curr;
	size_t size;

	size = sizeof(struct snd_soc_dapm_route) * module->num_dapm_routes;
	dapm_routes = devm_kzalloc(module->dev, size, GFP_KERNEL);
	if (!dapm_routes)
		return -ENOMEM;

	module->dapm_routes = dapm_routes;
	curr = routes;

	for (i = 0; i < module->num_dapm_routes; i++) {
		dapm_routes->sink =
			gbaudio_map_widgetid(module, curr->destination_id);
		if (!dapm_routes->sink) {
			dev_err(module->dev, "%d:%d:%d:%d - Invalid sink\n",
				curr->source_id, curr->destination_id,
				curr->control_id, curr->index);
			ret = -EINVAL;
			goto error;
		}
		dapm_routes->source =
			gbaudio_map_widgetid(module, curr->source_id);
		if (!dapm_routes->source) {
			dev_err(module->dev, "%d:%d:%d:%d - Invalid source\n",
				curr->source_id, curr->destination_id,
				curr->control_id, curr->index);
			ret = -EINVAL;
			goto error;
		}
		/* A control is optional: GBAUDIO_INVALID_ID means "none" */
		dapm_routes->control =
			gbaudio_map_controlid(module,
					      curr->control_id,
					      curr->index);
		if ((curr->control_id != GBAUDIO_INVALID_ID) &&
		    !dapm_routes->control) {
			dev_err(module->dev, "%d:%d:%d:%d - Invalid control\n",
				curr->source_id, curr->destination_id,
				curr->control_id, curr->index);
			ret = -EINVAL;
			goto error;
		}
		dev_dbg(module->dev, "Route {%s, %s, %s}\n", dapm_routes->sink,
			(dapm_routes->control) ? dapm_routes->control:"NULL",
			dapm_routes->source);
		dapm_routes++;
		curr++;
	}

	return 0;

error:
	devm_kfree(module->dev, module->dapm_routes);
	return ret;
}
1322
1323static int gbaudio_tplg_process_header(struct gbaudio_module_info *module,
1324 struct gb_audio_topology *tplg_data)
1325{
1326 /* fetch no. of kcontrols, widgets & routes */
1327 module->num_controls = tplg_data->num_controls;
1328 module->num_dapm_widgets = tplg_data->num_widgets;
1329 module->num_dapm_routes = tplg_data->num_routes;
1330
1331 /* update block offset */
1332 module->dai_offset = (unsigned long)&tplg_data->data;
1333 module->control_offset = module->dai_offset + tplg_data->size_dais;
1334 module->widget_offset = module->control_offset +
1335 tplg_data->size_controls;
1336 module->route_offset = module->widget_offset +
1337 tplg_data->size_widgets;
1338
1339 dev_dbg(module->dev, "DAI offset is 0x%lx\n", module->dai_offset);
1340 dev_dbg(module->dev, "control offset is %lx\n",
1341 module->control_offset);
1342 dev_dbg(module->dev, "widget offset is %lx\n", module->widget_offset);
1343 dev_dbg(module->dev, "route offset is %lx\n", module->route_offset);
1344
1345 return 0;
1346}
1347
/*
 * Top-level topology parser: validates the header, then parses the
 * controls, widgets and routes sections in order, and finally records
 * the module's jack/button capabilities.
 *
 * Returns 0 on success or the first negative errno encountered.
 * Earlier sections that parsed successfully are NOT unwound here; the
 * caller is expected to use gbaudio_tplg_release() for cleanup.
 */
int gbaudio_tplg_parse_data(struct gbaudio_module_info *module,
			    struct gb_audio_topology *tplg_data)
{
	int ret;
	struct gb_audio_control *controls;
	struct gb_audio_widget *widgets;
	struct gb_audio_route *routes;

	if (!tplg_data)
		return -EINVAL;

	ret = gbaudio_tplg_process_header(module, tplg_data);
	if (ret) {
		dev_err(module->dev, "%d: Error in parsing topology header\n",
			ret);
		return ret;
	}

	/* process control */
	controls = (struct gb_audio_control *)module->control_offset;
	ret = gbaudio_tplg_process_kcontrols(module, controls);
	if (ret) {
		dev_err(module->dev,
			"%d: Error in parsing controls data\n", ret);
		return ret;
	}
	dev_dbg(module->dev, "Control parsing finished\n");

	/* process widgets */
	widgets = (struct gb_audio_widget *)module->widget_offset;
	ret = gbaudio_tplg_process_widgets(module, widgets);
	if (ret) {
		dev_err(module->dev,
			"%d: Error in parsing widgets data\n", ret);
		return ret;
	}
	dev_dbg(module->dev, "Widget parsing finished\n");

	/* process route */
	routes = (struct gb_audio_route *)module->route_offset;
	ret = gbaudio_tplg_process_routes(module, routes);
	if (ret) {
		dev_err(module->dev,
			"%d: Error in parsing routes data\n", ret);
		return ret;
	}
	dev_dbg(module->dev, "Route parsing finished\n");

	/* parse jack capabilities */
	if (tplg_data->jack_type) {
		module->jack_mask = tplg_data->jack_type & GBCODEC_JACK_MASK;
		module->button_mask = tplg_data->jack_type &
			GBCODEC_JACK_BUTTON_MASK;
	}

	/* ret is 0 here: all sections parsed successfully */
	return ret;
}
1405
/*
 * Release everything allocated by gbaudio_tplg_parse_data(): the
 * kcontrol and widget-control bookkeeping lists, the widget list, and
 * the devm-allocated kcontrol/widget/route arrays.
 *
 * A no-op when no topology was ever attached to the module.
 */
void gbaudio_tplg_release(struct gbaudio_module_info *module)
{
	struct gbaudio_control *control, *_control;
	struct gbaudio_widget *widget, *_widget;

	if (!module->topology)
		return;

	/* release kcontrols */
	list_for_each_entry_safe(control, _control, &module->ctl_list,
				 list) {
		list_del(&control->list);
		devm_kfree(module->dev, control);
	}
	if (module->controls)
		devm_kfree(module->dev, module->controls);

	/* release widget controls */
	list_for_each_entry_safe(control, _control, &module->widget_ctl_list,
				 list) {
		list_del(&control->list);
		devm_kfree(module->dev, control);
	}

	/* release widgets */
	list_for_each_entry_safe(widget, _widget, &module->widget_list,
				 list) {
		list_del(&widget->list);
		devm_kfree(module->dev, widget);
	}
	if (module->dapm_widgets)
		devm_kfree(module->dev, module->dapm_widgets);

	/* release routes */
	if (module->dapm_routes)
		devm_kfree(module->dev, module->dapm_routes);
}
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
new file mode 100644
index 000000000000..168626ba0c03
--- /dev/null
+++ b/drivers/staging/greybus/authentication.c
@@ -0,0 +1,429 @@
1/*
2 * Greybus Component Authentication Protocol (CAP) Driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include "greybus.h"
11
12#include <linux/cdev.h>
13#include <linux/fs.h>
14#include <linux/ioctl.h>
15#include <linux/uaccess.h>
16
17#include "greybus_authentication.h"
18#include "firmware.h"
19#include "greybus.h"
20
21#define CAP_TIMEOUT_MS 1000
22
23/*
24 * Number of minor devices this driver supports.
25 * There will be exactly one required per Interface.
26 */
27#define NUM_MINORS U8_MAX
28
/*
 * Per-connection state for the Component Authentication Protocol.
 * One instance (and one char-device minor) exists per Interface.
 */
struct gb_cap {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct kref kref;		/* lifetime; freed in cap_kref_release() */
	struct list_head node;		/* entry in cap_list, under list_mutex */
	bool disabled; /* connection getting disabled */

	struct mutex mutex;		/* serializes ioctls, guards ->disabled */
	struct cdev cdev;
	struct device *class_device;
	dev_t dev_num;
};
41
42static struct class *cap_class;
43static dev_t cap_dev_num;
44static DEFINE_IDA(cap_minors_map);
45static LIST_HEAD(cap_list);
46static DEFINE_MUTEX(list_mutex);
47
48static void cap_kref_release(struct kref *kref)
49{
50 struct gb_cap *cap = container_of(kref, struct gb_cap, kref);
51
52 kfree(cap);
53}
54
55/*
56 * All users of cap take a reference (from within list_mutex lock), before
57 * they get a pointer to play with. And the structure will be freed only after
58 * the last user has put the reference to it.
59 */
/* Drop a reference; frees cap via cap_kref_release() on the last put. */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}
64
65/* Caller must call put_cap() after using struct gb_cap */
66static struct gb_cap *get_cap(struct cdev *cdev)
67{
68 struct gb_cap *cap;
69
70 mutex_lock(&list_mutex);
71
72 list_for_each_entry(cap, &cap_list, node) {
73 if (&cap->cdev == cdev) {
74 kref_get(&cap->kref);
75 goto unlock;
76 }
77 }
78
79 cap = NULL;
80
81unlock:
82 mutex_unlock(&list_mutex);
83
84 return cap;
85}
86
/*
 * Fetch the Endpoint UID from the module over the CAP connection.
 *
 * @euid: output buffer; receives sizeof(response.uid) bytes.
 *
 * Returns 0 on success or a negative errno from the greybus operation.
 */
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}
104
/*
 * Request an IMS certificate of the given class/id from the module.
 *
 * @certificate: output buffer for the certificate payload.
 * @size:        set to the number of certificate bytes received.
 * @result:      set to the module's result code from the response.
 *
 * The response is variable-length, so a short-response operation sized
 * to the connection's maximum payload is used instead of
 * gb_operation_sync().
 *
 * Returns 0 on success or a negative errno.
 */
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	/* Certificate bytes follow the fixed response header */
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);
	return ret;
}
142
/*
 * Run an authentication exchange with the module: send @auth_type, the
 * endpoint @uid and a @challenge; receive the module's result code,
 * challenge response and signature.
 *
 * @signature_size: set to the number of signature bytes received
 *                  (the signature is the variable-length tail of the
 *                  response).
 *
 * Returns 0 on success or a negative errno.
 */
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	/* Signature bytes follow the fixed response header */
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);
	return ret;
}
182
183/* Char device fops */
184
185static int cap_open(struct inode *inode, struct file *file)
186{
187 struct gb_cap *cap = get_cap(inode->i_cdev);
188
189 /* cap structure can't get freed until file descriptor is closed */
190 if (cap) {
191 file->private_data = cap;
192 return 0;
193 }
194
195 return -ENODEV;
196}
197
/* release(): drop the reference taken in cap_open(). */
static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}
205
206static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
207 void __user *buf)
208{
209 struct cap_ioc_get_endpoint_uid endpoint_uid;
210 struct cap_ioc_get_ims_certificate *ims_cert;
211 struct cap_ioc_authenticate *authenticate;
212 size_t size;
213 int ret;
214
215 switch (cmd) {
216 case CAP_IOC_GET_ENDPOINT_UID:
217 ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
218 if (ret)
219 return ret;
220
221 if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
222 return -EFAULT;
223
224 return 0;
225 case CAP_IOC_GET_IMS_CERTIFICATE:
226 size = sizeof(*ims_cert);
227 ims_cert = memdup_user(buf, size);
228 if (IS_ERR(ims_cert))
229 return PTR_ERR(ims_cert);
230
231 ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
232 ims_cert->certificate_id,
233 ims_cert->certificate,
234 &ims_cert->cert_size,
235 &ims_cert->result_code);
236 if (!ret && copy_to_user(buf, ims_cert, size))
237 ret = -EFAULT;
238 kfree(ims_cert);
239
240 return ret;
241 case CAP_IOC_AUTHENTICATE:
242 size = sizeof(*authenticate);
243 authenticate = memdup_user(buf, size);
244 if (IS_ERR(authenticate))
245 return PTR_ERR(authenticate);
246
247 ret = cap_authenticate(cap, authenticate->auth_type,
248 authenticate->uid,
249 authenticate->challenge,
250 &authenticate->result_code,
251 authenticate->response,
252 &authenticate->signature_size,
253 authenticate->signature);
254 if (!ret && copy_to_user(buf, authenticate, size))
255 ret = -EFAULT;
256 kfree(authenticate);
257
258 return ret;
259 default:
260 return -ENOTTY;
261 }
262}
263
/*
 * unlocked_ioctl entry point: wrap cap_ioctl() with the per-cap mutex
 * and a runtime-PM get/put on the bundle.
 *
 * Returns -ENODEV once teardown has set ->disabled; otherwise the
 * result of the PM wakeup or of cap_ioctl().
 */
static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}
293
/* File operations for the gb-authenticate-N character device. */
static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};
300
/*
 * Set up CAP support for @connection: allocate the gb_cap, enable the
 * connection, and expose a gb-authenticate-N char device to userspace.
 *
 * A NULL @connection is treated as "no CAP support" and returns 0.
 *
 * On failure every step is unwound in reverse order and a negative
 * errno is returned.
 */
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	/* Make the cap visible to get_cap() lookups */
	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/* Drops the initial reference and frees cap */
	put_cap(cap);

	return ret;
}
366
/*
 * Tear down CAP support for @connection, undoing
 * gb_cap_connection_init() in reverse order: remove the char device,
 * fence off new ioctls, disable the connection, then drop the initial
 * reference. The gb_cap is freed only once the last open file
 * descriptor releases its reference.
 */
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Disallow new users to get access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap would have taken a reference to it by
	 * now, we can drop our reference and wait the last user will get
	 * cap freed.
	 */
	put_cap(cap);
}
403
404int cap_init(void)
405{
406 int ret;
407
408 cap_class = class_create(THIS_MODULE, "gb_authenticate");
409 if (IS_ERR(cap_class))
410 return PTR_ERR(cap_class);
411
412 ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
413 "gb_authenticate");
414 if (ret)
415 goto err_remove_class;
416
417 return 0;
418
419err_remove_class:
420 class_destroy(cap_class);
421 return ret;
422}
423
/* Module-wide CAP teardown: release the region, class and minor map. */
void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_destroy(cap_class);
	ida_destroy(&cap_minors_map);
}
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
new file mode 100644
index 000000000000..5f90721bcc51
--- /dev/null
+++ b/drivers/staging/greybus/bootrom.c
@@ -0,0 +1,524 @@
1/*
2 * BOOTROM Greybus driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/firmware.h>
11#include <linux/jiffies.h>
12#include <linux/mutex.h>
13#include <linux/workqueue.h>
14
15#include "greybus.h"
16#include "firmware.h"
17
18/* Timeout, in jiffies, within which the next request must be received */
19#define NEXT_REQ_TIMEOUT_MS 1000
20
21/*
22 * FIXME: Reduce this timeout once svc core handles parallel processing of
23 * events from the SVC, which are handled sequentially today.
24 */
25#define MODE_SWITCH_TIMEOUT_MS 10000
26
/* Which request we expect next from the module; used to label timeouts. */
enum next_request_type {
	NEXT_REQ_FIRMWARE_SIZE,
	NEXT_REQ_GET_FIRMWARE,
	NEXT_REQ_READY_TO_BOOT,
	NEXT_REQ_MODE_SWITCH,
};
33
/* Per-connection state for the bootrom firmware-download protocol. */
struct gb_bootrom {
	struct gb_connection	*connection;
	const struct firmware	*fw;	/* currently loaded blob, or NULL */
	u8			protocol_major;
	u8			protocol_minor;
	enum next_request_type	next_request;	/* what the timeout is waiting for */
	struct delayed_work	dwork;		/* fires gb_bootrom_timedout() */
	struct mutex		mutex; /* Protects bootrom->fw */
};
43
44static void free_firmware(struct gb_bootrom *bootrom)
45{
46 if (!bootrom->fw)
47 return;
48
49 release_firmware(bootrom->fw);
50 bootrom->fw = NULL;
51}
52
/*
 * Delayed-work handler: the module failed to send the expected next
 * request in time. Log which request was pending and drop any firmware
 * we were holding for it.
 */
static void gb_bootrom_timedout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct gb_bootrom *bootrom = container_of(dwork, struct gb_bootrom, dwork);
	struct device *dev = &bootrom->connection->bundle->dev;
	const char *reason;

	switch (bootrom->next_request) {
	case NEXT_REQ_FIRMWARE_SIZE:
		reason = "Firmware Size Request";
		break;
	case NEXT_REQ_GET_FIRMWARE:
		reason = "Get Firmware Request";
		break;
	case NEXT_REQ_READY_TO_BOOT:
		reason = "Ready to Boot Request";
		break;
	case NEXT_REQ_MODE_SWITCH:
		reason = "Interface Mode Switch";
		break;
	default:
		/*
		 * NOTE(review): reason stays NULL here, so the dev_err
		 * below prints "(null)" after the invalid-request
		 * message -- harmless but ugly; consider returning.
		 */
		reason = NULL;
		dev_err(dev, "Invalid next-request: %u", bootrom->next_request);
		break;
	}

	dev_err(dev, "Timed out waiting for %s from the Module\n", reason);

	mutex_lock(&bootrom->mutex);
	free_firmware(bootrom);
	mutex_unlock(&bootrom->mutex);

	/* TODO: Power-off Module ? */
}
87
/*
 * Record which request we expect next and arm the watchdog for
 * @timeout milliseconds; gb_bootrom_timedout() fires if it expires.
 */
static void gb_bootrom_set_timeout(struct gb_bootrom *bootrom,
		enum next_request_type next, unsigned long timeout)
{
	bootrom->next_request = next;
	schedule_delayed_work(&bootrom->dwork, msecs_to_jiffies(timeout));
}
94
/* Disarm the watchdog, waiting for a running timeout handler to finish. */
static void gb_bootrom_cancel_timeout(struct gb_bootrom *bootrom)
{
	cancel_delayed_work_sync(&bootrom->dwork);
}
99
100/*
101 * The es2 chip doesn't have VID/PID programmed into the hardware and we need to
102 * hack that up to distinguish different modules and their firmware blobs.
103 *
104 * This fetches VID/PID (over bootrom protocol) for es2 chip only, when VID/PID
105 * already sent during hotplug are 0.
106 *
107 * Otherwise, we keep intf->vendor_id/product_id same as what's passed
108 * during hotplug.
109 */
/*
 * es2 workaround: fetch VID/PID over the bootrom protocol when the
 * interface was hotplugged with the NO_GMP_IDS quirk (i.e. its IDs
 * came up as 0), and patch them into the interface. Best-effort: a
 * failed operation only logs an error.
 */
static void bootrom_es2_fixup_vid_pid(struct gb_bootrom *bootrom)
{
	struct gb_bootrom_get_vid_pid_response response;
	struct gb_connection *connection = bootrom->connection;
	struct gb_interface *intf = connection->bundle->intf;
	int ret;

	if (!(intf->quirks & GB_INTERFACE_QUIRK_NO_GMP_IDS))
		return;

	ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_GET_VID_PID,
				NULL, 0, &response, sizeof(response));
	if (ret) {
		dev_err(&connection->bundle->dev,
			"Bootrom get vid/pid operation failed (%d)\n", ret);
		return;
	}

	/*
	 * NOTE: This is hacked, so that the same values of VID/PID can be used
	 * by next firmware level as well. The uevent for bootrom will still
	 * have VID/PID as 0, though after this point the sysfs files will start
	 * showing the updated values. But yeah, that's a bit racy as the same
	 * sysfs files would be showing 0 before this point.
	 */
	intf->vendor_id = le32_to_cpu(response.vendor_id);
	intf->product_id = le32_to_cpu(response.product_id);

	dev_dbg(&connection->bundle->dev, "Bootrom got vid (0x%x)/pid (0x%x)\n",
		intf->vendor_id, intf->product_id);
}
141
/*
 * Build the stage-2 firmware file name from the interface's IDs and
 * request the blob from userspace into bootrom->fw, releasing any blob
 * held previously. Returns 0 on success or a negative errno. Caller
 * must hold bootrom->mutex.
 */
static int find_firmware(struct gb_bootrom *bootrom, u8 stage)
{
	struct gb_connection *connection = bootrom->connection;
	struct gb_interface *intf = connection->bundle->intf;
	char firmware_name[49];
	int rc;

	/* Already have a firmware, free it */
	free_firmware(bootrom);

	/* Bootrom protocol is only supported for loading Stage 2 firmware */
	if (stage != 2) {
		dev_err(&connection->bundle->dev, "Invalid boot stage: %u\n",
			stage);
		return -EINVAL;
	}

	/*
	 * Create firmware name
	 *
	 * XXX Name it properly..
	 */
	snprintf(firmware_name, sizeof(firmware_name),
		 FW_NAME_PREFIX "%08x_%08x_%08x_%08x_s2l.tftf",
		 intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
		 intf->vendor_id, intf->product_id);

	// FIXME:
	// Turn to dev_dbg later after everyone has valid bootloaders with good
	// ids, but leave this as dev_info for now to make it easier to track
	// down "empty" vid/pid modules.
	dev_info(&connection->bundle->dev, "Firmware file '%s' requested\n",
		 firmware_name);

	rc = request_firmware(&bootrom->fw, firmware_name,
			      &connection->bundle->dev);
	if (rc) {
		dev_err(&connection->bundle->dev,
			"failed to find %s firmware (%d)\n", firmware_name, rc);
	}

	return rc;
}
186
187static int gb_bootrom_firmware_size_request(struct gb_operation *op)
188{
189 struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
190 struct gb_bootrom_firmware_size_request *size_request = op->request->payload;
191 struct gb_bootrom_firmware_size_response *size_response;
192 struct device *dev = &op->connection->bundle->dev;
193 int ret;
194
195 /* Disable timeouts */
196 gb_bootrom_cancel_timeout(bootrom);
197
198 if (op->request->payload_size != sizeof(*size_request)) {
199 dev_err(dev, "%s: illegal size of firmware size request (%zu != %zu)\n",
200 __func__, op->request->payload_size,
201 sizeof(*size_request));
202 ret = -EINVAL;
203 goto queue_work;
204 }
205
206 mutex_lock(&bootrom->mutex);
207
208 ret = find_firmware(bootrom, size_request->stage);
209 if (ret)
210 goto unlock;
211
212 if (!gb_operation_response_alloc(op, sizeof(*size_response),
213 GFP_KERNEL)) {
214 dev_err(dev, "%s: error allocating response\n", __func__);
215 free_firmware(bootrom);
216 ret = -ENOMEM;
217 goto unlock;
218 }
219
220 size_response = op->response->payload;
221 size_response->size = cpu_to_le32(bootrom->fw->size);
222
223 dev_dbg(dev, "%s: firmware size %d bytes\n", __func__, size_response->size);
224
225unlock:
226 mutex_unlock(&bootrom->mutex);
227
228queue_work:
229 if (!ret) {
230 /* Refresh timeout */
231 gb_bootrom_set_timeout(bootrom, NEXT_REQ_GET_FIRMWARE,
232 NEXT_REQ_TIMEOUT_MS);
233 }
234
235 return ret;
236}
237
/*
 * Handle a Get Firmware request: copy the requested (offset, size)
 * chunk of the previously located blob into the response. The watchdog
 * is re-armed for either another Get Firmware request or, once the
 * final chunk has been sent, a Ready to Boot request.
 */
static int gb_bootrom_get_firmware(struct gb_operation *op)
{
	struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
	const struct firmware *fw;
	struct gb_bootrom_get_firmware_request *firmware_request;
	struct gb_bootrom_get_firmware_response *firmware_response;
	struct device *dev = &op->connection->bundle->dev;
	unsigned int offset, size;
	enum next_request_type next_request;
	int ret = 0;

	/* Disable timeouts */
	gb_bootrom_cancel_timeout(bootrom);

	if (op->request->payload_size != sizeof(*firmware_request)) {
		dev_err(dev, "%s: Illegal size of get firmware request (%zu %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*firmware_request));
		ret = -EINVAL;
		goto queue_work;
	}

	mutex_lock(&bootrom->mutex);

	fw = bootrom->fw;
	if (!fw) {
		dev_err(dev, "%s: firmware not available\n", __func__);
		ret = -EINVAL;
		goto unlock;
	}

	firmware_request = op->request->payload;
	offset = le32_to_cpu(firmware_request->offset);
	size = le32_to_cpu(firmware_request->size);

	/* Reject chunks that run past the end of the blob */
	if (offset >= fw->size || size > fw->size - offset) {
		dev_warn(dev, "bad firmware request (offs = %u, size = %u)\n",
				offset, size);
		ret = -EINVAL;
		goto unlock;
	}

	if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
					 GFP_KERNEL)) {
		dev_err(dev, "%s: error allocating response\n", __func__);
		ret = -ENOMEM;
		goto unlock;
	}

	firmware_response = op->response->payload;
	memcpy(firmware_response->data, fw->data + offset, size);

	dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n", offset,
		size);

unlock:
	mutex_unlock(&bootrom->mutex);

queue_work:
	/*
	 * Refresh timeout. fw/offset/size are only read when ret == 0,
	 * in which case all three were assigned above.
	 */
	if (!ret && (offset + size == fw->size))
		next_request = NEXT_REQ_READY_TO_BOOT;
	else
		next_request = NEXT_REQ_GET_FIRMWARE;

	gb_bootrom_set_timeout(bootrom, next_request, NEXT_REQ_TIMEOUT_MS);

	return ret;
}
307
/*
 * Handle a Ready to Boot request: validate the module's boot status and
 * re-arm the watchdog for the mode switch (which uses a longer timeout,
 * since the interface must reboot into its new personality).
 */
static int gb_bootrom_ready_to_boot(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_bootrom *bootrom = gb_connection_get_data(connection);
	struct gb_bootrom_ready_to_boot_request *rtb_request;
	struct device *dev = &connection->bundle->dev;
	u8 status;
	int ret = 0;

	/* Disable timeouts */
	gb_bootrom_cancel_timeout(bootrom);

	if (op->request->payload_size != sizeof(*rtb_request)) {
		dev_err(dev, "%s: Illegal size of ready to boot request (%zu %zu)\n",
			__func__, op->request->payload_size,
			sizeof(*rtb_request));
		ret = -EINVAL;
		goto queue_work;
	}

	rtb_request = op->request->payload;
	status = rtb_request->status;

	/* Return error if the blob was invalid */
	if (status == GB_BOOTROM_BOOT_STATUS_INVALID) {
		ret = -EINVAL;
		goto queue_work;
	}

	/*
	 * XXX Should we return error for insecure firmware?
	 */
	dev_dbg(dev, "ready to boot: 0x%x, 0\n", status);

queue_work:
	/*
	 * Refresh timeout, the Interface shall load the new personality and
	 * send a new hotplug request, which shall get rid of the bootrom
	 * connection. As that can take some time, increase the timeout a bit.
	 */
	gb_bootrom_set_timeout(bootrom, NEXT_REQ_MODE_SWITCH,
			       MODE_SWITCH_TIMEOUT_MS);

	return ret;
}
353
354static int gb_bootrom_request_handler(struct gb_operation *op)
355{
356 u8 type = op->type;
357
358 switch (type) {
359 case GB_BOOTROM_TYPE_FIRMWARE_SIZE:
360 return gb_bootrom_firmware_size_request(op);
361 case GB_BOOTROM_TYPE_GET_FIRMWARE:
362 return gb_bootrom_get_firmware(op);
363 case GB_BOOTROM_TYPE_READY_TO_BOOT:
364 return gb_bootrom_ready_to_boot(op);
365 default:
366 dev_err(&op->connection->bundle->dev,
367 "unsupported request: %u\n", type);
368 return -EINVAL;
369 }
370}
371
/*
 * Negotiate the bootrom protocol version with the module.
 *
 * Sends the AP's supported major/minor version and accepts the response as
 * long as the module's major version does not exceed ours; the negotiated
 * version is cached in @bootrom. Returns 0 on success, negative errno on
 * failure.
 */
static int gb_bootrom_get_version(struct gb_bootrom *bootrom)
{
	struct gb_bundle *bundle = bootrom->connection->bundle;
	struct gb_bootrom_version_request request;
	struct gb_bootrom_version_response response;
	int ret;

	request.major = GB_BOOTROM_VERSION_MAJOR;
	request.minor = GB_BOOTROM_VERSION_MINOR;

	ret = gb_operation_sync(bootrom->connection,
				GB_BOOTROM_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&bundle->dev,
			"failed to get protocol version: %d\n",
			ret);
		return ret;
	}

	/* A newer major version on the module side is incompatible. */
	if (response.major > request.major) {
		dev_err(&bundle->dev,
			"unsupported major protocol version (%u > %u)\n",
			response.major, request.major);
		return -ENOTSUPP;
	}

	bootrom->protocol_major = response.major;
	bootrom->protocol_minor = response.minor;

	dev_dbg(&bundle->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}
408
/*
 * Bind to a bootrom bundle: create and enable its single connection,
 * negotiate the protocol version, arm the request watchdog and tell the
 * bootrom the AP is ready. Error paths unwind in strict reverse order of
 * setup. Returns 0 on success or a negative errno.
 */
static int gb_bootrom_probe(struct gb_bundle *bundle,
			    const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_bootrom *bootrom;
	int ret;

	/* A bootrom bundle carries exactly one bootrom cport. */
	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_BOOTROM)
		return -ENODEV;

	bootrom = kzalloc(sizeof(*bootrom), GFP_KERNEL);
	if (!bootrom)
		return -ENOMEM;

	connection = gb_connection_create(bundle,
					  le16_to_cpu(cport_desc->id),
					  gb_bootrom_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto err_free_bootrom;
	}

	gb_connection_set_data(connection, bootrom);

	bootrom->connection = connection;

	mutex_init(&bootrom->mutex);
	INIT_DELAYED_WORK(&bootrom->dwork, gb_bootrom_timedout);
	greybus_set_drvdata(bundle, bootrom);

	/* TX-only first: get the version before accepting requests. */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto err_connection_destroy;

	ret = gb_bootrom_get_version(bootrom);
	if (ret)
		goto err_connection_disable;

	bootrom_es2_fixup_vid_pid(bootrom);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_connection_disable;

	/* Refresh timeout */
	gb_bootrom_set_timeout(bootrom, NEXT_REQ_FIRMWARE_SIZE,
			       NEXT_REQ_TIMEOUT_MS);

	/* Tell bootrom we're ready. */
	ret = gb_operation_sync(connection, GB_BOOTROM_TYPE_AP_READY, NULL, 0,
				NULL, 0);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to send AP READY: %d\n", ret);
		goto err_cancel_timeout;
	}

	dev_dbg(&bundle->dev, "AP_READY sent\n");

	return 0;

err_cancel_timeout:
	gb_bootrom_cancel_timeout(bootrom);
err_connection_disable:
	gb_connection_disable(connection);
err_connection_destroy:
	gb_connection_destroy(connection);
err_free_bootrom:
	kfree(bootrom);

	return ret;
}
486
/*
 * Unbind from the bootrom bundle: quiesce the connection and watchdog,
 * release the cached firmware and free all driver state.
 */
static void gb_bootrom_disconnect(struct gb_bundle *bundle)
{
	struct gb_bootrom *bootrom = greybus_get_drvdata(bundle);

	dev_dbg(&bundle->dev, "%s\n", __func__);

	gb_connection_disable(bootrom->connection);

	/* Disable timeouts */
	gb_bootrom_cancel_timeout(bootrom);

	/*
	 * Release firmware:
	 *
	 * As the connection and the delayed work are already disabled, we don't
	 * need to lock access to bootrom->fw here.
	 */
	free_firmware(bootrom);

	gb_connection_destroy(bootrom->connection);
	kfree(bootrom);
}
509
/* Match any bundle advertising the BOOTROM device class. */
static const struct greybus_bundle_id gb_bootrom_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BOOTROM) },
	{ }
};

static struct greybus_driver gb_bootrom_driver = {
	.name = "bootrom",
	.probe = gb_bootrom_probe,
	.disconnect = gb_bootrom_disconnect,
	.id_table = gb_bootrom_id_table,
};

/* Boilerplate module init/exit that registers the greybus driver. */
module_greybus_driver(gb_bootrom_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/bundle.c b/drivers/staging/greybus/bundle.c
new file mode 100644
index 000000000000..d2ef57d090be
--- /dev/null
+++ b/drivers/staging/greybus/bundle.c
@@ -0,0 +1,253 @@
1/*
2 * Greybus bundles
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include "greybus.h"
11#include "greybus_trace.h"
12
13static ssize_t bundle_class_show(struct device *dev,
14 struct device_attribute *attr, char *buf)
15{
16 struct gb_bundle *bundle = to_gb_bundle(dev);
17
18 return sprintf(buf, "0x%02x\n", bundle->class);
19}
20static DEVICE_ATTR_RO(bundle_class);
21
22static ssize_t bundle_id_show(struct device *dev,
23 struct device_attribute *attr, char *buf)
24{
25 struct gb_bundle *bundle = to_gb_bundle(dev);
26
27 return sprintf(buf, "%u\n", bundle->id);
28}
29static DEVICE_ATTR_RO(bundle_id);
30
31static ssize_t state_show(struct device *dev, struct device_attribute *attr,
32 char *buf)
33{
34 struct gb_bundle *bundle = to_gb_bundle(dev);
35
36 if (bundle->state == NULL)
37 return sprintf(buf, "\n");
38
39 return sprintf(buf, "%s\n", bundle->state);
40}
41
42static ssize_t state_store(struct device *dev, struct device_attribute *attr,
43 const char *buf, size_t size)
44{
45 struct gb_bundle *bundle = to_gb_bundle(dev);
46
47 kfree(bundle->state);
48 bundle->state = kstrdup(buf, GFP_KERNEL);
49 if (!bundle->state)
50 return -ENOMEM;
51
52 /* Tell userspace that the file contents changed */
53 sysfs_notify(&bundle->dev.kobj, NULL, "state");
54
55 return size;
56}
57static DEVICE_ATTR_RW(state);
58
/* Attributes exposed in sysfs for every bundle device. */
static struct attribute *bundle_attrs[] = {
	&dev_attr_bundle_class.attr,
	&dev_attr_bundle_id.attr,
	&dev_attr_state.attr,
	NULL,
};

/* Generates bundle_groups[], assigned to bundle->dev.groups at creation. */
ATTRIBUTE_GROUPS(bundle);
67
68static struct gb_bundle *gb_bundle_find(struct gb_interface *intf,
69 u8 bundle_id)
70{
71 struct gb_bundle *bundle;
72
73 list_for_each_entry(bundle, &intf->bundles, links) {
74 if (bundle->id == bundle_id)
75 return bundle;
76 }
77
78 return NULL;
79}
80
/*
 * Device-core release callback: frees the bundle and the buffers it owns
 * once the last reference on its struct device is dropped.
 */
static void gb_bundle_release(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);

	trace_gb_bundle_release(bundle);

	kfree(bundle->state);
	kfree(bundle->cport_desc);
	kfree(bundle);
}
91
#ifdef CONFIG_PM
/*
 * Disable every connection of the bundle; used as the suspend fallback when
 * the bound driver supplies no runtime_suspend callback.
 */
static void gb_bundle_disable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_disable(connection);
}
100
/* Re-enable every connection of the bundle (resume counterpart of above). */
static void gb_bundle_enable_all_connections(struct gb_bundle *bundle)
{
	struct gb_connection *connection;

	list_for_each_entry(connection, &bundle->connections, bundle_links)
		gb_connection_enable(connection);
}
108
/*
 * Runtime-suspend the bundle: quiesce it via the driver's runtime_suspend
 * callback when one is provided (else by disabling all its connections),
 * then ask the module, through the interface control connection, to suspend
 * the bundle on its side. On control failure the local state is rolled back.
 */
static int gb_bundle_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	if (pm && pm->runtime_suspend) {
		ret = pm->runtime_suspend(&bundle->dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_disable_all_connections(bundle);
	}

	ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id);
	if (ret) {
		/*
		 * NOTE(review): on a successful rollback resume this returns
		 * 0 even though the control-side suspend failed — confirm
		 * that reporting success here is intentional.
		 */
		if (pm && pm->runtime_resume)
			ret = pm->runtime_resume(dev);
		else
			gb_bundle_enable_all_connections(bundle);

		return ret;
	}

	return 0;
}
135
/*
 * Runtime-resume the bundle: first resume it on the module side via the
 * control connection, then locally via the driver's runtime_resume callback
 * (or by re-enabling all connections when none is provided).
 */
static int gb_bundle_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct dev_pm_ops *pm = dev->driver->pm;
	int ret;

	ret = gb_control_bundle_resume(bundle->intf->control, bundle->id);
	if (ret)
		return ret;

	if (pm && pm->runtime_resume) {
		ret = pm->runtime_resume(dev);
		if (ret)
			return ret;
	} else {
		gb_bundle_enable_all_connections(bundle);
	}

	return 0;
}
156
/* Runtime-idle callback: immediately schedule an autosuspend attempt. */
static int gb_bundle_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
#endif
165
/* Runtime-PM ops table; expands to empty when CONFIG_PM is disabled. */
static const struct dev_pm_ops gb_bundle_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};
169
/* Device type shared by all bundle devices on the greybus bus. */
struct device_type greybus_bundle_type = {
	.name = "greybus_bundle",
	.release = gb_bundle_release,
	.pm = &gb_bundle_pm_ops,
};
175
/*
 * Create a gb_bundle structure to represent a discovered
 * bundle. Returns a pointer to the new bundle or a null
 * pointer if a failure occurs due to memory exhaustion.
 */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class)
{
	struct gb_bundle *bundle;

	/* BUNDLE_ID_NONE (U8_MAX) is reserved as an invalid id. */
	if (bundle_id == BUNDLE_ID_NONE) {
		dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id);
		return NULL;
	}

	/*
	 * Reject any attempt to reuse a bundle id. We initialize
	 * these serially, so there's no need to worry about keeping
	 * the interface bundle list locked here.
	 */
	if (gb_bundle_find(intf, bundle_id)) {
		dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id);
		return NULL;
	}

	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
	if (!bundle)
		return NULL;

	bundle->intf = intf;
	bundle->id = bundle_id;
	bundle->class = class;
	INIT_LIST_HEAD(&bundle->connections);

	bundle->dev.parent = &intf->dev;
	bundle->dev.bus = &greybus_bus_type;
	bundle->dev.type = &greybus_bundle_type;
	bundle->dev.groups = bundle_groups;
	bundle->dev.dma_mask = intf->dev.dma_mask;
	/* Initialized only; registration happens later in gb_bundle_add(). */
	device_initialize(&bundle->dev);
	dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id);

	list_add(&bundle->links, &intf->bundles);

	trace_gb_bundle_create(bundle);

	return bundle;
}
224
225int gb_bundle_add(struct gb_bundle *bundle)
226{
227 int ret;
228
229 ret = device_add(&bundle->dev);
230 if (ret) {
231 dev_err(&bundle->dev, "failed to register bundle: %d\n", ret);
232 return ret;
233 }
234
235 trace_gb_bundle_add(bundle);
236
237 return 0;
238}
239
/*
 * Tear down a previously set up bundle.
 */
void gb_bundle_destroy(struct gb_bundle *bundle)
{
	trace_gb_bundle_destroy(bundle);

	/* The device may never have been added (or device_add() failed). */
	if (device_is_registered(&bundle->dev))
		device_del(&bundle->dev);

	list_del(&bundle->links);

	/* Drop the initial reference; frees via gb_bundle_release(). */
	put_device(&bundle->dev);
}
diff --git a/drivers/staging/greybus/bundle.h b/drivers/staging/greybus/bundle.h
new file mode 100644
index 000000000000..0c3491def96c
--- /dev/null
+++ b/drivers/staging/greybus/bundle.h
@@ -0,0 +1,90 @@
/*
 * Greybus bundles
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#ifndef __BUNDLE_H
#define __BUNDLE_H

#include <linux/list.h>

/* U8_MAX is reserved as "no bundle"; rejected by gb_bundle_create(). */
#define BUNDLE_ID_NONE U8_MAX

/* Greybus "public" definitions" */
struct gb_bundle {
	struct device dev;		/* embedded device; see greybus_bundle_type */
	struct gb_interface *intf;	/* parent interface */

	u8 id;				/* bundle id, unique within the interface */
	u8 class;			/* greybus device class */
	u8 class_major;
	u8 class_minor;

	size_t num_cports;		/* length of cport_desc[] */
	struct greybus_descriptor_cport *cport_desc;	/* freed in release */

	struct list_head connections;	/* connections on this bundle */
	u8 *state;			/* userspace-owned string from sysfs "state" */

	struct list_head links; /* interface->bundles */
};
#define to_gb_bundle(d) container_of(d, struct gb_bundle, dev)

/* Greybus "private" definitions" */
struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id,
				   u8 class);
int gb_bundle_add(struct gb_bundle *bundle);
void gb_bundle_destroy(struct gb_bundle *bundle);
42
43/* Bundle Runtime PM wrappers */
44#ifdef CONFIG_PM
45static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
46{
47 int retval;
48
49 retval = pm_runtime_get_sync(&bundle->dev);
50 if (retval < 0) {
51 dev_err(&bundle->dev,
52 "pm_runtime_get_sync failed: %d\n", retval);
53 pm_runtime_put_noidle(&bundle->dev);
54 return retval;
55 }
56
57 return 0;
58}
59
60static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
61{
62 int retval;
63
64 pm_runtime_mark_last_busy(&bundle->dev);
65 retval = pm_runtime_put_autosuspend(&bundle->dev);
66
67 return retval;
68}
69
/* Bump the usage count without resuming the device. */
static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle)
{
	pm_runtime_get_noresume(&bundle->dev);
}

/* Drop the usage count without triggering an idle check. */
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle)
{
	pm_runtime_put_noidle(&bundle->dev);
}

#else
/* !CONFIG_PM: all wrappers are successful no-ops. */
static inline int gb_pm_runtime_get_sync(struct gb_bundle *bundle)
{ return 0; }
static inline int gb_pm_runtime_put_autosuspend(struct gb_bundle *bundle)
{ return 0; }

static inline void gb_pm_runtime_get_noresume(struct gb_bundle *bundle) {}
static inline void gb_pm_runtime_put_noidle(struct gb_bundle *bundle) {}
#endif

#endif /* __BUNDLE_H */
diff --git a/drivers/staging/greybus/camera.c b/drivers/staging/greybus/camera.c
new file mode 100644
index 000000000000..46d2e8a9e490
--- /dev/null
+++ b/drivers/staging/greybus/camera.c
@@ -0,0 +1,1400 @@
1/*
2 * Greybus Camera protocol driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/debugfs.h>
11#include <linux/fs.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/string.h>
16#include <linux/uaccess.h>
17#include <linux/vmalloc.h>
18
19#include "gb-camera.h"
20#include "greybus.h"
21#include "greybus_protocols.h"
22
/*
 * Indices of the per-operation debugfs capture buffers ("debugs" in the enum
 * tag appears to be a typo, kept as-is since the name is part of the API).
 */
enum gb_camera_debugs_buffer_id {
	GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
	GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
	GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
	GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
	GB_CAMERA_DEBUGFS_BUFFER_MAX,
};

/* One page of formatted output for a debugfs entry. */
struct gb_camera_debugfs_buffer {
	char data[PAGE_SIZE];
	size_t length;
};

/* Whether streams are currently configured on the module. */
enum gb_camera_state {
	GB_CAMERA_STATE_UNCONFIGURED,
	GB_CAMERA_STATE_CONFIGURED,
};
40
/**
 * struct gb_camera - A Greybus Camera Device
 * @bundle: the greybus bundle this camera driver is bound to
 * @connection: the greybus connection for camera management
 * @data_connection: the greybus connection for camera data
 * @data_cport_id: the data CPort ID on the module side
 * @mutex: protects the connection and state fields
 * @state: the current module state
 * @debugfs: debugfs entries for camera protocol operations testing
 * @module: Greybus camera module registered to HOST processor.
 */
struct gb_camera {
	struct gb_bundle *bundle;
	struct gb_connection *connection;
	struct gb_connection *data_connection;
	u16 data_cport_id;

	struct mutex mutex;
	enum gb_camera_state state;

	struct {
		struct dentry *root;
		struct gb_camera_debugfs_buffer *buffers;
	} debugfs;

	struct gb_camera_module module;
};
67
/* AP-side view of one configured stream, in CPU byte order. */
struct gb_camera_stream_config {
	unsigned int width;
	unsigned int height;
	unsigned int format;	/* greybus image format code */
	unsigned int vc;	/* CSI-2 virtual channel */
	unsigned int dt[2];	/* CSI-2 data types */
	unsigned int max_size;
};

/* Associates a greybus format code with its media bus code and pixel depth. */
struct gb_camera_fmt_info {
	enum v4l2_mbus_pixelcode mbus_code;
	unsigned int gb_format;
	unsigned int bpp;	/* bits per pixel; 0 for variable-size formats */
};
82
/*
 * GB format to media code map. bpp == 0 marks variable-size formats (JPEG,
 * metadata, debug data) whose packet size must be reported by the module.
 */
static const struct gb_camera_fmt_info gb_fmt_info[] = {
	{
		.mbus_code = V4L2_MBUS_FMT_UYVY8_1X16,
		.gb_format = 0x01,
		.bpp	   = 16,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_NV12_1x8,
		.gb_format = 0x12,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_NV21_1x8,
		.gb_format = 0x13,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_YU12_1x8,
		.gb_format = 0x16,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_YV12_1x8,
		.gb_format = 0x17,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_JPEG_1X8,
		.gb_format = 0x40,
		.bpp	   = 0,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_GB_CAM_METADATA_1X8,
		.gb_format = 0x41,
		.bpp	   = 0,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_GB_CAM_DEBUG_DATA_1X8,
		.gb_format = 0x42,
		.bpp	   = 0,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SBGGR10_1X10,
		.gb_format = 0x80,
		.bpp	   = 10,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SGBRG10_1X10,
		.gb_format = 0x81,
		.bpp	   = 10,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
		.gb_format = 0x82,
		.bpp	   = 10,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SRGGB10_1X10,
		.gb_format = 0x83,
		.bpp	   = 10,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SBGGR12_1X12,
		.gb_format = 0x84,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SGBRG12_1X12,
		.gb_format = 0x85,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
		.gb_format = 0x86,
		.bpp	   = 12,
	},
	{
		.mbus_code = V4L2_MBUS_FMT_SRGGB12_1X12,
		.gb_format = 0x87,
		.bpp	   = 12,
	},
};
166
167static const struct gb_camera_fmt_info *gb_camera_get_format_info(u16 gb_fmt)
168{
169 unsigned int i;
170
171 for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
172 if (gb_fmt_info[i].gb_format == gb_fmt)
173 return &gb_fmt_info[i];
174 }
175
176 return NULL;
177}
178
/* CPort numbers hardwired by the ES2 bridge for its two CSI interfaces. */
#define ES2_APB_CDSI0_CPORT 16
#define ES2_APB_CDSI1_CPORT 17

/* Upper bound on per-capture settings blobs accepted from callers. */
#define GB_CAMERA_MAX_SETTINGS_SIZE 8192

/* Logging helpers bound to the camera's bundle device. */
#define gcam_dbg(gcam, format...) dev_dbg(&gcam->bundle->dev, format)
#define gcam_info(gcam, format...) dev_info(&gcam->bundle->dev, format)
#define gcam_err(gcam, format...) dev_err(&gcam->bundle->dev, format)
187
/*
 * Like gb_operation_sync() but with explicit operation flags and a
 * variable-size response: *response_size is the maximum on input and is
 * updated to the actual payload size on success.
 */
static int gb_camera_operation_sync_flags(struct gb_connection *connection,
					  int type, unsigned int flags,
					  void *request, size_t request_size,
					  void *response, size_t *response_size)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_flags(connection, type, request_size,
					      *response_size, flags,
					      GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	} else {
		*response_size = operation->response->payload_size;

		if (operation->response->payload_size)
			memcpy(response, operation->response->payload,
			       operation->response->payload_size);
	}

	gb_operation_put(operation);

	return ret;
}
222
223static int gb_camera_get_max_pkt_size(struct gb_camera *gcam,
224 struct gb_camera_configure_streams_response *resp)
225{
226 unsigned int max_pkt_size = 0;
227 unsigned int i;
228
229 for (i = 0; i < resp->num_streams; i++) {
230 struct gb_camera_stream_config_response *cfg = &resp->config[i];
231 const struct gb_camera_fmt_info *fmt_info;
232 unsigned int pkt_size;
233
234 fmt_info = gb_camera_get_format_info(cfg->format);
235 if (!fmt_info) {
236 gcam_err(gcam, "unsupported greybus image format: %d\n",
237 cfg->format);
238 return -EIO;
239 }
240
241 if (fmt_info->bpp == 0) {
242 pkt_size = le32_to_cpu(cfg->max_pkt_size);
243
244 if (pkt_size == 0) {
245 gcam_err(gcam,
246 "Stream %u: invalid zero maximum packet size\n",
247 i);
248 return -EIO;
249 }
250 } else {
251 pkt_size = le16_to_cpu(cfg->width) * fmt_info->bpp / 8;
252
253 if (pkt_size != le32_to_cpu(cfg->max_pkt_size)) {
254 gcam_err(gcam,
255 "Stream %u: maximum packet size mismatch (%u/%u)\n",
256 i, pkt_size, cfg->max_pkt_size);
257 return -EIO;
258 }
259 }
260
261 max_pkt_size = max(pkt_size, max_pkt_size);
262 }
263
264 return max_pkt_size;
265}
266
267/*
268 * Validate the stream configuration response verifying padding is correctly
269 * set and the returned number of streams is supported
270 */
271static const int gb_camera_configure_streams_validate_response(
272 struct gb_camera *gcam,
273 struct gb_camera_configure_streams_response *resp,
274 unsigned int nstreams)
275{
276 unsigned int i;
277
278 /* Validate the returned response structure */
279 if (resp->padding[0] || resp->padding[1]) {
280 gcam_err(gcam, "response padding != 0\n");
281 return -EIO;
282 }
283
284 if (resp->num_streams > nstreams) {
285 gcam_err(gcam, "got #streams %u > request %u\n",
286 resp->num_streams, nstreams);
287 return -EIO;
288 }
289
290 for (i = 0; i < resp->num_streams; i++) {
291 struct gb_camera_stream_config_response *cfg = &resp->config[i];
292 if (cfg->padding) {
293 gcam_err(gcam, "stream #%u padding != 0\n", i);
294 return -EIO;
295 }
296 }
297
298 return 0;
299}
300
301/* -----------------------------------------------------------------------------
302 * Hardware Configuration
303 */
304
/*
 * Switch one UniPro interface between high-speed (fast mode, gear 2, both
 * directions, with RX/TX termination) and low-power slow-auto mode via the
 * SVC. Returns the gb_svc_intf_set_power_mode() result.
 */
static int gb_camera_set_intf_power_mode(struct gb_camera *gcam, u8 intf_id,
					 bool hs)
{
	struct gb_svc *svc = gcam->connection->hd->svc;
	int ret;

	if (hs)
		ret = gb_svc_intf_set_power_mode(svc, intf_id,
						 GB_SVC_UNIPRO_HS_SERIES_A,
						 GB_SVC_UNIPRO_FAST_MODE, 2, 2,
						 GB_SVC_SMALL_AMPLITUDE,
						 GB_SVC_NO_DE_EMPHASIS,
						 GB_SVC_UNIPRO_FAST_MODE, 2, 2,
						 GB_SVC_PWRM_RXTERMINATION |
						 GB_SVC_PWRM_TXTERMINATION, 0,
						 NULL, NULL);
	else
		ret = gb_svc_intf_set_power_mode(svc, intf_id,
						 GB_SVC_UNIPRO_HS_SERIES_A,
						 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
						 2, 1,
						 GB_SVC_SMALL_AMPLITUDE,
						 GB_SVC_NO_DE_EMPHASIS,
						 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
						 2, 1,
						 0, 0,
						 NULL, NULL);

	return ret;
}
335
/*
 * Set both ends of the camera link (module interface and AP interface) to
 * the requested power mode. If the AP side fails, the module interface is
 * switched back to restore a consistent link state.
 */
static int gb_camera_set_power_mode(struct gb_camera *gcam, bool hs)
{
	struct gb_interface *intf = gcam->connection->intf;
	struct gb_svc *svc = gcam->connection->hd->svc;
	int ret;

	ret = gb_camera_set_intf_power_mode(gcam, intf->interface_id, hs);
	if (ret < 0) {
		gcam_err(gcam, "failed to set module interface to %s (%d)\n",
			 hs ? "HS" : "PWM", ret);
		return ret;
	}

	ret = gb_camera_set_intf_power_mode(gcam, svc->ap_intf_id, hs);
	if (ret < 0) {
		/* Roll the module side back to the previous mode. */
		gb_camera_set_intf_power_mode(gcam, intf->interface_id, !hs);
		gcam_err(gcam, "failed to set AP interface to %s (%d)\n",
			 hs ? "HS" : "PWM", ret);
		return ret;
	}

	return 0;
}
359
/* Wire format of the APB CSI transmitter configuration request. */
struct ap_csi_config_request {
	__u8 csi_id;	/* which CSI interface on the bridge */
	__u8 flags;
#define GB_CAMERA_CSI_FLAG_CLOCK_CONTINUOUS 0x01
	__u8 num_lanes;
	__u8 padding;
	__le32 csi_clk_freq;	/* bus clock frequency, little-endian */
	__le32 max_pkt_size;	/* largest packet, little-endian */
} __packed;

/*
 * TODO: Compute the number of lanes dynamically based on bandwidth
 * requirements.
 */
#define GB_CAMERA_CSI_NUM_DATA_LANES 4

/* Allowed CSI clock range and the safety margin added to the estimate. */
#define GB_CAMERA_CSI_CLK_FREQ_MAX 999000000U
#define GB_CAMERA_CSI_CLK_FREQ_MIN 100000000U
#define GB_CAMERA_CSI_CLK_FREQ_MARGIN 150000000U
379
380static int gb_camera_setup_data_connection(struct gb_camera *gcam,
381 struct gb_camera_configure_streams_response *resp,
382 struct gb_camera_csi_params *csi_params)
383{
384 struct ap_csi_config_request csi_cfg;
385 struct gb_connection *conn;
386 unsigned int clk_freq;
387 int ret;
388
389 /*
390 * Create the data connection between the camera module data CPort and
391 * APB CDSI1. The CDSI1 CPort ID is hardcoded by the ES2 bridge.
392 */
393 conn = gb_connection_create_offloaded(gcam->bundle, gcam->data_cport_id,
394 GB_CONNECTION_FLAG_NO_FLOWCTRL |
395 GB_CONNECTION_FLAG_CDSI1);
396 if (IS_ERR(conn))
397 return PTR_ERR(conn);
398
399 gcam->data_connection = conn;
400 gb_connection_set_data(conn, gcam);
401
402 ret = gb_connection_enable(conn);
403 if (ret)
404 goto error_conn_destroy;
405
406 /* Set the UniPro link to high speed mode. */
407 ret = gb_camera_set_power_mode(gcam, true);
408 if (ret < 0)
409 goto error_conn_disable;
410
411 /*
412 * Configure the APB-A CSI-2 transmitter.
413 *
414 * Hardcode the number of lanes to 4 and compute the bus clock frequency
415 * based on the module bandwidth requirements with a safety margin.
416 */
417 memset(&csi_cfg, 0, sizeof(csi_cfg));
418 csi_cfg.csi_id = 1;
419 csi_cfg.flags = 0;
420 csi_cfg.num_lanes = GB_CAMERA_CSI_NUM_DATA_LANES;
421
422 clk_freq = resp->data_rate / 2 / GB_CAMERA_CSI_NUM_DATA_LANES;
423 clk_freq = clamp(clk_freq + GB_CAMERA_CSI_CLK_FREQ_MARGIN,
424 GB_CAMERA_CSI_CLK_FREQ_MIN,
425 GB_CAMERA_CSI_CLK_FREQ_MAX);
426 csi_cfg.csi_clk_freq = clk_freq;
427
428 ret = gb_camera_get_max_pkt_size(gcam, resp);
429 if (ret < 0) {
430 ret = -EIO;
431 goto error_power;
432 }
433 csi_cfg.max_pkt_size = ret;
434
435 ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
436 sizeof(csi_cfg),
437 GB_APB_REQUEST_CSI_TX_CONTROL, false);
438 if (ret < 0) {
439 gcam_err(gcam, "failed to start the CSI transmitter\n");
440 goto error_power;
441 }
442
443 if (csi_params) {
444 csi_params->clk_freq = csi_cfg.csi_clk_freq;
445 csi_params->num_lanes = csi_cfg.num_lanes;
446 }
447
448 return 0;
449
450error_power:
451 gb_camera_set_power_mode(gcam, false);
452error_conn_disable:
453 gb_connection_disable(gcam->data_connection);
454error_conn_destroy:
455 gb_connection_destroy(gcam->data_connection);
456 gcam->data_connection = NULL;
457 return ret;
458}
459
/*
 * Tear down the CSI data path: stop the APB CSI transmitter, drop the link
 * back to low-power mode and destroy the data connection. Failure to stop
 * the transmitter is only logged; teardown continues regardless.
 */
static void gb_camera_teardown_data_connection(struct gb_camera *gcam)
{
	struct ap_csi_config_request csi_cfg;
	int ret;

	/* Stop the APB1 CSI transmitter. */
	memset(&csi_cfg, 0, sizeof(csi_cfg));
	csi_cfg.csi_id = 1;

	ret = gb_hd_output(gcam->connection->hd, &csi_cfg,
			   sizeof(csi_cfg),
			   GB_APB_REQUEST_CSI_TX_CONTROL, false);

	if (ret < 0)
		gcam_err(gcam, "failed to stop the CSI transmitter\n");

	/* Set the UniPro link to low speed mode. */
	gb_camera_set_power_mode(gcam, false);

	/* Destroy the data connection. */
	gb_connection_disable(gcam->data_connection);
	gb_connection_destroy(gcam->data_connection);
	gcam->data_connection = NULL;
}
484
485/* -----------------------------------------------------------------------------
486 * Camera Protocol Operations
487 */
488
/*
 * Query the module's capability blob into @capabilities; *size is the buffer
 * capacity on input and the returned payload size on success. Takes a
 * runtime-PM reference for the duration of the request and fails with
 * -EINVAL if the management connection is gone.
 */
static int gb_camera_capabilities(struct gb_camera *gcam,
				  u8 *capabilities, size_t *size)
{
	int ret;

	ret = gb_pm_runtime_get_sync(gcam->bundle);
	if (ret)
		return ret;

	mutex_lock(&gcam->mutex);

	if (!gcam->connection) {
		ret = -EINVAL;
		goto done;
	}

	ret = gb_camera_operation_sync_flags(gcam->connection,
					     GB_CAMERA_TYPE_CAPABILITIES,
					     GB_OPERATION_FLAG_SHORT_RESPONSE,
					     NULL, 0,
					     (void *)capabilities, size);
	if (ret)
		gcam_err(gcam, "failed to retrieve capabilities: %d\n", ret);

done:
	mutex_unlock(&gcam->mutex);

	gb_pm_runtime_put_autosuspend(gcam->bundle);

	return ret;
}
520
/*
 * Configure up to GB_CAMERA_MAX_STREAMS streams on the module.
 *
 * On input *num_streams/*flags/@streams describe the requested
 * configuration; on return they are overwritten with what the module
 * actually accepted (which may be adjusted, or fewer streams). Unless the
 * request was test-only or was adjusted, the CSI data path is torn down
 * and/or (re)established to match, and an extra runtime-PM reference is
 * held while any streams remain configured.
 */
static int gb_camera_configure_streams(struct gb_camera *gcam,
				       unsigned int *num_streams,
				       unsigned int *flags,
				       struct gb_camera_stream_config *streams,
				       struct gb_camera_csi_params *csi_params)
{
	struct gb_camera_configure_streams_request *req;
	struct gb_camera_configure_streams_response *resp;
	unsigned int nstreams = *num_streams;
	unsigned int i;
	size_t req_size;
	size_t resp_size;
	int ret;

	if (nstreams > GB_CAMERA_MAX_STREAMS)
		return -EINVAL;

	/* Request and response carry one config entry per stream. */
	req_size = sizeof(*req) + nstreams * sizeof(req->config[0]);
	resp_size = sizeof(*resp) + nstreams * sizeof(resp->config[0]);

	req = kmalloc(req_size, GFP_KERNEL);
	resp = kmalloc(resp_size, GFP_KERNEL);
	if (!req || !resp) {
		kfree(req);
		kfree(resp);
		return -ENOMEM;
	}

	req->num_streams = nstreams;
	req->flags = *flags;
	req->padding = 0;

	for (i = 0; i < nstreams; ++i) {
		struct gb_camera_stream_config_request *cfg = &req->config[i];

		cfg->width = cpu_to_le16(streams[i].width);
		cfg->height = cpu_to_le16(streams[i].height);
		cfg->format = cpu_to_le16(streams[i].format);
		cfg->padding = 0;
	}

	mutex_lock(&gcam->mutex);

	ret = gb_pm_runtime_get_sync(gcam->bundle);
	if (ret)
		goto done_skip_pm_put;

	if (!gcam->connection) {
		ret = -EINVAL;
		goto done;
	}

	ret = gb_camera_operation_sync_flags(gcam->connection,
					     GB_CAMERA_TYPE_CONFIGURE_STREAMS,
					     GB_OPERATION_FLAG_SHORT_RESPONSE,
					     req, req_size,
					     resp, &resp_size);
	if (ret < 0)
		goto done;

	ret = gb_camera_configure_streams_validate_response(gcam, resp,
							    nstreams);
	if (ret < 0)
		goto done;

	/* Report the module's accepted configuration back to the caller. */
	*flags = resp->flags;
	*num_streams = resp->num_streams;

	for (i = 0; i < resp->num_streams; ++i) {
		struct gb_camera_stream_config_response *cfg = &resp->config[i];

		streams[i].width = le16_to_cpu(cfg->width);
		streams[i].height = le16_to_cpu(cfg->height);
		streams[i].format = le16_to_cpu(cfg->format);
		streams[i].vc = cfg->virtual_channel;
		streams[i].dt[0] = cfg->data_type[0];
		streams[i].dt[1] = cfg->data_type[1];
		streams[i].max_size = le32_to_cpu(cfg->max_size);
	}

	/* Adjusted or test-only requests don't touch the data path. */
	if ((resp->flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED) ||
	    (req->flags & GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY))
		goto done;

	if (gcam->state == GB_CAMERA_STATE_CONFIGURED) {
		gb_camera_teardown_data_connection(gcam);
		gcam->state = GB_CAMERA_STATE_UNCONFIGURED;

		/*
		 * When unconfiguring streams release the PM runtime reference
		 * that was acquired when streams were configured. The bundle
		 * won't be suspended until the PM runtime reference acquired at
		 * the beginning of this function gets released right before
		 * returning.
		 */
		gb_pm_runtime_put_noidle(gcam->bundle);
	}

	if (resp->num_streams == 0)
		goto done;

	/*
	 * Make sure the bundle won't be suspended until streams get
	 * unconfigured after the stream is configured successfully
	 */
	gb_pm_runtime_get_noresume(gcam->bundle);

	/* Setup CSI-2 connection from APB-A to AP */
	ret = gb_camera_setup_data_connection(gcam, resp, csi_params);
	if (ret < 0) {
		/* Roll back: ask the module for zero streams. */
		memset(req, 0, sizeof(*req));
		gb_operation_sync(gcam->connection,
				  GB_CAMERA_TYPE_CONFIGURE_STREAMS,
				  req, sizeof(*req),
				  resp, sizeof(*resp));
		*flags = 0;
		*num_streams = 0;
		gb_pm_runtime_put_noidle(gcam->bundle);
		goto done;
	}

	gcam->state = GB_CAMERA_STATE_CONFIGURED;

done:
	gb_pm_runtime_put_autosuspend(gcam->bundle);

done_skip_pm_put:
	mutex_unlock(&gcam->mutex);
	kfree(req);
	kfree(resp);
	return ret;
}
653
/*
 * Queue a capture request on the module: @request_id tags the request,
 * @streams selects the streams, @num_frames is the frame count and
 * @settings is an opaque per-capture settings blob of @settings_size bytes
 * (bounded by GB_CAMERA_MAX_SETTINGS_SIZE).
 */
static int gb_camera_capture(struct gb_camera *gcam, u32 request_id,
			     unsigned int streams, unsigned int num_frames,
			     size_t settings_size, const void *settings)
{
	struct gb_camera_capture_request *req;
	size_t req_size;
	int ret;

	if (settings_size > GB_CAMERA_MAX_SETTINGS_SIZE)
		return -EINVAL;

	/* Settings are appended to the fixed-size request header. */
	req_size = sizeof(*req) + settings_size;
	req = kmalloc(req_size, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->request_id = cpu_to_le32(request_id);
	/*
	 * NOTE(review): @streams is narrowed into the request field —
	 * presumably a bitmask that fits; confirm against
	 * gb_camera_capture_request in greybus_protocols.h.
	 */
	req->streams = streams;
	req->padding = 0;
	req->num_frames = cpu_to_le16(num_frames);
	memcpy(req->settings, settings, settings_size);

	mutex_lock(&gcam->mutex);

	if (!gcam->connection) {
		ret = -EINVAL;
		goto done;
	}

	ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_CAPTURE,
				req, req_size, NULL, 0);
done:
	mutex_unlock(&gcam->mutex);

	kfree(req);

	return ret;
}
692
/*
 * Flush pending capture requests on the module. On success, optionally
 * returns through @request_id the id of the last request that will be
 * processed before the flush takes effect.
 */
static int gb_camera_flush(struct gb_camera *gcam, u32 *request_id)
{
	struct gb_camera_flush_response resp;
	int ret;

	mutex_lock(&gcam->mutex);

	if (!gcam->connection) {
		ret = -EINVAL;
		goto done;
	}

	ret = gb_operation_sync(gcam->connection, GB_CAMERA_TYPE_FLUSH, NULL, 0,
				&resp, sizeof(resp));

	if (ret < 0)
		goto done;

	if (request_id)
		*request_id = le32_to_cpu(resp.request_id);

done:
	mutex_unlock(&gcam->mutex);

	return ret;
}
719
/*
 * Handler for unsolicited requests arriving on the management
 * connection. Only the metadata event is supported; its payload is
 * validated and logged, no response payload is generated.
 */
static int gb_camera_request_handler(struct gb_operation *op)
{
	struct gb_camera *gcam = gb_connection_get_data(op->connection);
	struct gb_camera_metadata_request *payload;
	struct gb_message *request;

	if (op->type != GB_CAMERA_TYPE_METADATA) {
		gcam_err(gcam, "Unsupported unsolicited event: %u\n", op->type);
		return -EINVAL;
	}

	request = op->request;

	/* The payload must at least hold the fixed metadata header. */
	if (request->payload_size < sizeof(*payload)) {
		gcam_err(gcam, "Wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;

	/*
	 * NOTE(review): the fields appear to be little-endian wire values
	 * printed without le*_to_cpu() conversion — confirm against the
	 * protocol header; this only affects debug output on big-endian
	 * hosts.
	 */
	gcam_dbg(gcam, "received metadata for request %u, frame %u, stream %u\n",
		payload->request_id, payload->frame_number, payload->stream);

	return 0;
}
746
747/* -----------------------------------------------------------------------------
748 * Interface with HOST gmp camera.
749 */
750static unsigned int gb_camera_mbus_to_gb(enum v4l2_mbus_pixelcode mbus_code)
751{
752 unsigned int i;
753
754 for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
755 if (gb_fmt_info[i].mbus_code == mbus_code)
756 return gb_fmt_info[i].gb_format;
757 }
758 return gb_fmt_info[0].gb_format;
759}
760
761static enum v4l2_mbus_pixelcode gb_camera_gb_to_mbus(u16 gb_fmt)
762{
763 unsigned int i;
764
765 for (i = 0; i < ARRAY_SIZE(gb_fmt_info); i++) {
766 if (gb_fmt_info[i].gb_format == gb_fmt)
767 return gb_fmt_info[i].mbus_code;
768 }
769 return gb_fmt_info[0].mbus_code;
770}
771
772static ssize_t gb_camera_op_capabilities(void *priv, char *data, size_t len)
773{
774 struct gb_camera *gcam = priv;
775 size_t capabilities_len = len;
776 int ret;
777
778 ret = gb_camera_capabilities(gcam, data, &capabilities_len);
779 if (ret)
780 return ret;
781
782 return capabilities_len;
783}
784
785static int gb_camera_op_configure_streams(void *priv, unsigned int *nstreams,
786 unsigned int *flags, struct gb_camera_stream *streams,
787 struct gb_camera_csi_params *csi_params)
788{
789 struct gb_camera *gcam = priv;
790 struct gb_camera_stream_config *gb_streams;
791 unsigned int gb_flags = 0;
792 unsigned int gb_nstreams = *nstreams;
793 unsigned int i;
794 int ret;
795
796 if (gb_nstreams > GB_CAMERA_MAX_STREAMS)
797 return -EINVAL;
798
799 gb_streams = kzalloc(gb_nstreams * sizeof(*gb_streams), GFP_KERNEL);
800 if (!gb_streams)
801 return -ENOMEM;
802
803 for (i = 0; i < gb_nstreams; i++) {
804 gb_streams[i].width = streams[i].width;
805 gb_streams[i].height = streams[i].height;
806 gb_streams[i].format =
807 gb_camera_mbus_to_gb(streams[i].pixel_code);
808 }
809
810 if (*flags & GB_CAMERA_IN_FLAG_TEST)
811 gb_flags |= GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY;
812
813 ret = gb_camera_configure_streams(gcam, &gb_nstreams,
814 &gb_flags, gb_streams, csi_params);
815 if (ret < 0)
816 goto done;
817 if (gb_nstreams > *nstreams) {
818 ret = -EINVAL;
819 goto done;
820 }
821
822 *flags = 0;
823 if (gb_flags & GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED)
824 *flags |= GB_CAMERA_OUT_FLAG_ADJUSTED;
825
826 for (i = 0; i < gb_nstreams; i++) {
827 streams[i].width = gb_streams[i].width;
828 streams[i].height = gb_streams[i].height;
829 streams[i].vc = gb_streams[i].vc;
830 streams[i].dt[0] = gb_streams[i].dt[0];
831 streams[i].dt[1] = gb_streams[i].dt[1];
832 streams[i].max_size = gb_streams[i].max_size;
833 streams[i].pixel_code =
834 gb_camera_gb_to_mbus(gb_streams[i].format);
835 }
836 *nstreams = gb_nstreams;
837
838done:
839 kfree(gb_streams);
840 return ret;
841}
842
/* gb_camera_ops .capture handler: thin forwarder to gb_camera_capture(). */
static int gb_camera_op_capture(void *priv, u32 request_id,
		unsigned int streams, unsigned int num_frames,
		size_t settings_size, const void *settings)
{
	struct gb_camera *gcam = priv;

	return gb_camera_capture(gcam, request_id, streams, num_frames,
				 settings_size, settings);
}
852
/* gb_camera_ops .flush handler: thin forwarder to gb_camera_flush(). */
static int gb_camera_op_flush(void *priv, u32 *request_id)
{
	struct gb_camera *gcam = priv;

	return gb_camera_flush(gcam, request_id);
}
859
/* Operation table exposed to the host-side gmp camera interface. */
static const struct gb_camera_ops gb_cam_ops = {
	.capabilities = gb_camera_op_capabilities,
	.configure_streams = gb_camera_op_configure_streams,
	.capture = gb_camera_op_capture,
	.flush = gb_camera_op_flush,
};
866
867/* -----------------------------------------------------------------------------
868 * DebugFS
869 */
870
/*
 * Query the module capabilities and render them as a hex dump into the
 * "capabilities" debugfs buffer. @buf and @len are unused: this is a
 * read-only entry triggered from gb_camera_debugfs_read().
 */
static ssize_t gb_camera_debugfs_capabilities(struct gb_camera *gcam,
		char *buf, size_t len)
{
	struct gb_camera_debugfs_buffer *buffer =
		&gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES];
	size_t size = 1024;
	unsigned int i;
	u8 *caps;
	int ret;

	caps = kmalloc(size, GFP_KERNEL);
	if (!caps)
		return -ENOMEM;

	/* On success, size is updated to the actual capabilities length. */
	ret = gb_camera_capabilities(gcam, caps, &size);
	if (ret < 0)
		goto done;

	/*
	 * hex_dump_to_buffer() doesn't return the number of bytes dumped prior
	 * to v4.0, we need our own implementation :-(
	 */
	buffer->length = 0;

	/* 16 bytes per line; "%*ph" prints them as space-separated hex. */
	for (i = 0; i < size; i += 16) {
		unsigned int nbytes = min_t(unsigned int, size - i, 16);

		buffer->length += sprintf(buffer->data + buffer->length,
					  "%*ph\n", nbytes, caps + i);
	}

done:
	kfree(caps);
	return ret;
}
906
907static ssize_t gb_camera_debugfs_configure_streams(struct gb_camera *gcam,
908 char *buf, size_t len)
909{
910 struct gb_camera_debugfs_buffer *buffer =
911 &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_STREAMS];
912 struct gb_camera_stream_config *streams;
913 unsigned int nstreams;
914 unsigned int flags;
915 unsigned int i;
916 char *token;
917 int ret;
918
919 /* Retrieve number of streams to configure */
920 token = strsep(&buf, ";");
921 if (token == NULL)
922 return -EINVAL;
923
924 ret = kstrtouint(token, 10, &nstreams);
925 if (ret < 0)
926 return ret;
927
928 if (nstreams > GB_CAMERA_MAX_STREAMS)
929 return -EINVAL;
930
931 token = strsep(&buf, ";");
932 if (token == NULL)
933 return -EINVAL;
934
935 ret = kstrtouint(token, 10, &flags);
936 if (ret < 0)
937 return ret;
938
939 /* For each stream to configure parse width, height and format */
940 streams = kzalloc(nstreams * sizeof(*streams), GFP_KERNEL);
941 if (!streams)
942 return -ENOMEM;
943
944 for (i = 0; i < nstreams; ++i) {
945 struct gb_camera_stream_config *stream = &streams[i];
946
947 /* width */
948 token = strsep(&buf, ";");
949 if (token == NULL) {
950 ret = -EINVAL;
951 goto done;
952 }
953 ret = kstrtouint(token, 10, &stream->width);
954 if (ret < 0)
955 goto done;
956
957 /* height */
958 token = strsep(&buf, ";");
959 if (token == NULL)
960 goto done;
961
962 ret = kstrtouint(token, 10, &stream->height);
963 if (ret < 0)
964 goto done;
965
966 /* Image format code */
967 token = strsep(&buf, ";");
968 if (token == NULL)
969 goto done;
970
971 ret = kstrtouint(token, 16, &stream->format);
972 if (ret < 0)
973 goto done;
974 }
975
976 ret = gb_camera_configure_streams(gcam, &nstreams, &flags, streams,
977 NULL);
978 if (ret < 0)
979 goto done;
980
981 buffer->length = sprintf(buffer->data, "%u;%u;", nstreams, flags);
982
983 for (i = 0; i < nstreams; ++i) {
984 struct gb_camera_stream_config *stream = &streams[i];
985
986 buffer->length += sprintf(buffer->data + buffer->length,
987 "%u;%u;%u;%u;%u;%u;%u;",
988 stream->width, stream->height,
989 stream->format, stream->vc,
990 stream->dt[0], stream->dt[1],
991 stream->max_size);
992 }
993
994 ret = len;
995
996done:
997 kfree(streams);
998 return ret;
999};
1000
1001static ssize_t gb_camera_debugfs_capture(struct gb_camera *gcam,
1002 char *buf, size_t len)
1003{
1004 unsigned int request_id;
1005 unsigned int streams_mask;
1006 unsigned int num_frames;
1007 char *token;
1008 int ret;
1009
1010 /* Request id */
1011 token = strsep(&buf, ";");
1012 if (token == NULL)
1013 return -EINVAL;
1014 ret = kstrtouint(token, 10, &request_id);
1015 if (ret < 0)
1016 return ret;
1017
1018 /* Stream mask */
1019 token = strsep(&buf, ";");
1020 if (token == NULL)
1021 return -EINVAL;
1022 ret = kstrtouint(token, 16, &streams_mask);
1023 if (ret < 0)
1024 return ret;
1025
1026 /* number of frames */
1027 token = strsep(&buf, ";");
1028 if (token == NULL)
1029 return -EINVAL;
1030 ret = kstrtouint(token, 10, &num_frames);
1031 if (ret < 0)
1032 return ret;
1033
1034 ret = gb_camera_capture(gcam, request_id, streams_mask, num_frames, 0,
1035 NULL);
1036 if (ret < 0)
1037 return ret;
1038
1039 return len;
1040}
1041
1042static ssize_t gb_camera_debugfs_flush(struct gb_camera *gcam,
1043 char *buf, size_t len)
1044{
1045 struct gb_camera_debugfs_buffer *buffer =
1046 &gcam->debugfs.buffers[GB_CAMERA_DEBUGFS_BUFFER_FLUSH];
1047 unsigned int req_id;
1048 int ret;
1049
1050 ret = gb_camera_flush(gcam, &req_id);
1051 if (ret < 0)
1052 return ret;
1053
1054 buffer->length = sprintf(buffer->data, "%u", req_id);
1055
1056 return len;
1057}
1058
/*
 * Descriptor for one debugfs file exposed by the camera driver.
 * @name:    file name under the per-camera debugfs directory
 * @mask:    file type and permission bits; a writable entry (S_IWUGO)
 *           runs its operation on write, a read-only one on read
 * @buffer:  index into gcam->debugfs.buffers holding the read-back data
 * @execute: operation callback; returns bytes consumed or negative errno
 */
struct gb_camera_debugfs_entry {
	const char *name;
	unsigned int mask;
	unsigned int buffer;
	ssize_t (*execute)(struct gb_camera *gcam, char *buf, size_t len);
};
1065
/* One entry per debugfs file; indices must match the buffer constants. */
static const struct gb_camera_debugfs_entry gb_camera_debugfs_entries[] = {
	{
		.name = "capabilities",
		.mask = S_IFREG | S_IRUGO,
		.buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPABILITIES,
		.execute = gb_camera_debugfs_capabilities,
	}, {
		.name = "configure_streams",
		.mask = S_IFREG | S_IRUGO | S_IWUGO,
		.buffer = GB_CAMERA_DEBUGFS_BUFFER_STREAMS,
		.execute = gb_camera_debugfs_configure_streams,
	}, {
		.name = "capture",
		.mask = S_IFREG | S_IRUGO | S_IWUGO,
		.buffer = GB_CAMERA_DEBUGFS_BUFFER_CAPTURE,
		.execute = gb_camera_debugfs_capture,
	}, {
		.name = "flush",
		.mask = S_IFREG | S_IRUGO | S_IWUGO,
		.buffer = GB_CAMERA_DEBUGFS_BUFFER_FLUSH,
		.execute = gb_camera_debugfs_flush,
	},
};
1089
/*
 * Common read handler: for read-only entries the operation is executed
 * on read, then (for all entries) the associated buffer content is
 * copied out to userspace.
 */
static ssize_t gb_camera_debugfs_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	const struct gb_camera_debugfs_entry *op = file->private_data;
	struct gb_camera *gcam = file->f_inode->i_private;
	struct gb_camera_debugfs_buffer *buffer;
	ssize_t ret;

	/* For read-only entries the operation is triggered by a read. */
	if (!(op->mask & S_IWUGO)) {
		ret = op->execute(gcam, NULL, 0);
		if (ret < 0)
			return ret;
	}

	buffer = &gcam->debugfs.buffers[op->buffer];

	return simple_read_from_buffer(buf, len, offset, buffer->data,
				       buffer->length);
}
1110
1111static ssize_t gb_camera_debugfs_write(struct file *file,
1112 const char __user *buf, size_t len,
1113 loff_t *offset)
1114{
1115 const struct gb_camera_debugfs_entry *op = file->private_data;
1116 struct gb_camera *gcam = file->f_inode->i_private;
1117 ssize_t ret;
1118 char *kbuf;
1119
1120 if (len > 1024)
1121 return -EINVAL;
1122
1123 kbuf = kmalloc(len + 1, GFP_KERNEL);
1124 if (kbuf == NULL)
1125 return -ENOMEM;
1126
1127 if (copy_from_user(kbuf, buf, len)) {
1128 ret = -EFAULT;
1129 goto done;
1130 }
1131
1132 kbuf[len] = '\0';
1133
1134 ret = op->execute(gcam, kbuf, len);
1135
1136done:
1137 kfree(kbuf);
1138 return ret;
1139}
1140
1141static int gb_camera_debugfs_open(struct inode *inode, struct file *file)
1142{
1143 unsigned int i;
1144
1145 for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
1146 const struct gb_camera_debugfs_entry *entry =
1147 &gb_camera_debugfs_entries[i];
1148
1149 if (!strcmp(file->f_path.dentry->d_iname, entry->name)) {
1150 file->private_data = (void *)entry;
1151 break;
1152 }
1153 }
1154
1155 return 0;
1156}
1157
/* Shared file operations for every camera debugfs entry. */
static const struct file_operations gb_camera_debugfs_ops = {
	.open = gb_camera_debugfs_open,
	.read = gb_camera_debugfs_read,
	.write = gb_camera_debugfs_write,
};
1163
1164static int gb_camera_debugfs_init(struct gb_camera *gcam)
1165{
1166 struct gb_connection *connection = gcam->connection;
1167 char dirname[27];
1168 unsigned int i;
1169
1170 /*
1171 * Create root debugfs entry and a file entry for each camera operation.
1172 */
1173 snprintf(dirname, 27, "camera-%u.%u", connection->intf->interface_id,
1174 gcam->bundle->id);
1175
1176 gcam->debugfs.root = debugfs_create_dir(dirname, gb_debugfs_get());
1177 if (IS_ERR(gcam->debugfs.root)) {
1178 gcam_err(gcam, "debugfs root create failed (%ld)\n",
1179 PTR_ERR(gcam->debugfs.root));
1180 return PTR_ERR(gcam->debugfs.root);
1181 }
1182
1183 gcam->debugfs.buffers = vmalloc(sizeof(*gcam->debugfs.buffers) *
1184 GB_CAMERA_DEBUGFS_BUFFER_MAX);
1185 if (!gcam->debugfs.buffers)
1186 return -ENOMEM;
1187
1188 for (i = 0; i < ARRAY_SIZE(gb_camera_debugfs_entries); ++i) {
1189 const struct gb_camera_debugfs_entry *entry =
1190 &gb_camera_debugfs_entries[i];
1191 struct dentry *dentry;
1192
1193 gcam->debugfs.buffers[i].length = 0;
1194
1195 dentry = debugfs_create_file(entry->name, entry->mask,
1196 gcam->debugfs.root, gcam,
1197 &gb_camera_debugfs_ops);
1198 if (IS_ERR(dentry)) {
1199 gcam_err(gcam,
1200 "debugfs operation %s create failed (%ld)\n",
1201 entry->name, PTR_ERR(dentry));
1202 return PTR_ERR(dentry);
1203 }
1204 }
1205
1206 return 0;
1207}
1208
/*
 * Remove the camera's debugfs tree and free the read-back buffers.
 * Safe to call with a partially initialised (or NULL) debugfs state.
 */
static void gb_camera_debugfs_cleanup(struct gb_camera *gcam)
{
	debugfs_remove_recursive(gcam->debugfs.root);

	vfree(gcam->debugfs.buffers);
}
1215
1216/* -----------------------------------------------------------------------------
1217 * Init & Cleanup
1218 */
1219
/*
 * Tear down debugfs and both Greybus connections. Connections are
 * cleared under gcam->mutex so concurrent operations observe the NULL
 * pointers; the data connection goes first, then management.
 */
static void gb_camera_cleanup(struct gb_camera *gcam)
{
	gb_camera_debugfs_cleanup(gcam);

	mutex_lock(&gcam->mutex);
	if (gcam->data_connection) {
		gb_connection_disable(gcam->data_connection);
		gb_connection_destroy(gcam->data_connection);
		gcam->data_connection = NULL;
	}

	if (gcam->connection) {
		gb_connection_disable(gcam->connection);
		gb_connection_destroy(gcam->connection);
		gcam->connection = NULL;
	}
	mutex_unlock(&gcam->mutex);
}
1238
/*
 * Module refcount release callback: frees the gb_camera stored as the
 * module's private data (set in gb_camera_probe()).
 */
static void gb_camera_release_module(struct kref *ref)
{
	struct gb_camera_module *cam_mod =
		container_of(ref, struct gb_camera_module, refcount);
	kfree(cam_mod->priv);
}
1245
/*
 * Bundle probe: validate the two-CPort camera bundle layout, create and
 * enable the management connection, set up debugfs, register with the
 * host camera framework and let the bundle runtime-suspend.
 *
 * On error everything is unwound via gb_camera_cleanup() and the
 * gb_camera is freed; once registration succeeds, freeing is deferred
 * to the module release callback.
 */
static int gb_camera_probe(struct gb_bundle *bundle,
			   const struct greybus_bundle_id *id)
{
	struct gb_connection *conn;
	struct gb_camera *gcam;
	u16 mgmt_cport_id = 0;
	u16 data_cport_id = 0;
	unsigned int i;
	int ret;

	/*
	 * The camera bundle must contain exactly two CPorts, one for the
	 * camera management protocol and one for the camera data protocol.
	 */
	if (bundle->num_cports != 2)
		return -ENODEV;

	for (i = 0; i < bundle->num_cports; ++i) {
		struct greybus_descriptor_cport *desc = &bundle->cport_desc[i];

		switch (desc->protocol_id) {
		case GREYBUS_PROTOCOL_CAMERA_MGMT:
			mgmt_cport_id = le16_to_cpu(desc->id);
			break;
		case GREYBUS_PROTOCOL_CAMERA_DATA:
			data_cport_id = le16_to_cpu(desc->id);
			break;
		default:
			return -ENODEV;
		}
	}

	/* A CPort id of zero means the protocol was not found above. */
	if (!mgmt_cport_id || !data_cport_id)
		return -ENODEV;

	gcam = kzalloc(sizeof(*gcam), GFP_KERNEL);
	if (!gcam)
		return -ENOMEM;

	mutex_init(&gcam->mutex);

	gcam->bundle = bundle;
	gcam->state = GB_CAMERA_STATE_UNCONFIGURED;
	gcam->data_cport_id = data_cport_id;

	/* Only the management connection is brought up here; the data
	 * connection is created when streams are configured. */
	conn = gb_connection_create(bundle, mgmt_cport_id,
				    gb_camera_request_handler);
	if (IS_ERR(conn)) {
		ret = PTR_ERR(conn);
		goto error;
	}

	gcam->connection = conn;
	gb_connection_set_data(conn, gcam);

	ret = gb_connection_enable(conn);
	if (ret)
		goto error;

	ret = gb_camera_debugfs_init(gcam);
	if (ret < 0)
		goto error;

	/* Register with the host-side camera framework. */
	gcam->module.priv = gcam;
	gcam->module.ops = &gb_cam_ops;
	gcam->module.interface_id = gcam->connection->intf->interface_id;
	gcam->module.release = gb_camera_release_module;
	ret = gb_camera_register(&gcam->module);
	if (ret < 0)
		goto error;

	greybus_set_drvdata(bundle, gcam);

	gb_pm_runtime_put_autosuspend(gcam->bundle);

	return 0;

error:
	gb_camera_cleanup(gcam);
	kfree(gcam);
	return ret;
}
1328
/*
 * Bundle disconnect: resume the bundle (best effort — on failure the
 * usage count is still balanced so teardown proceeds), tear down
 * connections/debugfs, then drop the framework registration. The
 * gb_camera itself is freed by gb_camera_release_module() when the
 * module's last reference goes away.
 */
static void gb_camera_disconnect(struct gb_bundle *bundle)
{
	struct gb_camera *gcam = greybus_get_drvdata(bundle);
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_camera_cleanup(gcam);
	gb_camera_unregister(&gcam->module);
}
1341
/* Bind to any bundle advertising the camera device class. */
static const struct greybus_bundle_id gb_camera_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_CAMERA) },
	{ },
};
1346
1347#ifdef CONFIG_PM
/*
 * Runtime-suspend callback: quiesce the data connection (when present)
 * first, then the management connection.
 */
static int gb_camera_suspend(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gb_camera *gcam = greybus_get_drvdata(bundle);

	if (gcam->data_connection)
		gb_connection_disable(gcam->data_connection);

	gb_connection_disable(gcam->connection);

	return 0;
}
1360
/*
 * Runtime-resume callback: re-enable the management connection, then
 * the data connection when one exists. If the data connection fails to
 * come back, the error is returned with the management connection left
 * enabled.
 */
static int gb_camera_resume(struct device *dev)
{
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gb_camera *gcam = greybus_get_drvdata(bundle);
	int ret;

	ret = gb_connection_enable(gcam->connection);
	if (ret) {
		gcam_err(gcam, "failed to enable connection: %d\n", ret);
		return ret;
	}

	if (gcam->data_connection) {
		ret = gb_connection_enable(gcam->data_connection);
		if (ret) {
			gcam_err(gcam,
				 "failed to enable data connection: %d\n", ret);
			return ret;
		}
	}

	return 0;
}
1384#endif
1385
/* Runtime PM only; no system-sleep callbacks are provided. */
static const struct dev_pm_ops gb_camera_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_camera_suspend, gb_camera_resume, NULL)
};
1389
/* Greybus driver glue for the camera bundle class. */
static struct greybus_driver gb_camera_driver = {
	.name = "camera",
	.probe = gb_camera_probe,
	.disconnect = gb_camera_disconnect,
	.id_table = gb_camera_id_table,
	.driver.pm = &gb_camera_pm_ops,
};
1397
/* Register/unregister the driver on module init/exit. */
module_greybus_driver(gb_camera_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/connection.c b/drivers/staging/greybus/connection.c
new file mode 100644
index 000000000000..557075147f2d
--- /dev/null
+++ b/drivers/staging/greybus/connection.c
@@ -0,0 +1,938 @@
1/*
2 * Greybus connections
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/workqueue.h>
11
12#include "greybus.h"
13#include "greybus_trace.h"
14
15
/* Timeout (in ms) for a CPort to quiesce during connection shutdown. */
#define GB_CONNECTION_CPORT_QUIESCE_TIMEOUT 1000


static void gb_connection_kref_release(struct kref *kref);


/* Protects the per-host-device connection lists (hd->connections). */
static DEFINE_SPINLOCK(gb_connections_lock);
/* Serialises connection creation and destruction. */
static DEFINE_MUTEX(gb_connection_mutex);
24
25
26/* Caller holds gb_connection_mutex. */
27static bool gb_connection_cport_in_use(struct gb_interface *intf, u16 cport_id)
28{
29 struct gb_host_device *hd = intf->hd;
30 struct gb_connection *connection;
31
32 list_for_each_entry(connection, &hd->connections, hd_links) {
33 if (connection->intf == intf &&
34 connection->intf_cport_id == cport_id)
35 return true;
36 }
37
38 return false;
39}
40
/* Take an additional reference on @connection. */
static void gb_connection_get(struct gb_connection *connection)
{
	kref_get(&connection->kref);

	trace_gb_connection_get(connection);
}
47
/* Drop a reference; frees the connection when it was the last one. */
static void gb_connection_put(struct gb_connection *connection)
{
	trace_gb_connection_put(connection);

	kref_put(&connection->kref, gb_connection_kref_release);
}
54
55/*
56 * Returns a reference-counted pointer to the connection if found.
57 */
58static struct gb_connection *
59gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id)
60{
61 struct gb_connection *connection;
62 unsigned long flags;
63
64 spin_lock_irqsave(&gb_connections_lock, flags);
65 list_for_each_entry(connection, &hd->connections, hd_links)
66 if (connection->hd_cport_id == cport_id) {
67 gb_connection_get(connection);
68 goto found;
69 }
70 connection = NULL;
71found:
72 spin_unlock_irqrestore(&gb_connections_lock, flags);
73
74 return connection;
75}
76
/*
 * Callback from the host driver to let us know that data has been
 * received on the bundle.
 */
void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
		       u8 *data, size_t length)
{
	struct gb_connection *connection;

	trace_gb_hd_in(hd);

	/* Takes a reference on the connection, dropped below. */
	connection = gb_connection_hd_find(hd, cport_id);
	if (!connection) {
		dev_err(&hd->dev,
			"nonexistent connection (%zu bytes dropped)\n", length);
		return;
	}
	gb_connection_recv(connection, data, length);
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(greybus_data_rcvd);
98
/* kref release callback: frees the connection structure itself. */
static void gb_connection_kref_release(struct kref *kref)
{
	struct gb_connection *connection;

	connection = container_of(kref, struct gb_connection, kref);

	trace_gb_connection_release(connection);

	kfree(connection);
}
109
110static void gb_connection_init_name(struct gb_connection *connection)
111{
112 u16 hd_cport_id = connection->hd_cport_id;
113 u16 cport_id = 0;
114 u8 intf_id = 0;
115
116 if (connection->intf) {
117 intf_id = connection->intf->interface_id;
118 cport_id = connection->intf_cport_id;
119 }
120
121 snprintf(connection->name, sizeof(connection->name),
122 "%u/%u:%u", hd_cport_id, intf_id, cport_id);
123}
124
125/*
126 * _gb_connection_create() - create a Greybus connection
127 * @hd: host device of the connection
128 * @hd_cport_id: host-device cport id, or -1 for dynamic allocation
129 * @intf: remote interface, or NULL for static connections
130 * @bundle: remote-interface bundle (may be NULL)
131 * @cport_id: remote-interface cport id, or 0 for static connections
132 * @handler: request handler (may be NULL)
133 * @flags: connection flags
134 *
135 * Create a Greybus connection, representing the bidirectional link
136 * between a CPort on a (local) Greybus host device and a CPort on
137 * another Greybus interface.
138 *
139 * A connection also maintains the state of operations sent over the
140 * connection.
141 *
142 * Serialised against concurrent create and destroy using the
143 * gb_connection_mutex.
144 *
145 * Return: A pointer to the new connection if successful, or an ERR_PTR
146 * otherwise.
147 */
static struct gb_connection *
_gb_connection_create(struct gb_host_device *hd, int hd_cport_id,
		      struct gb_interface *intf,
		      struct gb_bundle *bundle, int cport_id,
		      gb_request_handler_t handler,
		      unsigned long flags)
{
	struct gb_connection *connection;
	int ret;

	mutex_lock(&gb_connection_mutex);

	/* The remote cport must not already be claimed on this interface. */
	if (intf && gb_connection_cport_in_use(intf, cport_id)) {
		dev_err(&intf->dev, "cport %u already in use\n", cport_id);
		ret = -EBUSY;
		goto err_unlock;
	}

	/* -1 requests dynamic allocation; returns the actual cport id. */
	ret = gb_hd_cport_allocate(hd, hd_cport_id, flags);
	if (ret < 0) {
		dev_err(&hd->dev, "failed to allocate cport: %d\n", ret);
		goto err_unlock;
	}
	hd_cport_id = ret;

	connection = kzalloc(sizeof(*connection), GFP_KERNEL);
	if (!connection) {
		ret = -ENOMEM;
		goto err_hd_cport_release;
	}

	connection->hd_cport_id = hd_cport_id;
	connection->intf_cport_id = cport_id;
	connection->hd = hd;
	connection->intf = intf;
	connection->bundle = bundle;
	connection->handler = handler;
	connection->flags = flags;
	/* Quirky interfaces get flow control disabled unconditionally. */
	if (intf && (intf->quirks & GB_INTERFACE_QUIRK_NO_CPORT_FEATURES))
		connection->flags |= GB_CONNECTION_FLAG_NO_FLOWCTRL;
	connection->state = GB_CONNECTION_STATE_DISABLED;

	atomic_set(&connection->op_cycle, 0);
	mutex_init(&connection->mutex);
	spin_lock_init(&connection->lock);
	INIT_LIST_HEAD(&connection->operations);

	/* Ordered, single-threaded workqueue for incoming requests. */
	connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
					 dev_name(&hd->dev), hd_cport_id);
	if (!connection->wq) {
		ret = -ENOMEM;
		goto err_free_connection;
	}

	kref_init(&connection->kref);

	gb_connection_init_name(connection);

	/* Publish on the hd (and optionally bundle) lists under the lock. */
	spin_lock_irq(&gb_connections_lock);
	list_add(&connection->hd_links, &hd->connections);

	if (bundle)
		list_add(&connection->bundle_links, &bundle->connections);
	else
		INIT_LIST_HEAD(&connection->bundle_links);

	spin_unlock_irq(&gb_connections_lock);

	mutex_unlock(&gb_connection_mutex);

	trace_gb_connection_create(connection);

	return connection;

err_free_connection:
	kfree(connection);
err_hd_cport_release:
	gb_hd_cport_release(hd, hd_cport_id);
err_unlock:
	mutex_unlock(&gb_connection_mutex);

	return ERR_PTR(ret);
}
231
/* Create a static (no remote interface) high-priority connection. */
struct gb_connection *
gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id,
			    gb_request_handler_t handler)
{
	return _gb_connection_create(hd, hd_cport_id, NULL, NULL, 0, handler,
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
239
/* Create the interface's control connection (dynamic host cport). */
struct gb_connection *
gb_connection_create_control(struct gb_interface *intf)
{
	return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL,
				     GB_CONNECTION_FLAG_CONTROL |
				     GB_CONNECTION_FLAG_HIGH_PRIO);
}
247
/* Create an ordinary bundle connection with no special flags. */
struct gb_connection *
gb_connection_create(struct gb_bundle *bundle, u16 cport_id,
		     gb_request_handler_t handler)
{
	struct gb_interface *intf = bundle->intf;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, 0);
}
EXPORT_SYMBOL_GPL(gb_connection_create);
258
/*
 * Create a bundle connection with caller-supplied flags. Core-reserved
 * flag bits are stripped (with a one-time warning) before use.
 */
struct gb_connection *
gb_connection_create_flags(struct gb_bundle *bundle, u16 cport_id,
			   gb_request_handler_t handler,
			   unsigned long flags)
{
	struct gb_interface *intf = bundle->intf;

	if (WARN_ON_ONCE(flags & GB_CONNECTION_FLAG_CORE_MASK))
		flags &= ~GB_CONNECTION_FLAG_CORE_MASK;

	return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id,
				     handler, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_flags);
273
/*
 * Create an offloaded connection: traffic is handled by the host
 * driver/firmware, so no request handler is installed.
 */
struct gb_connection *
gb_connection_create_offloaded(struct gb_bundle *bundle, u16 cport_id,
			       unsigned long flags)
{
	flags |= GB_CONNECTION_FLAG_OFFLOADED;

	return gb_connection_create_flags(bundle, cport_id, NULL, flags);
}
EXPORT_SYMBOL_GPL(gb_connection_create_offloaded);
283
284static int gb_connection_hd_cport_enable(struct gb_connection *connection)
285{
286 struct gb_host_device *hd = connection->hd;
287 int ret;
288
289 if (!hd->driver->cport_enable)
290 return 0;
291
292 ret = hd->driver->cport_enable(hd, connection->hd_cport_id,
293 connection->flags);
294 if (ret) {
295 dev_err(&hd->dev, "%s: failed to enable host cport: %d\n",
296 connection->name, ret);
297 return ret;
298 }
299
300 return 0;
301}
302
303static void gb_connection_hd_cport_disable(struct gb_connection *connection)
304{
305 struct gb_host_device *hd = connection->hd;
306 int ret;
307
308 if (!hd->driver->cport_disable)
309 return;
310
311 ret = hd->driver->cport_disable(hd, connection->hd_cport_id);
312 if (ret) {
313 dev_err(&hd->dev, "%s: failed to disable host cport: %d\n",
314 connection->name, ret);
315 }
316}
317
/* Tell the host driver the cport is now connected (optional hook). */
static int gb_connection_hd_cport_connected(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_connected)
		return 0;

	ret = hd->driver->cport_connected(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to set connected state: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
335
/* Flush any pending host-side cport data (optional hook). */
static int gb_connection_hd_cport_flush(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_flush)
		return 0;

	ret = hd->driver->cport_flush(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to flush host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
353
/*
 * Quiesce the host cport, reserving enough peer buffer space for the
 * shutdown request (plus one extra message header during mode switch).
 */
static int gb_connection_hd_cport_quiesce(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	size_t peer_space;
	int ret;

	peer_space = sizeof(struct gb_operation_msg_hdr) +
			sizeof(struct gb_cport_shutdown_request);

	if (connection->mode_switch)
		peer_space += sizeof(struct gb_operation_msg_hdr);

	ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id,
					peer_space,
					GB_CONNECTION_CPORT_QUIESCE_TIMEOUT);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
377
/*
 * Clear the host cport state. Unlike the hooks above, cport_clear is
 * invoked without a NULL check, i.e. treated as mandatory here.
 */
static int gb_connection_hd_cport_clear(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	ret = hd->driver->cport_clear(hd, connection->hd_cport_id);
	if (ret) {
		dev_err(&hd->dev, "%s: failed to clear host cport: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
392
/*
 * Request the SVC to create a connection from AP's cport to interface's
 * cport.
 */
static int
gb_connection_svc_connection_create(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_interface *intf;
	u8 cport_flags;
	int ret;

	/* Static connections have no SVC-managed route. */
	if (gb_connection_is_static(connection))
		return 0;

	intf = connection->intf;

	/*
	 * Enable either E2EFC or CSD, unless no flow control is requested.
	 */
	cport_flags = GB_SVC_CPORT_FLAG_CSV_N;
	if (gb_connection_flow_control_disabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N;
	} else if (gb_connection_e2efc_enabled(connection)) {
		cport_flags |= GB_SVC_CPORT_FLAG_CSD_N |
				GB_SVC_CPORT_FLAG_E2EFC;
	}

	ret = gb_svc_connection_create(hd->svc,
			hd->svc->ap_intf_id,
			connection->hd_cport_id,
			intf->interface_id,
			connection->intf_cport_id,
			cport_flags);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to create svc connection: %d\n",
			connection->name, ret);
		return ret;
	}

	return 0;
}
436
/* Tear down the SVC route; no-op for static connections. */
static void
gb_connection_svc_connection_destroy(struct gb_connection *connection)
{
	if (gb_connection_is_static(connection))
		return;

	gb_svc_connection_destroy(connection->hd->svc,
				  connection->hd->svc->ap_intf_id,
				  connection->hd_cport_id,
				  connection->intf->interface_id,
				  connection->intf_cport_id);
}
449
450/* Inform Interface about active CPorts */
451static int gb_connection_control_connected(struct gb_connection *connection)
452{
453 struct gb_control *control;
454 u16 cport_id = connection->intf_cport_id;
455 int ret;
456
457 if (gb_connection_is_static(connection))
458 return 0;
459
460 if (gb_connection_is_control(connection))
461 return 0;
462
463 control = connection->intf->control;
464
465 ret = gb_control_connected_operation(control, cport_id);
466 if (ret) {
467 dev_err(&connection->bundle->dev,
468 "failed to connect cport: %d\n", ret);
469 return ret;
470 }
471
472 return 0;
473}
474
/*
 * Notify the interface that this cport is about to be disconnected.
 * Failures are only logged; teardown continues regardless.
 */
static void
gb_connection_control_disconnecting(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	ret = gb_control_disconnecting_operation(control, cport_id);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send disconnecting: %d\n",
			connection->name, ret);
	}
}
494
/*
 * Notify the interface that this cport has been disconnected. Control
 * connections are special: instead of a disconnected operation they may
 * trigger a mode-switch operation, whose timeout handling happens via
 * the mailbox event elsewhere.
 */
static void
gb_connection_control_disconnected(struct gb_connection *connection)
{
	struct gb_control *control;
	u16 cport_id = connection->intf_cport_id;
	int ret;

	if (gb_connection_is_static(connection))
		return;

	control = connection->intf->control;

	if (gb_connection_is_control(connection)) {
		if (connection->mode_switch) {
			ret = gb_control_mode_switch_operation(control);
			if (ret) {
				/*
				 * Allow mode switch to time out waiting for
				 * mailbox event.
				 */
				return;
			}
		}

		return;
	}

	ret = gb_control_disconnected_operation(control, cport_id);
	if (ret) {
		dev_warn(&connection->bundle->dev,
			 "failed to disconnect cport: %d\n", ret);
	}
}
528
/*
 * Send a CPort shutdown request (phase 1 or 2) over the connection itself.
 * Uses gb_operation_create_core() so the request is allowed while the
 * connection is in the DISCONNECTING state.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_connection_shutdown_operation(struct gb_connection *connection,
					    u8 phase)
{
	struct gb_cport_shutdown_request *req;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(connection,
					     GB_REQUEST_TYPE_CPORT_SHUTDOWN,
					     sizeof(*req), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	req = operation->request->payload;
	req->phase = phase;

	ret = gb_operation_request_send_sync(operation);

	/* Drop the reference taken by gb_operation_create_core(). */
	gb_operation_put(operation);

	return ret;
}
552
/*
 * Run one CPort shutdown phase. Static connections skip shutdown entirely.
 * Offloaded connections delegate to the host-device driver (a no-op when
 * the driver provides no cport_shutdown callback); all other connections
 * send the shutdown request in-band.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_connection_cport_shutdown(struct gb_connection *connection,
					u8 phase)
{
	struct gb_host_device *hd = connection->hd;
	const struct gb_hd_driver *drv = hd->driver;
	int ret;

	if (gb_connection_is_static(connection))
		return 0;

	if (gb_connection_is_offloaded(connection)) {
		if (!drv->cport_shutdown)
			return 0;

		ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase,
					  GB_OPERATION_TIMEOUT_DEFAULT);
	} else {
		ret = gb_connection_shutdown_operation(connection, phase);
	}

	if (ret) {
		dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n",
			connection->name, phase, ret);
		return ret;
	}

	return 0;
}
581
/* First CPort shutdown phase, issued before the host CPort is quiesced. */
static int
gb_connection_cport_shutdown_phase_1(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 1);
}
587
/* Second CPort shutdown phase, issued after the host CPort is quiesced. */
static int
gb_connection_cport_shutdown_phase_2(struct gb_connection *connection)
{
	return gb_connection_cport_shutdown(connection, 2);
}
593
594/*
595 * Cancel all active operations on a connection.
596 *
597 * Locking: Called with connection lock held and state set to DISABLED or
598 * DISCONNECTING.
599 */
600static void gb_connection_cancel_operations(struct gb_connection *connection,
601 int errno)
602 __must_hold(&connection->lock)
603{
604 struct gb_operation *operation;
605
606 while (!list_empty(&connection->operations)) {
607 operation = list_last_entry(&connection->operations,
608 struct gb_operation, links);
609 gb_operation_get(operation);
610 spin_unlock_irq(&connection->lock);
611
612 if (gb_operation_is_incoming(operation))
613 gb_operation_cancel_incoming(operation, errno);
614 else
615 gb_operation_cancel(operation, errno);
616
617 gb_operation_put(operation);
618
619 spin_lock_irq(&connection->lock);
620 }
621}
622
623/*
624 * Cancel all active incoming operations on a connection.
625 *
626 * Locking: Called with connection lock held and state set to ENABLED_TX.
627 */
628static void
629gb_connection_flush_incoming_operations(struct gb_connection *connection,
630 int errno)
631 __must_hold(&connection->lock)
632{
633 struct gb_operation *operation;
634 bool incoming;
635
636 while (!list_empty(&connection->operations)) {
637 incoming = false;
638 list_for_each_entry(operation, &connection->operations,
639 links) {
640 if (gb_operation_is_incoming(operation)) {
641 gb_operation_get(operation);
642 incoming = true;
643 break;
644 }
645 }
646
647 if (!incoming)
648 break;
649
650 spin_unlock_irq(&connection->lock);
651
652 /* FIXME: flush, not cancel? */
653 gb_operation_cancel_incoming(operation, errno);
654 gb_operation_put(operation);
655
656 spin_lock_irq(&connection->lock);
657 }
658}
659
660/*
661 * _gb_connection_enable() - enable a connection
662 * @connection: connection to enable
663 * @rx: whether to enable incoming requests
664 *
665 * Connection-enable helper for DISABLED->ENABLED, DISABLED->ENABLED_TX, and
666 * ENABLED_TX->ENABLED state transitions.
667 *
668 * Locking: Caller holds connection->mutex.
669 */
670static int _gb_connection_enable(struct gb_connection *connection, bool rx)
671{
672 int ret;
673
674 /* Handle ENABLED_TX -> ENABLED transitions. */
675 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX) {
676 if (!(connection->handler && rx))
677 return 0;
678
679 spin_lock_irq(&connection->lock);
680 connection->state = GB_CONNECTION_STATE_ENABLED;
681 spin_unlock_irq(&connection->lock);
682
683 return 0;
684 }
685
686 ret = gb_connection_hd_cport_enable(connection);
687 if (ret)
688 return ret;
689
690 ret = gb_connection_svc_connection_create(connection);
691 if (ret)
692 goto err_hd_cport_clear;
693
694 ret = gb_connection_hd_cport_connected(connection);
695 if (ret)
696 goto err_svc_connection_destroy;
697
698 spin_lock_irq(&connection->lock);
699 if (connection->handler && rx)
700 connection->state = GB_CONNECTION_STATE_ENABLED;
701 else
702 connection->state = GB_CONNECTION_STATE_ENABLED_TX;
703 spin_unlock_irq(&connection->lock);
704
705 ret = gb_connection_control_connected(connection);
706 if (ret)
707 goto err_control_disconnecting;
708
709 return 0;
710
711err_control_disconnecting:
712 spin_lock_irq(&connection->lock);
713 connection->state = GB_CONNECTION_STATE_DISCONNECTING;
714 gb_connection_cancel_operations(connection, -ESHUTDOWN);
715 spin_unlock_irq(&connection->lock);
716
717 /* Transmit queue should already be empty. */
718 gb_connection_hd_cport_flush(connection);
719
720 gb_connection_control_disconnecting(connection);
721 gb_connection_cport_shutdown_phase_1(connection);
722 gb_connection_hd_cport_quiesce(connection);
723 gb_connection_cport_shutdown_phase_2(connection);
724 gb_connection_control_disconnected(connection);
725 connection->state = GB_CONNECTION_STATE_DISABLED;
726err_svc_connection_destroy:
727 gb_connection_svc_connection_destroy(connection);
728err_hd_cport_clear:
729 gb_connection_hd_cport_clear(connection);
730
731 gb_connection_hd_cport_disable(connection);
732
733 return ret;
734}
735
736int gb_connection_enable(struct gb_connection *connection)
737{
738 int ret = 0;
739
740 mutex_lock(&connection->mutex);
741
742 if (connection->state == GB_CONNECTION_STATE_ENABLED)
743 goto out_unlock;
744
745 ret = _gb_connection_enable(connection, true);
746 if (!ret)
747 trace_gb_connection_enable(connection);
748
749out_unlock:
750 mutex_unlock(&connection->mutex);
751
752 return ret;
753}
754EXPORT_SYMBOL_GPL(gb_connection_enable);
755
756int gb_connection_enable_tx(struct gb_connection *connection)
757{
758 int ret = 0;
759
760 mutex_lock(&connection->mutex);
761
762 if (connection->state == GB_CONNECTION_STATE_ENABLED) {
763 ret = -EINVAL;
764 goto out_unlock;
765 }
766
767 if (connection->state == GB_CONNECTION_STATE_ENABLED_TX)
768 goto out_unlock;
769
770 ret = _gb_connection_enable(connection, false);
771 if (!ret)
772 trace_gb_connection_enable(connection);
773
774out_unlock:
775 mutex_unlock(&connection->mutex);
776
777 return ret;
778}
779EXPORT_SYMBOL_GPL(gb_connection_enable_tx);
780
/*
 * Drop a fully-enabled connection back to transmit-only, cancelling any
 * in-flight incoming operations. A no-op unless the connection is in the
 * ENABLED state.
 *
 * Note: the incoming-operation flush is called with the spinlock held, as
 * required by gb_connection_flush_incoming_operations().
 */
void gb_connection_disable_rx(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	spin_lock_irq(&connection->lock);
	if (connection->state != GB_CONNECTION_STATE_ENABLED) {
		spin_unlock_irq(&connection->lock);
		goto out_unlock;
	}
	connection->state = GB_CONNECTION_STATE_ENABLED_TX;
	gb_connection_flush_incoming_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	trace_gb_connection_disable(connection);

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_rx);
800
/*
 * Flag a pending mode switch so that the subsequent disable defers
 * control-connection teardown (see gb_connection_disable()).
 */
void gb_connection_mode_switch_prepare(struct gb_connection *connection)
{
	connection->mode_switch = true;
}
805
/*
 * Finish a mode switch: perform the control-connection teardown that
 * gb_connection_disable() deferred, then clear the mode-switch flag.
 */
void gb_connection_mode_switch_complete(struct gb_connection *connection)
{
	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);

	connection->mode_switch = false;
}
815
/*
 * Orderly disable of a connection: cancel outstanding operations, flush the
 * host CPort, notify the remote end (disconnecting, two shutdown phases
 * around the CPort quiesce, disconnected), then tear down the SVC route and
 * host CPort. A no-op when already DISABLED.
 */
void gb_connection_disable(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISCONNECTING;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_control_disconnecting(connection);
	gb_connection_cport_shutdown_phase_1(connection);
	gb_connection_hd_cport_quiesce(connection);
	gb_connection_cport_shutdown_phase_2(connection);
	gb_connection_control_disconnected(connection);

	connection->state = GB_CONNECTION_STATE_DISABLED;

	/* control-connection tear down is deferred when mode switching */
	if (!connection->mode_switch) {
		gb_connection_svc_connection_destroy(connection);
		gb_connection_hd_cport_clear(connection);

		gb_connection_hd_cport_disable(connection);
	}

out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable);
852
/*
 * Disable a connection without communicating with the remote end.
 *
 * Used when the interface is gone (e.g. physically removed), so no
 * disconnecting/shutdown/disconnected handshake is attempted; the state
 * goes straight to DISABLED before local teardown.
 */
void gb_connection_disable_forced(struct gb_connection *connection)
{
	mutex_lock(&connection->mutex);

	if (connection->state == GB_CONNECTION_STATE_DISABLED)
		goto out_unlock;

	trace_gb_connection_disable(connection);

	spin_lock_irq(&connection->lock);
	connection->state = GB_CONNECTION_STATE_DISABLED;
	gb_connection_cancel_operations(connection, -ESHUTDOWN);
	spin_unlock_irq(&connection->lock);

	gb_connection_hd_cport_flush(connection);

	gb_connection_svc_connection_destroy(connection);
	gb_connection_hd_cport_clear(connection);

	gb_connection_hd_cport_disable(connection);
out_unlock:
	mutex_unlock(&connection->mutex);
}
EXPORT_SYMBOL_GPL(gb_connection_disable_forced);
878
/* Caller must have disabled the connection before destroying it. */
void gb_connection_destroy(struct gb_connection *connection)
{
	/* NULL-safe, so callers can destroy unconditionally. */
	if (!connection)
		return;

	/* Safety net: warn and disable if the caller forgot to. */
	if (WARN_ON(connection->state != GB_CONNECTION_STATE_DISABLED))
		gb_connection_disable(connection);

	mutex_lock(&gb_connection_mutex);

	/* Unlink from the bundle and host-device connection lists. */
	spin_lock_irq(&gb_connections_lock);
	list_del(&connection->bundle_links);
	list_del(&connection->hd_links);
	spin_unlock_irq(&gb_connections_lock);

	destroy_workqueue(connection->wq);

	gb_hd_cport_release(connection->hd, connection->hd_cport_id);
	connection->hd_cport_id = CPORT_ID_BAD;

	mutex_unlock(&gb_connection_mutex);

	/* Drop the initial reference; may free the connection. */
	gb_connection_put(connection);
}
EXPORT_SYMBOL_GPL(gb_connection_destroy);
905
906void gb_connection_latency_tag_enable(struct gb_connection *connection)
907{
908 struct gb_host_device *hd = connection->hd;
909 int ret;
910
911 if (!hd->driver->latency_tag_enable)
912 return;
913
914 ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id);
915 if (ret) {
916 dev_err(&connection->hd->dev,
917 "%s: failed to enable latency tag: %d\n",
918 connection->name, ret);
919 }
920}
921EXPORT_SYMBOL_GPL(gb_connection_latency_tag_enable);
922
923void gb_connection_latency_tag_disable(struct gb_connection *connection)
924{
925 struct gb_host_device *hd = connection->hd;
926 int ret;
927
928 if (!hd->driver->latency_tag_disable)
929 return;
930
931 ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id);
932 if (ret) {
933 dev_err(&connection->hd->dev,
934 "%s: failed to disable latency tag: %d\n",
935 connection->name, ret);
936 }
937}
938EXPORT_SYMBOL_GPL(gb_connection_latency_tag_disable);
diff --git a/drivers/staging/greybus/connection.h b/drivers/staging/greybus/connection.h
new file mode 100644
index 000000000000..4d9f4c64176c
--- /dev/null
+++ b/drivers/staging/greybus/connection.h
@@ -0,0 +1,129 @@
1/*
2 * Greybus connections
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __CONNECTION_H
11#define __CONNECTION_H
12
13#include <linux/list.h>
14#include <linux/kfifo.h>
15
16#define GB_CONNECTION_FLAG_CSD BIT(0)
17#define GB_CONNECTION_FLAG_NO_FLOWCTRL BIT(1)
18#define GB_CONNECTION_FLAG_OFFLOADED BIT(2)
19#define GB_CONNECTION_FLAG_CDSI1 BIT(3)
20#define GB_CONNECTION_FLAG_CONTROL BIT(4)
21#define GB_CONNECTION_FLAG_HIGH_PRIO BIT(5)
22
23#define GB_CONNECTION_FLAG_CORE_MASK GB_CONNECTION_FLAG_CONTROL
24
/*
 * Connection life-cycle states; transitions are serialized by the
 * connection mutex, reads/writes by the connection spinlock.
 */
enum gb_connection_state {
	GB_CONNECTION_STATE_DISABLED		= 0,	/* fully torn down */
	GB_CONNECTION_STATE_ENABLED_TX		= 1,	/* tx only, rx rejected */
	GB_CONNECTION_STATE_ENABLED		= 2,	/* tx and rx */
	GB_CONNECTION_STATE_DISCONNECTING	= 3,	/* teardown in progress */
};
31
32struct gb_operation;
33
34typedef int (*gb_request_handler_t)(struct gb_operation *);
35
struct gb_connection {
	struct gb_host_device		*hd;
	struct gb_interface		*intf;	/* NULL for static connections */
	struct gb_bundle		*bundle;
	struct kref			kref;
	u16				hd_cport_id;	/* CPort id on the host side */
	u16				intf_cport_id;	/* CPort id on the interface side */

	struct list_head		hd_links;	/* on hd's connection list */
	struct list_head		bundle_links;	/* on bundle's connection list */

	gb_request_handler_t		handler;	/* incoming-request handler; NULL disables rx */
	unsigned long			flags;		/* GB_CONNECTION_FLAG_* */

	struct mutex			mutex;		/* serializes enable/disable */
	spinlock_t			lock;		/* protects state and operations */
	enum gb_connection_state	state;
	struct list_head		operations;	/* outstanding operations */

	char				name[16];
	struct workqueue_struct		*wq;

	atomic_t			op_cycle;	/* operation-id generator */

	void				*private;	/* protocol-driver data */

	bool				mode_switch;	/* defer teardown for mode switch */
};
64
65struct gb_connection *gb_connection_create_static(struct gb_host_device *hd,
66 u16 hd_cport_id, gb_request_handler_t handler);
67struct gb_connection *gb_connection_create_control(struct gb_interface *intf);
68struct gb_connection *gb_connection_create(struct gb_bundle *bundle,
69 u16 cport_id, gb_request_handler_t handler);
70struct gb_connection *gb_connection_create_flags(struct gb_bundle *bundle,
71 u16 cport_id, gb_request_handler_t handler,
72 unsigned long flags);
73struct gb_connection *gb_connection_create_offloaded(struct gb_bundle *bundle,
74 u16 cport_id, unsigned long flags);
75void gb_connection_destroy(struct gb_connection *connection);
76
/* Static (host-local) connections have no associated interface. */
static inline bool gb_connection_is_static(struct gb_connection *connection)
{
	return !connection->intf;
}
81
82int gb_connection_enable(struct gb_connection *connection);
83int gb_connection_enable_tx(struct gb_connection *connection);
84void gb_connection_disable_rx(struct gb_connection *connection);
85void gb_connection_disable(struct gb_connection *connection);
86void gb_connection_disable_forced(struct gb_connection *connection);
87
88void gb_connection_mode_switch_prepare(struct gb_connection *connection);
89void gb_connection_mode_switch_complete(struct gb_connection *connection);
90
91void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id,
92 u8 *data, size_t length);
93
94void gb_connection_latency_tag_enable(struct gb_connection *connection);
95void gb_connection_latency_tag_disable(struct gb_connection *connection);
96
/* End-to-end flow control is on unless the CSD flag was requested. */
static inline bool gb_connection_e2efc_enabled(struct gb_connection *connection)
{
	return !(connection->flags & GB_CONNECTION_FLAG_CSD);
}
101
/* True when all flow control was explicitly disabled for this connection. */
static inline bool
gb_connection_flow_control_disabled(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_NO_FLOWCTRL;
}
107
/* True when the host-device driver handles this connection's traffic. */
static inline bool gb_connection_is_offloaded(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_OFFLOADED;
}
112
/* True for an interface's control connection. */
static inline bool gb_connection_is_control(struct gb_connection *connection)
{
	return connection->flags & GB_CONNECTION_FLAG_CONTROL;
}
117
/* Retrieve the protocol-driver data set by gb_connection_set_data(). */
static inline void *gb_connection_get_data(struct gb_connection *connection)
{
	return connection->private;
}
122
/* Attach protocol-driver data to the connection. */
static inline void gb_connection_set_data(struct gb_connection *connection,
					  void *data)
{
	connection->private = data;
}
128
129#endif /* __CONNECTION_H */
diff --git a/drivers/staging/greybus/control.c b/drivers/staging/greybus/control.c
new file mode 100644
index 000000000000..4716190e740a
--- /dev/null
+++ b/drivers/staging/greybus/control.c
@@ -0,0 +1,635 @@
1/*
2 * Greybus CPort control protocol.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include "greybus.h"
14
15/* Highest control-protocol version supported */
16#define GB_CONTROL_VERSION_MAJOR 0
17#define GB_CONTROL_VERSION_MINOR 1
18
19
/*
 * Negotiate the control-protocol version with the interface. Offers the
 * highest version we support; rejects any response with a greater major
 * version. On success the negotiated version is cached in @control.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_control_get_version(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_version_request request;
	struct gb_control_version_response response;
	int ret;

	request.major = GB_CONTROL_VERSION_MAJOR;
	request.minor = GB_CONTROL_VERSION_MINOR;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_VERSION,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get control-protocol version: %d\n",
			ret);
		return ret;
	}

	if (response.major > request.major) {
		dev_err(&intf->dev,
			"unsupported major control-protocol version (%u > %u)\n",
			response.major, request.major);
		return -ENOTSUPP;
	}

	control->protocol_major = response.major;
	control->protocol_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u.%u\n", __func__, response.major,
		response.minor);

	return 0;
}
56
/*
 * Query the class version of a single bundle and cache it in the bundle
 * structure. Returns 0 on success or a negative errno.
 */
static int gb_control_get_bundle_version(struct gb_control *control,
					 struct gb_bundle *bundle)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_control_bundle_version_request request;
	struct gb_control_bundle_version_response response;
	int ret;

	request.bundle_id = bundle->id;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_VERSION,
				&request, sizeof(request),
				&response, sizeof(response));
	if (ret) {
		dev_err(&intf->dev,
			"failed to get bundle %u class version: %d\n",
			bundle->id, ret);
		return ret;
	}

	bundle->class_major = response.major;
	bundle->class_minor = response.minor;

	dev_dbg(&intf->dev, "%s - %u: %u.%u\n", __func__, bundle->id,
		response.major, response.minor);

	return 0;
}
86
/*
 * Fetch class versions for all of the interface's bundles. A no-op
 * (returning 0) when the negotiated control protocol does not support the
 * bundle-version operation. Stops at the first failure.
 */
int gb_control_get_bundle_versions(struct gb_control *control)
{
	struct gb_interface *intf = control->connection->intf;
	struct gb_bundle *bundle;
	int ret;

	if (!control->has_bundle_version)
		return 0;

	list_for_each_entry(bundle, &intf->bundles, links) {
		ret = gb_control_get_bundle_version(control, bundle);
		if (ret)
			return ret;
	}

	return 0;
}
104
/*
 * Get Manifest's size from the interface.
 *
 * Returns the manifest size in bytes on success or a negative errno.
 */
int gb_control_get_manifest_size_operation(struct gb_interface *intf)
{
	struct gb_control_get_manifest_size_response response;
	struct gb_connection *connection = intf->control->connection;
	int ret;

	ret = gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST_SIZE,
				NULL, 0, &response, sizeof(response));
	if (ret) {
		dev_err(&connection->intf->dev,
			"failed to get manifest size: %d\n", ret);
		return ret;
	}

	return le16_to_cpu(response.size);
}
122
123/* Reads Manifest from the interface */
124int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
125 size_t size)
126{
127 struct gb_connection *connection = intf->control->connection;
128
129 return gb_operation_sync(connection, GB_CONTROL_TYPE_GET_MANIFEST,
130 NULL, 0, manifest, size);
131}
132
/* Notify the interface that CPort @cport_id is now connected. */
int gb_control_connected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_connected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection, GB_CONTROL_TYPE_CONNECTED,
				 &request, sizeof(request), NULL, 0);
}
141
/* Notify the interface that CPort @cport_id has been disconnected. */
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id)
{
	struct gb_control_disconnected_request request;

	request.cport_id = cpu_to_le16(cport_id);
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_DISCONNECTED, &request,
				 sizeof(request), NULL, 0);
}
151
/*
 * Notify the interface that CPort @cport_id is being torn down. Built with
 * gb_operation_create_core() so the request can be sent while the control
 * connection is already in the DISCONNECTING state.
 *
 * Returns 0 on success or a negative errno.
 */
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id)
{
	struct gb_control_disconnecting_request *request;
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_DISCONNECTING,
					     sizeof(*request), 0, 0,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->cport_id = cpu_to_le16(cport_id);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&control->dev, "failed to send disconnecting: %d\n",
			ret);
	}

	/* Drop the reference taken by gb_operation_create_core(). */
	gb_operation_put(operation);

	return ret;
}
179
/*
 * Send the (unidirectional) mode-switch request to the interface. No
 * response is expected from the remote end; the operation flag reflects
 * that. Returns 0 on success or a negative errno.
 */
int gb_control_mode_switch_operation(struct gb_control *control)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_core(control->connection,
					     GB_CONTROL_TYPE_MODE_SWITCH,
					     0, 0, GB_OPERATION_FLAG_UNIDIRECTIONAL,
					     GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	ret = gb_operation_request_send_sync(operation);
	if (ret)
		dev_err(&control->dev, "failed to send mode switch: %d\n", ret);

	/* Drop the reference taken by gb_operation_create_core(). */
	gb_operation_put(operation);

	return ret;
}
200
/*
 * Enable timesync on the interface: @count strobes, starting at
 * @frame_time, @strobe_delay microseconds apart (presumably -- confirm
 * units against the Greybus timesync spec), with reference clock @refclk.
 */
int gb_control_timesync_enable(struct gb_control *control, u8 count,
			       u64 frame_time, u32 strobe_delay, u32 refclk)
{
	struct gb_control_timesync_enable_request request;

	request.count = count;
	request.frame_time = cpu_to_le64(frame_time);
	request.strobe_delay = cpu_to_le32(strobe_delay);
	request.refclk = cpu_to_le32(refclk);
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_ENABLE, &request,
				 sizeof(request), NULL, 0);
}
214
/* Disable timesync on the interface. */
int gb_control_timesync_disable(struct gb_control *control)
{
	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_DISABLE, NULL, 0,
				 NULL, 0);
}
221
222int gb_control_timesync_get_last_event(struct gb_control *control,
223 u64 *frame_time)
224{
225 struct gb_control_timesync_get_last_event_response response;
226 int ret;
227
228 ret = gb_operation_sync(control->connection,
229 GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT,
230 NULL, 0, &response, sizeof(response));
231 if (!ret)
232 *frame_time = le64_to_cpu(response.frame_time);
233 return ret;
234}
235
/*
 * Send the authoritative frame times for all strobes to the interface.
 * @frame_time must point to at least GB_TIMESYNC_MAX_STROBES entries.
 */
int gb_control_timesync_authoritative(struct gb_control *control,
				      u64 *frame_time)
{
	struct gb_control_timesync_authoritative_request request;
	int i;

	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		request.frame_time[i] = cpu_to_le64(frame_time[i]);

	return gb_operation_sync(control->connection,
				 GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE,
				 &request, sizeof(request),
				 NULL, 0);
}
250
/*
 * Map a bundle power-management status byte from the wire to a negative
 * errno; unknown statuses (including PM_FAIL) map to -EREMOTEIO.
 */
static int gb_control_bundle_pm_status_map(u8 status)
{
	switch (status) {
	case GB_CONTROL_BUNDLE_PM_INVAL:
		return -EINVAL;
	case GB_CONTROL_BUNDLE_PM_BUSY:
		return -EBUSY;
	case GB_CONTROL_BUNDLE_PM_NA:
		return -ENOMSG;
	case GB_CONTROL_BUNDLE_PM_FAIL:
	default:
		return -EREMOTEIO;
	}
}
265
/*
 * Ask the interface to suspend bundle @bundle_id. Returns 0 on success, a
 * negative errno on transport failure, or the mapped errno when the remote
 * end reports a non-OK status.
 */
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_SUSPEND, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u suspend: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to suspend bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}
290
/*
 * Ask the interface to resume bundle @bundle_id. Returns 0 on success, a
 * negative errno on transport failure, or the mapped errno when the remote
 * end reports a non-OK status.
 */
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_RESUME, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send bundle %u resume: %d\n",
			bundle_id, ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to resume bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}
315
/*
 * Ask the interface to deactivate bundle @bundle_id. Returns 0 on success,
 * a negative errno on transport failure, or the mapped errno when the
 * remote end reports a non-OK status.
 */
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_DEACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u deactivate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to deactivate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}
341
/*
 * Ask the interface to activate bundle @bundle_id. A no-op (returning 0)
 * when the interface does not support the activate operation (see the
 * quirk check in gb_control_enable()). Otherwise returns 0 on success, a
 * negative errno on transport failure, or the mapped errno on a non-OK
 * remote status.
 */
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id)
{
	struct gb_control_bundle_pm_request request;
	struct gb_control_bundle_pm_response response;
	int ret;

	if (!control->has_bundle_activate)
		return 0;

	request.bundle_id = bundle_id;
	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_BUNDLE_ACTIVATE, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send bundle %u activate: %d\n", bundle_id,
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_BUNDLE_PM_OK) {
		dev_err(&control->dev, "failed to activate bundle %u: %d\n",
			bundle_id, response.status);
		return gb_control_bundle_pm_status_map(response.status);
	}

	return 0;
}
370
371static int gb_control_interface_pm_status_map(u8 status)
372{
373 switch (status) {
374 case GB_CONTROL_INTF_PM_BUSY:
375 return -EBUSY;
376 case GB_CONTROL_INTF_PM_NA:
377 return -ENOMSG;
378 default:
379 return -EREMOTEIO;
380 }
381}
382
/*
 * Tell the interface to prepare for suspend. Returns 0 on success, a
 * negative errno on transport failure, or the mapped errno when the remote
 * end reports a non-OK status.
 */
int gb_control_interface_suspend_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface suspend prepare: %d\n", ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing suspend: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}
405
/*
 * Tell the interface to prepare for deactivation. Returns 0 on success, a
 * negative errno on transport failure, or the mapped errno when the remote
 * end reports a non-OK status.
 */
int gb_control_interface_deactivate_prepare(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(&control->dev, "failed to send interface deactivate prepare: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while preparing deactivate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}
428
/*
 * Tell the interface to abort an in-progress hibernation. Returns 0 on
 * success, a negative errno on transport failure, or the mapped errno when
 * the remote end reports a non-OK status.
 */
int gb_control_interface_hibernate_abort(struct gb_control *control)
{
	struct gb_control_intf_pm_response response;
	int ret;

	ret = gb_operation_sync(control->connection,
				GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(&control->dev,
			"failed to send interface aborting hibernate: %d\n",
			ret);
		return ret;
	}

	if (response.status != GB_CONTROL_INTF_PM_OK) {
		dev_err(&control->dev, "interface error while aborting hibernate: %d\n",
			response.status);
		return gb_control_interface_pm_status_map(response.status);
	}

	return 0;
}
452
/* sysfs: expose the interface's vendor string (read-only). */
static ssize_t vendor_string_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->vendor_string);
}
static DEVICE_ATTR_RO(vendor_string);
461
/* sysfs: expose the interface's product string (read-only). */
static ssize_t product_string_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct gb_control *control = to_gb_control(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n", control->product_string);
}
static DEVICE_ATTR_RO(product_string);
470
/* sysfs attributes attached to every control device via control_groups. */
static struct attribute *control_attrs[] = {
	&dev_attr_vendor_string.attr,
	&dev_attr_product_string.attr,
	NULL,
};
ATTRIBUTE_GROUPS(control);
477
/*
 * Device release callback: frees the control structure and its owned
 * resources once the last device reference is dropped.
 */
static void gb_control_release(struct device *dev)
{
	struct gb_control *control = to_gb_control(dev);

	gb_connection_destroy(control->connection);

	kfree(control->vendor_string);
	kfree(control->product_string);

	kfree(control);
}
489
/* Device type for control devices on the greybus bus. */
struct device_type greybus_control_type = {
	.name =		"greybus_control",
	.release =	gb_control_release,
};
494
/*
 * Allocate and initialize (but do not register) a control device for
 * @intf, including its control connection. The device is only initialized
 * here; gb_control_add() registers it and gb_control_put() releases it.
 *
 * Returns the new control on success or an ERR_PTR on failure.
 */
struct gb_control *gb_control_create(struct gb_interface *intf)
{
	struct gb_connection *connection;
	struct gb_control *control;

	control = kzalloc(sizeof(*control), GFP_KERNEL);
	if (!control)
		return ERR_PTR(-ENOMEM);

	control->intf = intf;

	connection = gb_connection_create_control(intf);
	if (IS_ERR(connection)) {
		dev_err(&intf->dev,
			"failed to create control connection: %ld\n",
			PTR_ERR(connection));
		kfree(control);
		return ERR_CAST(connection);
	}

	control->connection = connection;

	control->dev.parent = &intf->dev;
	control->dev.bus = &greybus_bus_type;
	control->dev.type = &greybus_control_type;
	control->dev.groups = control_groups;
	control->dev.dma_mask = intf->dev.dma_mask;
	device_initialize(&control->dev);
	dev_set_name(&control->dev, "%s.ctrl", dev_name(&intf->dev));

	/* Let connection users find their control structure. */
	gb_connection_set_data(control->connection, control);

	return control;
}
529
/*
 * Enable the control connection (tx only) and negotiate the protocol
 * version, deriving the optional-feature flags from the result. Returns 0
 * on success or a negative errno (with the connection disabled again).
 */
int gb_control_enable(struct gb_control *control)
{
	int ret;

	dev_dbg(&control->connection->intf->dev, "%s\n", __func__);

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n",
			ret);
		return ret;
	}

	ret = gb_control_get_version(control);
	if (ret)
		goto err_disable_connection;

	/* Bundle-version support arrived after protocol 0.1. */
	if (control->protocol_major > 0 || control->protocol_minor > 1)
		control->has_bundle_version = true;

	/* FIXME: use protocol version instead */
	if (!(control->intf->quirks & GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE))
		control->has_bundle_activate = true;

	return 0;

err_disable_connection:
	gb_connection_disable(control->connection);

	return ret;
}
562
563void gb_control_disable(struct gb_control *control)
564{
565 dev_dbg(&control->connection->intf->dev, "%s\n", __func__);
566
567 if (control->intf->disconnected)
568 gb_connection_disable_forced(control->connection);
569 else
570 gb_connection_disable(control->connection);
571}
572
/* Suspend the control connection; always succeeds. */
int gb_control_suspend(struct gb_control *control)
{
	gb_connection_disable(control->connection);

	return 0;
}
579
/*
 * Resume the control connection (tx only, matching gb_control_enable()).
 * Returns 0 on success or a negative errno.
 */
int gb_control_resume(struct gb_control *control)
{
	int ret;

	ret = gb_connection_enable_tx(control->connection);
	if (ret) {
		dev_err(&control->connection->intf->dev,
			"failed to enable control connection: %d\n", ret);
		return ret;
	}

	return 0;
}
593
/*
 * Register the control device (initialized by gb_control_create()) with
 * the driver core. Returns 0 on success or a negative errno.
 */
int gb_control_add(struct gb_control *control)
{
	int ret;

	ret = device_add(&control->dev);
	if (ret) {
		dev_err(&control->dev,
			"failed to register control device: %d\n",
			ret);
		return ret;
	}

	return 0;
}
608
/* Unregister the control device, if it was ever registered. */
void gb_control_del(struct gb_control *control)
{
	if (device_is_registered(&control->dev))
		device_del(&control->dev);
}
614
/* Take a reference on the control device; pair with gb_control_put(). */
struct gb_control *gb_control_get(struct gb_control *control)
{
	get_device(&control->dev);

	return control;
}
621
/* Drop a control-device reference; may trigger gb_control_release(). */
void gb_control_put(struct gb_control *control)
{
	put_device(&control->dev);
}
626
/* Flag a pending mode switch on the control connection. */
void gb_control_mode_switch_prepare(struct gb_control *control)
{
	gb_connection_mode_switch_prepare(control->connection);
}
631
/* Complete a mode switch, tearing down the deferred connection state. */
void gb_control_mode_switch_complete(struct gb_control *control)
{
	gb_connection_mode_switch_complete(control->connection);
}
diff --git a/drivers/staging/greybus/control.h b/drivers/staging/greybus/control.h
new file mode 100644
index 000000000000..f9a60daf9a72
--- /dev/null
+++ b/drivers/staging/greybus/control.h
@@ -0,0 +1,65 @@
/*
 * Greybus CPort control protocol
 *
 * Copyright 2015 Google Inc.
 * Copyright 2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#ifndef __CONTROL_H
#define __CONTROL_H

/*
 * Per-interface control-connection state.  One gb_control exists for each
 * interface and owns the dedicated control CPort connection used for
 * manifest retrieval, bundle activation/deactivation, power management
 * and timesync operations.
 */
struct gb_control {
	struct device dev;		/* child device of the interface */
	struct gb_interface *intf;	/* owning interface */

	struct gb_connection *connection;	/* the control CPort connection */

	u8 protocol_major;		/* negotiated control-protocol version */
	u8 protocol_minor;

	bool has_bundle_activate;	/* cleared by interface quirks */
	bool has_bundle_version;

	char *vendor_string;		/* strings read from the module */
	char *product_string;
};
#define to_gb_control(d) container_of(d, struct gb_control, dev)

struct gb_control *gb_control_create(struct gb_interface *intf);
int gb_control_enable(struct gb_control *control);
void gb_control_disable(struct gb_control *control);
int gb_control_suspend(struct gb_control *control);
int gb_control_resume(struct gb_control *control);
int gb_control_add(struct gb_control *control);
void gb_control_del(struct gb_control *control);
struct gb_control *gb_control_get(struct gb_control *control);
void gb_control_put(struct gb_control *control);

int gb_control_get_bundle_versions(struct gb_control *control);
int gb_control_connected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnected_operation(struct gb_control *control, u16 cport_id);
int gb_control_disconnecting_operation(struct gb_control *control,
				       u16 cport_id);
int gb_control_mode_switch_operation(struct gb_control *control);
void gb_control_mode_switch_prepare(struct gb_control *control);
void gb_control_mode_switch_complete(struct gb_control *control);
int gb_control_get_manifest_size_operation(struct gb_interface *intf);
int gb_control_get_manifest_operation(struct gb_interface *intf, void *manifest,
				      size_t size);
int gb_control_timesync_enable(struct gb_control *control, u8 count,
			       u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_control_timesync_disable(struct gb_control *control);
int gb_control_timesync_get_last_event(struct gb_control *control,
				       u64 *frame_time);
int gb_control_timesync_authoritative(struct gb_control *control,
				      u64 *frame_time);
int gb_control_bundle_suspend(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_resume(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_deactivate(struct gb_control *control, u8 bundle_id);
int gb_control_bundle_activate(struct gb_control *control, u8 bundle_id);
int gb_control_interface_suspend_prepare(struct gb_control *control);
int gb_control_interface_deactivate_prepare(struct gb_control *control);
int gb_control_interface_hibernate_abort(struct gb_control *control);
#endif /* __CONTROL_H */
diff --git a/drivers/staging/greybus/core.c b/drivers/staging/greybus/core.c
new file mode 100644
index 000000000000..1049e9c0edb0
--- /dev/null
+++ b/drivers/staging/greybus/core.c
@@ -0,0 +1,361 @@
1/*
2 * Greybus "Core"
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#define CREATE_TRACE_POINTS
13#include "greybus.h"
14#include "greybus_trace.h"
15
16#define GB_BUNDLE_AUTOSUSPEND_MS 3000
17
18/* Allow greybus to be disabled at boot if needed */
19static bool nogreybus;
20#ifdef MODULE
21module_param(nogreybus, bool, 0444);
22#else
23core_param(nogreybus, nogreybus, bool, 0444);
24#endif
/* Returns non-zero when greybus was disabled via the "nogreybus" parameter. */
int greybus_disabled(void)
{
	return nogreybus;
}
EXPORT_SYMBOL_GPL(greybus_disabled);
30
31static bool greybus_match_one_id(struct gb_bundle *bundle,
32 const struct greybus_bundle_id *id)
33{
34 if ((id->match_flags & GREYBUS_ID_MATCH_VENDOR) &&
35 (id->vendor != bundle->intf->vendor_id))
36 return false;
37
38 if ((id->match_flags & GREYBUS_ID_MATCH_PRODUCT) &&
39 (id->product != bundle->intf->product_id))
40 return false;
41
42 if ((id->match_flags & GREYBUS_ID_MATCH_CLASS) &&
43 (id->class != bundle->class))
44 return false;
45
46 return true;
47}
48
49static const struct greybus_bundle_id *
50greybus_match_id(struct gb_bundle *bundle, const struct greybus_bundle_id *id)
51{
52 if (id == NULL)
53 return NULL;
54
55 for (; id->vendor || id->product || id->class || id->driver_info;
56 id++) {
57 if (greybus_match_one_id(bundle, id))
58 return id;
59 }
60
61 return NULL;
62}
63
64static int greybus_match_device(struct device *dev, struct device_driver *drv)
65{
66 struct greybus_driver *driver = to_greybus_driver(drv);
67 struct gb_bundle *bundle;
68 const struct greybus_bundle_id *id;
69
70 if (!is_gb_bundle(dev))
71 return 0;
72
73 bundle = to_gb_bundle(dev);
74
75 id = greybus_match_id(bundle, driver->id_table);
76 if (id)
77 return 1;
78 /* FIXME - Dynamic ids? */
79 return 0;
80}
81
/*
 * Build the uevent environment for any greybus device type.
 *
 * Resolves the device's ancestors (module, interface, bundle) so that the
 * emitted variables always include the host-device bus id plus whichever
 * identifiers apply at this level.  Returns 0, -ENOMEM when the uevent
 * buffer is full, or -EINVAL for an unrecognized device type.
 */
static int greybus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gb_host_device *hd;
	struct gb_module *module = NULL;
	struct gb_interface *intf = NULL;
	struct gb_control *control = NULL;
	struct gb_bundle *bundle = NULL;
	struct gb_svc *svc = NULL;

	/* Determine the device type and walk up to the host device. */
	if (is_gb_host_device(dev)) {
		hd = to_gb_host_device(dev);
	} else if (is_gb_module(dev)) {
		module = to_gb_module(dev);
		hd = module->hd;
	} else if (is_gb_interface(dev)) {
		intf = to_gb_interface(dev);
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_control(dev)) {
		control = to_gb_control(dev);
		intf = control->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_bundle(dev)) {
		bundle = to_gb_bundle(dev);
		intf = bundle->intf;
		module = intf->module;
		hd = intf->hd;
	} else if (is_gb_svc(dev)) {
		svc = to_gb_svc(dev);
		hd = svc->hd;
	} else {
		dev_WARN(dev, "uevent for unknown greybus device \"type\"!\n");
		return -EINVAL;
	}

	if (add_uevent_var(env, "BUS=%u", hd->bus_id))
		return -ENOMEM;

	if (module) {
		if (add_uevent_var(env, "MODULE=%u", module->module_id))
			return -ENOMEM;
	}

	if (intf) {
		if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
			return -ENOMEM;
		if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
				   intf->vendor_id, intf->product_id))
			return -ENOMEM;
	}

	if (bundle) {
		// FIXME
		// add a uevent that can "load" a bundle type
		// This is what we need to bind a driver to so use the info
		// in gmod here as well

		if (add_uevent_var(env, "BUNDLE=%u", bundle->id))
			return -ENOMEM;
		if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
			return -ENOMEM;
	}

	return 0;
}
148
/* Bus shutdown callback: only host devices need work at shutdown time. */
static void greybus_shutdown(struct device *dev)
{
	if (!is_gb_host_device(dev))
		return;

	gb_hd_shutdown(to_gb_host_device(dev));
}
158
/* The greybus bus type; probe/remove are installed per-driver at
 * registration time by greybus_register_driver().
 */
struct bus_type greybus_bus_type = {
	.name =		"greybus",
	.match =	greybus_match_device,
	.uevent =	greybus_uevent,
	.shutdown =	greybus_shutdown,
};
165
/*
 * Bundle probe: activate the bundle on the module, set up its runtime PM
 * state, then hand it to the matched greybus driver.  The parent
 * interface is held resumed for the duration of the probe.
 */
static int greybus_probe(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	const struct greybus_bundle_id *id;
	int retval;

	/* match id */
	id = greybus_match_id(bundle, driver->id_table);
	if (!id)
		return -ENODEV;

	/* Resume the parent interface; on failure drop the usage count. */
	retval = pm_runtime_get_sync(&bundle->intf->dev);
	if (retval < 0) {
		pm_runtime_put_noidle(&bundle->intf->dev);
		return retval;
	}

	retval = gb_control_bundle_activate(bundle->intf->control, bundle->id);
	if (retval) {
		pm_runtime_put(&bundle->intf->dev);
		return retval;
	}

	/*
	 * Unbound bundle devices are always deactivated. During probe, the
	 * Runtime PM is set to enabled and active and the usage count is
	 * incremented. If the driver supports runtime PM, it should call
	 * pm_runtime_put() in its probe routine and pm_runtime_get_sync()
	 * in remove routine.
	 */
	pm_runtime_set_autosuspend_delay(dev, GB_BUNDLE_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	retval = driver->probe(bundle, id);
	if (retval) {
		/*
		 * Catch buggy drivers that fail to destroy their connections.
		 */
		WARN_ON(!list_empty(&bundle->connections));

		/* Unwind activation and the PM setup done above. */
		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		pm_runtime_dont_use_autosuspend(dev);
		pm_runtime_put(&bundle->intf->dev);

		return retval;
	}

	gb_timesync_schedule_synchronous(bundle->intf);

	pm_runtime_put(&bundle->intf->dev);

	return 0;
}
227
/*
 * Bundle remove: quiesce the bundle's connections, call the driver's
 * disconnect handler, then deactivate the bundle (unless the interface
 * is already gone) and tear down the runtime-PM state set up in probe.
 */
static int greybus_remove(struct device *dev)
{
	struct greybus_driver *driver = to_greybus_driver(dev->driver);
	struct gb_bundle *bundle = to_gb_bundle(dev);
	struct gb_connection *connection;
	int retval;

	/* Resume the bundle before touching its connections. */
	retval = pm_runtime_get_sync(dev);
	if (retval < 0)
		dev_err(dev, "failed to resume bundle: %d\n", retval);

	/*
	 * Disable (non-offloaded) connections early in case the interface is
	 * already gone to avoid unceccessary operation timeouts during
	 * driver disconnect. Otherwise, only disable incoming requests.
	 */
	list_for_each_entry(connection, &bundle->connections, bundle_links) {
		if (gb_connection_is_offloaded(connection))
			continue;

		if (bundle->intf->disconnected)
			gb_connection_disable_forced(connection);
		else
			gb_connection_disable_rx(connection);
	}

	driver->disconnect(bundle);

	/* Catch buggy drivers that fail to destroy their connections. */
	WARN_ON(!list_empty(&bundle->connections));

	if (!bundle->intf->disconnected)
		gb_control_bundle_deactivate(bundle->intf->control, bundle->id);

	/*
	 * Two put_noidle calls: the first balances the pm_runtime_get_sync()
	 * above, the last drops the reference taken by the
	 * pm_runtime_get_noresume() in greybus_probe().
	 */
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);

	return 0;
}
270
271int greybus_register_driver(struct greybus_driver *driver, struct module *owner,
272 const char *mod_name)
273{
274 int retval;
275
276 if (greybus_disabled())
277 return -ENODEV;
278
279 driver->driver.bus = &greybus_bus_type;
280 driver->driver.name = driver->name;
281 driver->driver.probe = greybus_probe;
282 driver->driver.remove = greybus_remove;
283 driver->driver.owner = owner;
284 driver->driver.mod_name = mod_name;
285
286 retval = driver_register(&driver->driver);
287 if (retval)
288 return retval;
289
290 pr_info("registered new driver %s\n", driver->name);
291 return 0;
292}
293EXPORT_SYMBOL_GPL(greybus_register_driver);
294
/* Unregister a driver previously added by greybus_register_driver(). */
void greybus_deregister_driver(struct greybus_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(greybus_deregister_driver);
300
/*
 * Module init: bring up debugfs, the bus type, the host-device and
 * operation layers, and timesync.  The error labels unwind in strict
 * reverse order of initialization.
 */
static int __init gb_init(void)
{
	int retval;

	if (greybus_disabled())
		return -ENODEV;

	/* CPORT_ID_BAD must lie outside the valid CPort id range. */
	BUILD_BUG_ON(CPORT_ID_MAX >= (long)CPORT_ID_BAD);

	gb_debugfs_init();

	retval = bus_register(&greybus_bus_type);
	if (retval) {
		pr_err("bus_register failed (%d)\n", retval);
		goto error_bus;
	}

	retval = gb_hd_init();
	if (retval) {
		pr_err("gb_hd_init failed (%d)\n", retval);
		goto error_hd;
	}

	retval = gb_operation_init();
	if (retval) {
		pr_err("gb_operation_init failed (%d)\n", retval);
		goto error_operation;
	}

	retval = gb_timesync_init();
	if (retval) {
		pr_err("gb_timesync_init failed\n");
		goto error_timesync;
	}
	return 0;	/* Success */

error_timesync:
	gb_operation_exit();
error_operation:
	gb_hd_exit();
error_hd:
	bus_unregister(&greybus_bus_type);
error_bus:
	gb_debugfs_cleanup();

	return retval;
}
module_init(gb_init);
349
/* Module exit: tear down in reverse order of gb_init(), then make sure
 * no tracepoint probes are still executing before the module text goes.
 */
static void __exit gb_exit(void)
{
	gb_timesync_exit();
	gb_operation_exit();
	gb_hd_exit();
	bus_unregister(&greybus_bus_type);
	gb_debugfs_cleanup();
	tracepoint_synchronize_unregister();
}
module_exit(gb_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/debugfs.c b/drivers/staging/greybus/debugfs.c
new file mode 100644
index 000000000000..a9d4d3da99a0
--- /dev/null
+++ b/drivers/staging/greybus/debugfs.c
@@ -0,0 +1,31 @@
1/*
2 * Greybus debugfs code
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/debugfs.h>
11
12#include "greybus.h"
13
/* Root of the greybus debugfs hierarchy; NULL when not initialized. */
static struct dentry *gb_debug_root;

/* Create the "greybus" debugfs directory; other modules hang files off it. */
void __init gb_debugfs_init(void)
{
	gb_debug_root = debugfs_create_dir("greybus", NULL);
}

/* Remove the whole greybus debugfs tree and reset the root pointer. */
void gb_debugfs_cleanup(void)
{
	debugfs_remove_recursive(gb_debug_root);
	gb_debug_root = NULL;
}

/* Return the greybus debugfs root directory (may be NULL). */
struct dentry *gb_debugfs_get(void)
{
	return gb_debug_root;
}
EXPORT_SYMBOL_GPL(gb_debugfs_get);
diff --git a/drivers/staging/greybus/es2.c b/drivers/staging/greybus/es2.c
new file mode 100644
index 000000000000..071bb1cfd3ae
--- /dev/null
+++ b/drivers/staging/greybus/es2.c
@@ -0,0 +1,1597 @@
1/*
2 * Greybus "AP" USB driver for "ES2" controller chips
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9#include <linux/kthread.h>
10#include <linux/sizes.h>
11#include <linux/usb.h>
12#include <linux/kfifo.h>
13#include <linux/debugfs.h>
14#include <linux/list.h>
15#include <asm/unaligned.h>
16
17#include "arpc.h"
18#include "greybus.h"
19#include "greybus_trace.h"
20#include "connection.h"
21
22
23/* Default timeout for USB vendor requests. */
24#define ES2_USB_CTRL_TIMEOUT 500
25
26/* Default timeout for ARPC CPort requests */
27#define ES2_ARPC_CPORT_TIMEOUT 500
28
29/* Fixed CPort numbers */
30#define ES2_CPORT_CDSI0 16
31#define ES2_CPORT_CDSI1 17
32
33/* Memory sizes for the buffers sent to/from the ES2 controller */
34#define ES2_GBUF_MSG_SIZE_MAX 2048
35
36/* Memory sizes for the ARPC buffers */
37#define ARPC_OUT_SIZE_MAX U16_MAX
38#define ARPC_IN_SIZE_MAX 128
39
/* USB ids this driver binds to (Google vendor id 0x18d1). */
static const struct usb_device_id id_table[] = {
	{ USB_DEVICE(0x18d1, 0x1eaf) },
	{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
45
46#define APB1_LOG_SIZE SZ_16K
47
48/*
49 * Number of CPort IN urbs in flight at any point in time.
50 * Adjust if we are having stalls in the USB buffer due to not enough urbs in
51 * flight.
52 */
53#define NUM_CPORT_IN_URB 4
54
55/* Number of CPort OUT urbs in flight at any point in time.
56 * Adjust if we get messages saying we are out of urbs in the system log.
57 */
58#define NUM_CPORT_OUT_URB 8
59
60/*
61 * Number of ARPC in urbs in flight at any point in time.
62 */
63#define NUM_ARPC_IN_URB 2
64
/*
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};
75
/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 *
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: whether the CDSI1 fixed CPort is claimed by a connection
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives an unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};
129
/**
 * timesync_enable_request - Enable timesync in an APBridge
 * @count: number of TimeSync Pulses to expect
 * @frame_time: the initial FrameTime at the first TimeSync Pulse
 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
 * @refclk: The AP mandated reference clock to run FrameTime at
 *
 * Wire format: little-endian, packed (sent over USB).
 */
struct timesync_enable_request {
	__u8	count;
	__le64	frame_time;
	__le32	strobe_delay;
	__le32	refclk;
} __packed;
143
/**
 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
 * @frame_time: An array of authoritative FrameTimes provided by the SVC
 *              and relayed to the APBridge by the AP
 *
 * Wire format: little-endian, packed (sent over USB).
 */
struct timesync_authoritative_request {
	__le64	frame_time[GB_TIMESYNC_MAX_STROBES];
} __packed;
152
/* One in-flight APBridgeA RPC transaction. */
struct arpc {
	struct list_head list;			/* linked on es2->arpcs */
	struct arpc_request_message *req;	/* outgoing request */
	struct arpc_response_message *resp;	/* response, once received */
	struct completion response_received;	/* signalled on response */
	bool active;				/* still awaiting a response */
};
160
/* The es2 state lives in the host device's private area (hd_priv). */
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}
165
166static void cport_out_callback(struct urb *urb);
167static void usb_log_enable(struct es2_ap_dev *es2);
168static void usb_log_disable(struct es2_ap_dev *es2);
169static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
170 size_t size, int *result, unsigned int timeout);
171
172static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
173{
174 struct usb_device *udev = es2->usb_dev;
175 u8 *data;
176 int retval;
177
178 data = kmalloc(size, GFP_KERNEL);
179 if (!data)
180 return -ENOMEM;
181 memcpy(data, req, size);
182
183 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
184 cmd,
185 USB_DIR_OUT | USB_TYPE_VENDOR |
186 USB_RECIP_INTERFACE,
187 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
188 if (retval < 0)
189 dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
190 else
191 retval = 0;
192
193 kfree(data);
194 return retval;
195}
196
/*
 * Completion handler for output_async() urbs: the setup packet (and the
 * payload that trails it in the same allocation) plus the urb itself are
 * owned by the urb once submitted, so free both here.
 */
static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}
204
/*
 * Send a vendor control request to the bridge asynchronously.
 *
 * The setup packet and payload are carved from a single GFP_ATOMIC
 * allocation (payload immediately follows the usb_ctrlrequest); on
 * successful submission ownership passes to ap_urb_complete(), which
 * frees both.  Returns 0 on submission or a negative errno.
 */
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* One allocation: setup packet followed by the payload bytes. */
	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		/* Submission failed: the completion won't run, free here. */
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}
242
243static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
244 bool async)
245{
246 struct es2_ap_dev *es2 = hd_to_es2(hd);
247
248 if (async)
249 return output_async(es2, req, size, cmd);
250
251 return output_sync(es2, req, size, cmd);
252}
253
/*
 * Submit all pre-allocated CPort-in urbs so the bridge can deliver
 * incoming messages.  On failure, every urb submitted so far is killed
 * (the backwards loop starts at the last successful index).
 */
static int es2_cport_in_enable(struct es2_ap_dev *es2,
				struct es2_cport_in *cport_in)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		urb = cport_in->urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = cport_in->urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}
282
283static void es2_cport_in_disable(struct es2_ap_dev *es2,
284 struct es2_cport_in *cport_in)
285{
286 struct urb *urb;
287 int i;
288
289 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
290 urb = cport_in->urb[i];
291 usb_kill_urb(urb);
292 }
293}
294
/*
 * Submit all pre-allocated ARPC-in urbs.  On failure, urbs submitted so
 * far are killed in reverse order before returning the errno.
 */
static int es2_arpc_in_enable(struct es2_ap_dev *es2)
{
	struct urb *urb;
	int ret;
	int i;

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		urb = es2->arpc_urb[i];

		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			dev_err(&es2->usb_dev->dev,
				"failed to submit arpc in-urb: %d\n", ret);
			goto err_kill_urbs;
		}
	}

	return 0;

err_kill_urbs:
	for (--i; i >= 0; --i) {
		urb = es2->arpc_urb[i];
		usb_kill_urb(urb);
	}

	return ret;
}
322
323static void es2_arpc_in_disable(struct es2_ap_dev *es2)
324{
325 struct urb *urb;
326 int i;
327
328 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
329 urb = es2->arpc_urb[i];
330 usb_kill_urb(urb);
331 }
332}
333
334static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
335{
336 struct urb *urb = NULL;
337 unsigned long flags;
338 int i;
339
340 spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
341
342 /* Look in our pool of allocated urbs first, as that's the "fastest" */
343 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
344 if (es2->cport_out_urb_busy[i] == false &&
345 es2->cport_out_urb_cancelled[i] == false) {
346 es2->cport_out_urb_busy[i] = true;
347 urb = es2->cport_out_urb[i];
348 break;
349 }
350 }
351 spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
352 if (urb)
353 return urb;
354
355 /*
356 * Crap, pool is empty, complain to the syslog and go allocate one
357 * dynamically as we have to succeed.
358 */
359 dev_dbg(&es2->usb_dev->dev,
360 "No free CPort OUT urbs, having to dynamically allocate one!\n");
361 return usb_alloc_urb(0, gfp_mask);
362}
363
/*
 * Release a urb obtained from next_free_urb(): pool urbs are just marked
 * free again; a dynamically-allocated urb (not found in the pool scan,
 * so still non-NULL afterwards) is actually freed.
 */
static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
{
	unsigned long flags;
	int i;
	/*
	 * See if this was an urb in our pool, if so mark it "free", otherwise
	 * we need to free it ourselves.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_busy[i] = false;
			urb = NULL;
			break;
		}
	}
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* If urb is not NULL, then we need to free this urb */
	usb_free_urb(urb);
}
385
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	/* Only the low byte is stored; pad[0] is a single byte.
	 * NOTE(review): assumes cport ids on this bridge fit in 8 bits —
	 * cport_id_valid() has been checked by the caller.
	 */
	header->pad[0] = cport_id;
}

/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}

/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	gb_message_cport_clear(header);

	return cport_id;
}
411
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent. Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	/*
	 * Publish the urb via hcpriv under the lock so message_cancel()
	 * can find (and kill) it.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	/* Terminate max-packet-sized transfers with a zero-length packet. */
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		/* Unpublish the urb and undo the header packing. */
		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}
475
/*
 * Can not be called in atomic context.
 *
 * Cancels an in-flight message: takes a reference so a dynamically
 * allocated urb survives its completion, marks a pool urb "cancelled" so
 * next_free_urb() won't hand it out while it is being killed, then kills
 * it and drops the extra reference.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	/* usb_kill_urb() sleeps, so it must happen outside the lock. */
	usb_kill_urb(urb);

	/* i < NUM_CPORT_OUT_URB only when the urb came from the pool. */
	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	usb_free_urb(urb);
}
513
/*
 * Allocate a CPort id for a new connection.
 *
 * The fixed CDSI CPorts are never handed out through the generic path:
 * requesting one explicitly fails, and an offloaded CDSI1 connection
 * claims the dedicated ES2_CPORT_CDSI1 instead (tracked via
 * es2->cdsi1_in_use).  Otherwise an id is taken from the host device's
 * ida, either any free id (cport_id < 0) or the specific one requested.
 * Returns the allocated id or a negative errno.
 */
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}
553
554static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
555{
556 struct es2_ap_dev *es2 = hd_to_es2(hd);
557
558 switch (cport_id) {
559 case ES2_CPORT_CDSI1:
560 es2->cdsi1_in_use = false;
561 return;
562 }
563
564 ida_simple_remove(&hd->cport_id_map, cport_id);
565}
566
/*
 * Push a connection's flags (control, high-priority) down to the bridge
 * for the given CPort via a vendor control request.  Returns 0 on
 * success or a negative errno; a short transfer is mapped to -EIO.
 */
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	/* Heap-allocated because usb_control_msg() needs DMA-able memory. */
	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Translate generic connection flags to the APB request bits. */
	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		/* A short (non-negative) transfer is still a failure. */
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}
611
612static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
613{
614 struct es2_ap_dev *es2 = hd_to_es2(hd);
615 struct device *dev = &es2->usb_dev->dev;
616 struct arpc_cport_connected_req req;
617 int ret;
618
619 req.cport_id = cpu_to_le16(cport_id);
620 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
621 NULL, ES2_ARPC_CPORT_TIMEOUT);
622 if (ret) {
623 dev_err(dev, "failed to set connected state for cport %u: %d\n",
624 cport_id, ret);
625 return ret;
626 }
627
628 return 0;
629}
630
631static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
632{
633 struct es2_ap_dev *es2 = hd_to_es2(hd);
634 struct device *dev = &es2->usb_dev->dev;
635 struct arpc_cport_flush_req req;
636 int ret;
637
638 req.cport_id = cpu_to_le16(cport_id);
639 ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
640 NULL, ES2_ARPC_CPORT_TIMEOUT);
641 if (ret) {
642 dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
643 return ret;
644 }
645
646 return 0;
647}
648
/*
 * Ask the bridge, via ARPC, to send a CPort shutdown request for the
 * given phase.  The ARPC timeout is extended by the caller's timeout
 * since the bridge itself waits up to that long.
 */
static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
			      u8 phase, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_shutdown_req req;
	int result;
	int ret;

	/* The wire format carries the timeout as 16 bits. */
	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.timeout = cpu_to_le16(timeout);
	req.phase = phase;
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		/*
		 * NOTE(review): 'result' is only written by arpc_sync() when a
		 * response arrives; on early failure the value printed here may
		 * be indeterminate — confirm arpc_sync() always stores *result.
		 */
		dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}
674
/*
 * Ask the bridge, via ARPC, to quiesce a CPort, advertising how much
 * buffer space the peer has.  Both peer_space and timeout must fit the
 * 16-bit wire fields.
 */
static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
			     size_t peer_space, unsigned int timeout)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct device *dev = &es2->usb_dev->dev;
	struct arpc_cport_quiesce_req req;
	int result;
	int ret;

	if (peer_space > U16_MAX)
		return -EINVAL;

	if (timeout > U16_MAX)
		return -EINVAL;

	req.cport_id = cpu_to_le16(cport_id);
	req.peer_space = cpu_to_le16(peer_space);
	req.timeout = cpu_to_le16(timeout);
	ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
			&result, ES2_ARPC_CPORT_TIMEOUT + timeout);
	if (ret) {
		/*
		 * NOTE(review): as in es2_cport_shutdown(), 'result' may be
		 * indeterminate when arpc_sync() fails before a response —
		 * confirm arpc_sync() always stores *result.
		 */
		dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
			cport_id, ret, result);
		return ret;
	}

	return 0;
}
703
704static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
705{
706 struct es2_ap_dev *es2 = hd_to_es2(hd);
707 struct device *dev = &es2->usb_dev->dev;
708 struct arpc_cport_clear_req req;
709 int ret;
710
711 req.cport_id = cpu_to_le16(cport_id);
712 ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
713 NULL, ES2_ARPC_CPORT_TIMEOUT);
714 if (ret) {
715 dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
716 return ret;
717 }
718
719 return 0;
720}
721
722static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
723{
724 int retval;
725 struct es2_ap_dev *es2 = hd_to_es2(hd);
726 struct usb_device *udev = es2->usb_dev;
727
728 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
729 GB_APB_REQUEST_LATENCY_TAG_EN,
730 USB_DIR_OUT | USB_TYPE_VENDOR |
731 USB_RECIP_INTERFACE, cport_id, 0, NULL,
732 0, ES2_USB_CTRL_TIMEOUT);
733
734 if (retval < 0)
735 dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
736 cport_id);
737 return retval;
738}
739
740static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
741{
742 int retval;
743 struct es2_ap_dev *es2 = hd_to_es2(hd);
744 struct usb_device *udev = es2->usb_dev;
745
746 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
747 GB_APB_REQUEST_LATENCY_TAG_DIS,
748 USB_DIR_OUT | USB_TYPE_VENDOR |
749 USB_RECIP_INTERFACE, cport_id, 0, NULL,
750 0, ES2_USB_CTRL_TIMEOUT);
751
752 if (retval < 0)
753 dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
754 cport_id);
755 return retval;
756}
757
758static int timesync_enable(struct gb_host_device *hd, u8 count,
759 u64 frame_time, u32 strobe_delay, u32 refclk)
760{
761 int retval;
762 struct es2_ap_dev *es2 = hd_to_es2(hd);
763 struct usb_device *udev = es2->usb_dev;
764 struct gb_control_timesync_enable_request *request;
765
766 request = kzalloc(sizeof(*request), GFP_KERNEL);
767 if (!request)
768 return -ENOMEM;
769
770 request->count = count;
771 request->frame_time = cpu_to_le64(frame_time);
772 request->strobe_delay = cpu_to_le32(strobe_delay);
773 request->refclk = cpu_to_le32(refclk);
774 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
775 GB_APB_REQUEST_TIMESYNC_ENABLE,
776 USB_DIR_OUT | USB_TYPE_VENDOR |
777 USB_RECIP_INTERFACE, 0, 0, request,
778 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
779 if (retval < 0)
780 dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);
781
782 kfree(request);
783 return retval;
784}
785
786static int timesync_disable(struct gb_host_device *hd)
787{
788 int retval;
789 struct es2_ap_dev *es2 = hd_to_es2(hd);
790 struct usb_device *udev = es2->usb_dev;
791
792 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
793 GB_APB_REQUEST_TIMESYNC_DISABLE,
794 USB_DIR_OUT | USB_TYPE_VENDOR |
795 USB_RECIP_INTERFACE, 0, 0, NULL,
796 0, ES2_USB_CTRL_TIMEOUT);
797 if (retval < 0)
798 dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);
799
800 return retval;
801}
802
803static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
804{
805 int retval, i;
806 struct es2_ap_dev *es2 = hd_to_es2(hd);
807 struct usb_device *udev = es2->usb_dev;
808 struct timesync_authoritative_request *request;
809
810 request = kzalloc(sizeof(*request), GFP_KERNEL);
811 if (!request)
812 return -ENOMEM;
813
814 for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
815 request->frame_time[i] = cpu_to_le64(frame_time[i]);
816
817 retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
818 GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
819 USB_DIR_OUT | USB_TYPE_VENDOR |
820 USB_RECIP_INTERFACE, 0, 0, request,
821 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
822 if (retval < 0)
823 dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);
824
825 kfree(request);
826 return retval;
827}
828
/*
 * Read the frame time of the last TimeSync event from the bridge.
 * On success *frame_time is filled in and 0 is returned; otherwise a
 * negative errno is returned and *frame_time is left untouched.
 */
static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	__le64 *response_frame_time;

	/* usb_control_msg() needs a kmalloc'ed buffer, not stack memory. */
	response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
	if (!response_frame_time)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
				 sizeof(*response_frame_time),
				 ES2_USB_CTRL_TIMEOUT);

	/* A short (but successful) transfer is also treated as an error. */
	if (retval != sizeof(*response_frame_time)) {
		dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}
	*frame_time = le64_to_cpu(*response_frame_time);
	retval = 0;
out:
	kfree(response_frame_time);
	return retval;
}
862
/*
 * Host-device operations provided by the ES2 bridge: CPort lifecycle
 * management, message transport, latency tagging and TimeSync support.
 */
static struct gb_hd_driver es2_driver = {
	.hd_priv_size = sizeof(struct es2_ap_dev),
	.message_send = message_send,
	.message_cancel = message_cancel,
	.cport_allocate = es2_cport_allocate,
	.cport_release = es2_cport_release,
	.cport_enable = cport_enable,
	.cport_connected = es2_cport_connected,
	.cport_flush = es2_cport_flush,
	.cport_shutdown = es2_cport_shutdown,
	.cport_quiesce = es2_cport_quiesce,
	.cport_clear = es2_cport_clear,
	.latency_tag_enable = latency_tag_enable,
	.latency_tag_disable = latency_tag_disable,
	.output = output,
	.timesync_enable = timesync_enable,
	.timesync_disable = timesync_disable,
	.timesync_authoritative = timesync_authoritative,
	.timesync_get_last_event = timesync_get_last_event,
};
883
/*
 * Common function to report consistent warnings based on URB status.
 * Returns 0 for success, the (negative) status for fatal conditions the
 * caller should stop on, and -EAGAIN for unknown/retryable statuses.
 */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through - treat like the fatal errors below */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}
909
/*
 * Free everything allocated for an ES2 device: debugfs entries, the log
 * poller, all in/out URBs and buffers, the reserved CDSI CPorts, and
 * finally the host-device and USB-device references.
 */
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		/* Kill before freeing so no completion runs on freed memory. */
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false; /* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	/* gb_hd_put() may free es2, so take the udev pointer out first. */
	udev = es2->usb_dev;
	gb_hd_put(es2->hd);

	usb_put_dev(udev);
}
949
/*
 * Completion handler for CPort bulk-in URBs. Extracts the CPort id from
 * the Greybus message header, hands the data to greybus core, and
 * resubmits the URB. Runs in atomic (interrupt) context.
 */
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		/* Transient errors: drop the data but keep the URB alive. */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
							urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}
992
/*
 * Completion handler for CPort bulk-out URBs: report the send result to
 * greybus core and return the URB to the free pool.
 */
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	/* Detach the URB from the message so message_cancel can't see it. */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}
1015
1016static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
1017{
1018 struct arpc *rpc;
1019
1020 if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
1021 return NULL;
1022
1023 rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
1024 if (!rpc)
1025 return NULL;
1026
1027 INIT_LIST_HEAD(&rpc->list);
1028 rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
1029 if (!rpc->req)
1030 goto err_free_rpc;
1031
1032 rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
1033 if (!rpc->resp)
1034 goto err_free_req;
1035
1036 rpc->req->type = type;
1037 rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
1038 memcpy(rpc->req->data, payload, size);
1039
1040 init_completion(&rpc->response_received);
1041
1042 return rpc;
1043
1044err_free_req:
1045 kfree(rpc->req);
1046err_free_rpc:
1047 kfree(rpc);
1048
1049 return NULL;
1050}
1051
1052static void arpc_free(struct arpc *rpc)
1053{
1054 kfree(rpc->req);
1055 kfree(rpc->resp);
1056 kfree(rpc);
1057}
1058
1059static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
1060{
1061 struct arpc *rpc;
1062
1063 list_for_each_entry(rpc, &es2->arpcs, list) {
1064 if (rpc->req->id == id)
1065 return rpc;
1066 }
1067
1068 return NULL;
1069}
1070
1071static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
1072{
1073 rpc->active = true;
1074 rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
1075 list_add_tail(&rpc->list, &es2->arpcs);
1076}
1077
1078static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
1079{
1080 if (rpc->active) {
1081 rpc->active = false;
1082 list_del(&rpc->list);
1083 }
1084}
1085
/*
 * Ship an ARPC request to the bridge over the control pipe.
 * Returns 0 on success or a negative errno; a short (but positive)
 * transfer is converted to -EIO.
 *
 * NOTE(review): the @timeout parameter is currently unused — the USB
 * transfer always uses ES2_USB_CTRL_TIMEOUT. Confirm whether it was
 * meant to be passed through.
 */
static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
{
	struct usb_device *udev = es2->usb_dev;
	int retval;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_ARPC_RUN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE,
				 0, 0,
				 rpc->req, le16_to_cpu(rpc->req->size),
				 ES2_USB_CTRL_TIMEOUT);
	if (retval != le16_to_cpu(rpc->req->size)) {
		dev_err(&udev->dev,
			"failed to send ARPC request %d: %d\n",
			rpc->req->type, retval);
		if (retval > 0)
			retval = -EIO;
		return retval;
	}

	return 0;
}
1109
/*
 * Execute a synchronous ARPC: queue the request, send it, and wait up to
 * @timeout ms for the response delivered by arpc_in_callback().
 *
 * Returns 0 on success, -EREMOTEIO if the bridge reported a non-zero
 * result (stored in *result when provided), or another negative errno on
 * local failure or timeout.
 */
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	/* Queue before sending so the response can't race past us. */
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
			&rpc->response_received,
			msecs_to_jiffies(timeout));
	if (retval <= 0) {
		/* 0 means the wait timed out; negative means interrupted. */
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	/*
	 * Delete under the lock: arpc_in_callback() may still be looking at
	 * this entry. arpc_del() is a no-op if the callback already removed
	 * it.
	 */
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}
1162
1163static void arpc_in_callback(struct urb *urb)
1164{
1165 struct es2_ap_dev *es2 = urb->context;
1166 struct device *dev = &urb->dev->dev;
1167 int status = check_urb_status(urb);
1168 struct arpc *rpc;
1169 struct arpc_response_message *resp;
1170 unsigned long flags;
1171 int retval;
1172
1173 if (status) {
1174 if ((status == -EAGAIN) || (status == -EPROTO))
1175 goto exit;
1176
1177 /* The urb is being unlinked */
1178 if (status == -ENOENT || status == -ESHUTDOWN)
1179 return;
1180
1181 dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
1182 return;
1183 }
1184
1185 if (urb->actual_length < sizeof(*resp)) {
1186 dev_err(dev, "short aprc response received\n");
1187 goto exit;
1188 }
1189
1190 resp = urb->transfer_buffer;
1191 spin_lock_irqsave(&es2->arpc_lock, flags);
1192 rpc = arpc_find(es2, resp->id);
1193 if (!rpc) {
1194 dev_err(dev, "invalid arpc response id received: %u\n",
1195 le16_to_cpu(resp->id));
1196 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1197 goto exit;
1198 }
1199
1200 arpc_del(es2, rpc);
1201 memcpy(rpc->resp, resp, sizeof(*resp));
1202 complete(&rpc->response_received);
1203 spin_unlock_irqrestore(&es2->arpc_lock, flags);
1204
1205exit:
1206 /* put our urb back in the request pool */
1207 retval = usb_submit_urb(urb, GFP_ATOMIC);
1208 if (retval)
1209 dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
1210}
1211
1212#define APB1_LOG_MSG_SIZE 64
1213static void apb_log_get(struct es2_ap_dev *es2, char *buf)
1214{
1215 int retval;
1216
1217 do {
1218 retval = usb_control_msg(es2->usb_dev,
1219 usb_rcvctrlpipe(es2->usb_dev, 0),
1220 GB_APB_REQUEST_LOG,
1221 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
1222 0x00, 0x00,
1223 buf,
1224 APB1_LOG_MSG_SIZE,
1225 ES2_USB_CTRL_TIMEOUT);
1226 if (retval > 0)
1227 kfifo_in(&es2->apb_log_fifo, buf, retval);
1228 } while (retval > 0);
1229}
1230
1231static int apb_log_poll(void *data)
1232{
1233 struct es2_ap_dev *es2 = data;
1234 char *buf;
1235
1236 buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
1237 if (!buf)
1238 return -ENOMEM;
1239
1240 while (!kthread_should_stop()) {
1241 msleep(1000);
1242 apb_log_get(es2, buf);
1243 }
1244
1245 kfree(buf);
1246
1247 return 0;
1248}
1249
1250static ssize_t apb_log_read(struct file *f, char __user *buf,
1251 size_t count, loff_t *ppos)
1252{
1253 struct es2_ap_dev *es2 = f->f_inode->i_private;
1254 ssize_t ret;
1255 size_t copied;
1256 char *tmp_buf;
1257
1258 if (count > APB1_LOG_SIZE)
1259 count = APB1_LOG_SIZE;
1260
1261 tmp_buf = kmalloc(count, GFP_KERNEL);
1262 if (!tmp_buf)
1263 return -ENOMEM;
1264
1265 copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
1266 ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
1267
1268 kfree(tmp_buf);
1269
1270 return ret;
1271}
1272
/* debugfs "apb_log" file: read-only stream of buffered bridge log data. */
static const struct file_operations apb_log_fops = {
	.read = apb_log_read,
};
1276
/*
 * Start the log-polling kthread and expose the "apb_log" debugfs file.
 * No-op if the poller is already running; silently bails out if the
 * kthread cannot be created.
 */
static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
						gb_debugfs_get(), es2,
						&apb_log_fops);
}
1291
/*
 * Tear down bridge logging: remove the debugfs file first so no new
 * readers appear, then stop the polling kthread.
 */
static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}
1303
1304static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
1305 size_t count, loff_t *ppos)
1306{
1307 struct es2_ap_dev *es2 = f->f_inode->i_private;
1308 int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
1309 char tmp_buf[3];
1310
1311 sprintf(tmp_buf, "%d\n", enable);
1312 return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
1313}
1314
1315static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
1316 size_t count, loff_t *ppos)
1317{
1318 int enable;
1319 ssize_t retval;
1320 struct es2_ap_dev *es2 = f->f_inode->i_private;
1321
1322 retval = kstrtoint_from_user(buf, count, 10, &enable);
1323 if (retval)
1324 return retval;
1325
1326 if (enable)
1327 usb_log_enable(es2);
1328 else
1329 usb_log_disable(es2);
1330
1331 return count;
1332}
1333
/* debugfs "apb_log_enable" file: toggles the bridge log poller. */
static const struct file_operations apb_log_enable_fops = {
	.read = apb_log_enable_read,
	.write = apb_log_enable_write,
};
1338
/*
 * Ask the bridge how many CPorts it supports. Returns the count (capped
 * at U8_MAX, since a CPort ID must fit one byte of the message header)
 * or a negative errno.
 */
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	/* usb_control_msg() needs a kmalloc'ed buffer, not stack memory. */
	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}
1375
1376/*
1377 * The ES2 USB Bridge device has 15 endpoints
1378 * 1 Control - usual USB stuff + AP -> APBridgeA messages
1379 * 7 Bulk IN - CPort data in
1380 * 7 Bulk OUT - CPort data out
1381 */
1382static int ap_probe(struct usb_interface *interface,
1383 const struct usb_device_id *id)
1384{
1385 struct es2_ap_dev *es2;
1386 struct gb_host_device *hd;
1387 struct usb_device *udev;
1388 struct usb_host_interface *iface_desc;
1389 struct usb_endpoint_descriptor *endpoint;
1390 __u8 ep_addr;
1391 int retval;
1392 int i;
1393 int num_cports;
1394 bool bulk_out_found = false;
1395 bool bulk_in_found = false;
1396 bool arpc_in_found = false;
1397
1398 udev = usb_get_dev(interface_to_usbdev(interface));
1399
1400 num_cports = apb_get_cport_count(udev);
1401 if (num_cports < 0) {
1402 usb_put_dev(udev);
1403 dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
1404 num_cports);
1405 return num_cports;
1406 }
1407
1408 hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
1409 num_cports);
1410 if (IS_ERR(hd)) {
1411 usb_put_dev(udev);
1412 return PTR_ERR(hd);
1413 }
1414
1415 es2 = hd_to_es2(hd);
1416 es2->hd = hd;
1417 es2->usb_intf = interface;
1418 es2->usb_dev = udev;
1419 spin_lock_init(&es2->cport_out_urb_lock);
1420 INIT_KFIFO(es2->apb_log_fifo);
1421 usb_set_intfdata(interface, es2);
1422
1423 /*
1424 * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
1425 * dynamically.
1426 */
1427 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
1428 if (retval)
1429 goto error;
1430 retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
1431 if (retval)
1432 goto error;
1433
1434 /* find all bulk endpoints */
1435 iface_desc = interface->cur_altsetting;
1436 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
1437 endpoint = &iface_desc->endpoint[i].desc;
1438 ep_addr = endpoint->bEndpointAddress;
1439
1440 if (usb_endpoint_is_bulk_in(endpoint)) {
1441 if (!bulk_in_found) {
1442 es2->cport_in.endpoint = ep_addr;
1443 bulk_in_found = true;
1444 } else if (!arpc_in_found) {
1445 es2->arpc_endpoint_in = ep_addr;
1446 arpc_in_found = true;
1447 } else {
1448 dev_warn(&udev->dev,
1449 "Unused bulk IN endpoint found: 0x%02x\n",
1450 ep_addr);
1451 }
1452 continue;
1453 }
1454 if (usb_endpoint_is_bulk_out(endpoint)) {
1455 if (!bulk_out_found) {
1456 es2->cport_out_endpoint = ep_addr;
1457 bulk_out_found = true;
1458 } else {
1459 dev_warn(&udev->dev,
1460 "Unused bulk OUT endpoint found: 0x%02x\n",
1461 ep_addr);
1462 }
1463 continue;
1464 }
1465 dev_warn(&udev->dev,
1466 "Unknown endpoint type found, address 0x%02x\n",
1467 ep_addr);
1468 }
1469 if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
1470 dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
1471 retval = -ENODEV;
1472 goto error;
1473 }
1474
1475 /* Allocate buffers for our cport in messages */
1476 for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
1477 struct urb *urb;
1478 u8 *buffer;
1479
1480 urb = usb_alloc_urb(0, GFP_KERNEL);
1481 if (!urb) {
1482 retval = -ENOMEM;
1483 goto error;
1484 }
1485 es2->cport_in.urb[i] = urb;
1486
1487 buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
1488 if (!buffer) {
1489 retval = -ENOMEM;
1490 goto error;
1491 }
1492
1493 usb_fill_bulk_urb(urb, udev,
1494 usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
1495 buffer, ES2_GBUF_MSG_SIZE_MAX,
1496 cport_in_callback, hd);
1497
1498 es2->cport_in.buffer[i] = buffer;
1499 }
1500
1501 /* Allocate buffers for ARPC in messages */
1502 for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
1503 struct urb *urb;
1504 u8 *buffer;
1505
1506 urb = usb_alloc_urb(0, GFP_KERNEL);
1507 if (!urb) {
1508 retval = -ENOMEM;
1509 goto error;
1510 }
1511 es2->arpc_urb[i] = urb;
1512
1513 buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
1514 if (!buffer) {
1515 retval = -ENOMEM;
1516 goto error;
1517 }
1518
1519 usb_fill_bulk_urb(urb, udev,
1520 usb_rcvbulkpipe(udev,
1521 es2->arpc_endpoint_in),
1522 buffer, ARPC_IN_SIZE_MAX,
1523 arpc_in_callback, es2);
1524
1525 es2->arpc_buffer[i] = buffer;
1526 }
1527
1528 /* Allocate urbs for our CPort OUT messages */
1529 for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
1530 struct urb *urb;
1531
1532 urb = usb_alloc_urb(0, GFP_KERNEL);
1533 if (!urb) {
1534 retval = -ENOMEM;
1535 goto error;
1536 }
1537
1538 es2->cport_out_urb[i] = urb;
1539 es2->cport_out_urb_busy[i] = false; /* just to be anal */
1540 }
1541
1542 /* XXX We will need to rename this per APB */
1543 es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
1544 (S_IWUSR | S_IRUGO),
1545 gb_debugfs_get(), es2,
1546 &apb_log_enable_fops);
1547
1548 INIT_LIST_HEAD(&es2->arpcs);
1549 spin_lock_init(&es2->arpc_lock);
1550
1551 if (es2_arpc_in_enable(es2))
1552 goto error;
1553
1554 retval = gb_hd_add(hd);
1555 if (retval)
1556 goto err_disable_arpc_in;
1557
1558 retval = es2_cport_in_enable(es2, &es2->cport_in);
1559 if (retval)
1560 goto err_hd_del;
1561
1562 return 0;
1563
1564err_hd_del:
1565 gb_hd_del(hd);
1566err_disable_arpc_in:
1567 es2_arpc_in_disable(es2);
1568error:
1569 es2_destroy(es2);
1570
1571 return retval;
1572}
1573
/*
 * USB disconnect: unregister the host device first so no new traffic is
 * started, then stop the in-flight receive paths, then free everything.
 */
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}
1585
/*
 * USB driver glue. soft_unbind lets us talk to the device from our
 * disconnect path before the interface is torn down.
 */
static struct usb_driver es2_ap_driver = {
	.name =		"es2_ap_driver",
	.probe =	ap_probe,
	.disconnect =	ap_disconnect,
	.id_table =	id_table,
	.soft_unbind =	1,
};
1593
1594module_usb_driver(es2_ap_driver);
1595
1596MODULE_LICENSE("GPL v2");
1597MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");
diff --git a/drivers/staging/greybus/firmware.h b/drivers/staging/greybus/firmware.h
new file mode 100644
index 000000000000..f4f0db1cefe8
--- /dev/null
+++ b/drivers/staging/greybus/firmware.h
@@ -0,0 +1,42 @@
1/*
2 * Greybus Firmware Management Header
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __FIRMWARE_H
11#define __FIRMWARE_H
12
13#include "greybus.h"
14
15#define FW_NAME_PREFIX "gmp_"
16
17/*
18 * Length of the string in format: "FW_NAME_PREFIX""%08x_%08x_%08x_%08x_%s.tftf"
19 * (3 + 1 + 4 * (8 + 1) + 10 + 1 + 4 + 1)
20 */
21#define FW_NAME_SIZE 56
22
23/* Firmware Management Protocol specific functions */
24int fw_mgmt_init(void);
25void fw_mgmt_exit(void);
26struct gb_connection *to_fw_mgmt_connection(struct device *dev);
27int gb_fw_mgmt_request_handler(struct gb_operation *op);
28int gb_fw_mgmt_connection_init(struct gb_connection *connection);
29void gb_fw_mgmt_connection_exit(struct gb_connection *connection);
30
31/* Firmware Download Protocol specific functions */
32int gb_fw_download_request_handler(struct gb_operation *op);
33int gb_fw_download_connection_init(struct gb_connection *connection);
34void gb_fw_download_connection_exit(struct gb_connection *connection);
35
36/* CAP Protocol specific functions */
37int cap_init(void);
38void cap_exit(void);
39int gb_cap_connection_init(struct gb_connection *connection);
40void gb_cap_connection_exit(struct gb_connection *connection);
41
42#endif /* __FIRMWARE_H */
diff --git a/drivers/staging/greybus/fw-core.c b/drivers/staging/greybus/fw-core.c
new file mode 100644
index 000000000000..454a98957ba5
--- /dev/null
+++ b/drivers/staging/greybus/fw-core.c
@@ -0,0 +1,312 @@
1/*
2 * Greybus Firmware Core Bundle Driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11#include <linux/firmware.h>
12#include "firmware.h"
13#include "greybus.h"
14#include "spilib.h"
15
/*
 * Per-bundle state: one connection per supported protocol. Only the
 * management connection is mandatory; the others stay NULL when the
 * corresponding CPort is absent or failed to initialize.
 */
struct gb_fw_core {
	struct gb_connection *download_connection;
	struct gb_connection *mgmt_connection;
	struct gb_connection *spi_connection;
	struct gb_connection *cap_connection;
};

/* SPI hooks passed to gb_spilib_master_init(); never assigned here (NULL). */
static struct spilib_ops *spilib_ops;
24
25struct gb_connection *to_fw_mgmt_connection(struct device *dev)
26{
27 struct gb_fw_core *fw_core = dev_get_drvdata(dev);
28
29 return fw_core->mgmt_connection;
30}
31
32static int gb_fw_spi_connection_init(struct gb_connection *connection)
33{
34 int ret;
35
36 if (!connection)
37 return 0;
38
39 ret = gb_connection_enable(connection);
40 if (ret)
41 return ret;
42
43 ret = gb_spilib_master_init(connection, &connection->bundle->dev,
44 spilib_ops);
45 if (ret) {
46 gb_connection_disable(connection);
47 return ret;
48 }
49
50 return 0;
51}
52
/*
 * Tear down the SPI connection: detach the master before disabling the
 * connection (reverse of gb_fw_spi_connection_init()). NULL is a no-op.
 */
static void gb_fw_spi_connection_exit(struct gb_connection *connection)
{
	if (!connection)
		return;

	gb_spilib_master_exit(connection);
	gb_connection_disable(connection);
}
61
/*
 * Bundle probe: walk the bundle's CPorts, create one connection per
 * recognized protocol, then initialize them. The management connection
 * is mandatory; download, SPI and CAP connections are best-effort and
 * are silently disabled (destroyed) if their init fails.
 */
static int gb_fw_core_probe(struct gb_bundle *bundle,
			    const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_fw_core *fw_core;
	int ret, i;
	u16 cport_id;
	u8 protocol_id;

	fw_core = kzalloc(sizeof(*fw_core), GFP_KERNEL);
	if (!fw_core)
		return -ENOMEM;

	/* Parse CPorts and create connections */
	for (i = 0; i < bundle->num_cports; i++) {
		cport_desc = &bundle->cport_desc[i];
		cport_id = le16_to_cpu(cport_desc->id);
		protocol_id = cport_desc->protocol_id;

		switch (protocol_id) {
		case GREYBUS_PROTOCOL_FW_MANAGEMENT:
			/* Disallow multiple Firmware Management CPorts */
			if (fw_core->mgmt_connection) {
				dev_err(&bundle->dev,
					"multiple management CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			connection = gb_connection_create(bundle, cport_id,
						gb_fw_mgmt_request_handler);
			if (IS_ERR(connection)) {
				ret = PTR_ERR(connection);
				dev_err(&bundle->dev,
					"failed to create management connection (%d)\n",
					ret);
				goto err_destroy_connections;
			}

			fw_core->mgmt_connection = connection;
			break;
		case GREYBUS_PROTOCOL_FW_DOWNLOAD:
			/* Disallow multiple Firmware Download CPorts */
			if (fw_core->download_connection) {
				dev_err(&bundle->dev,
					"multiple download CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is tolerated for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
						gb_fw_download_request_handler);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create download connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->download_connection = connection;
			}

			break;
		case GREYBUS_PROTOCOL_SPI:
			/* Disallow multiple SPI CPorts */
			if (fw_core->spi_connection) {
				dev_err(&bundle->dev,
					"multiple SPI CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is tolerated for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
							  NULL);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create SPI connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->spi_connection = connection;
			}

			break;
		case GREYBUS_PROTOCOL_AUTHENTICATION:
			/* Disallow multiple CAP CPorts */
			if (fw_core->cap_connection) {
				dev_err(&bundle->dev, "multiple Authentication CPorts found\n");
				ret = -EINVAL;
				goto err_destroy_connections;
			}

			/* Creation failure is tolerated for this protocol. */
			connection = gb_connection_create(bundle, cport_id,
							  NULL);
			if (IS_ERR(connection)) {
				dev_err(&bundle->dev, "failed to create Authentication connection (%ld)\n",
					PTR_ERR(connection));
			} else {
				fw_core->cap_connection = connection;
			}

			break;
		default:
			dev_err(&bundle->dev, "invalid protocol id (0x%02x)\n",
				protocol_id);
			ret = -EINVAL;
			goto err_destroy_connections;
		}
	}

	/* Firmware Management connection is mandatory */
	if (!fw_core->mgmt_connection) {
		dev_err(&bundle->dev, "missing management connection\n");
		ret = -ENODEV;
		goto err_destroy_connections;
	}

	ret = gb_fw_download_connection_init(fw_core->download_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize firmware download connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->download_connection);
		fw_core->download_connection = NULL;
	}

	ret = gb_fw_spi_connection_init(fw_core->spi_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize SPI connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->spi_connection);
		fw_core->spi_connection = NULL;
	}

	ret = gb_cap_connection_init(fw_core->cap_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize CAP connection, disable it (%d)\n",
			ret);
		gb_connection_destroy(fw_core->cap_connection);
		fw_core->cap_connection = NULL;
	}

	/* Management init failure is fatal, unlike the optional protocols. */
	ret = gb_fw_mgmt_connection_init(fw_core->mgmt_connection);
	if (ret) {
		/* We may still be able to work with the Interface */
		dev_err(&bundle->dev, "failed to initialize firmware management connection, disable it (%d)\n",
			ret);
		goto err_exit_connections;
	}

	greybus_set_drvdata(bundle, fw_core);

	/* FIXME: Remove this after S2 Loader gets runtime PM support */
	if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM))
		gb_pm_runtime_put_autosuspend(bundle);

	return 0;

err_exit_connections:
	gb_cap_connection_exit(fw_core->cap_connection);
	gb_fw_spi_connection_exit(fw_core->spi_connection);
	gb_fw_download_connection_exit(fw_core->download_connection);
err_destroy_connections:
	/* The *_destroy()/*_exit() helpers all tolerate NULL connections. */
	gb_connection_destroy(fw_core->mgmt_connection);
	gb_connection_destroy(fw_core->cap_connection);
	gb_connection_destroy(fw_core->spi_connection);
	gb_connection_destroy(fw_core->download_connection);
	kfree(fw_core);

	return ret;
}
231
/*
 * Bundle disconnect: resume the bundle (balancing the probe-time
 * put_autosuspend), exit all protocol connections and free the state.
 */
static void gb_fw_core_disconnect(struct gb_bundle *bundle)
{
	struct gb_fw_core *fw_core = greybus_get_drvdata(bundle);
	int ret;

	/* FIXME: Remove this after S2 Loader gets runtime PM support */
	if (!(bundle->intf->quirks & GB_INTERFACE_QUIRK_NO_PM)) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (ret)
			gb_pm_runtime_get_noresume(bundle);
	}

	gb_fw_mgmt_connection_exit(fw_core->mgmt_connection);
	gb_cap_connection_exit(fw_core->cap_connection);
	gb_fw_spi_connection_exit(fw_core->spi_connection);
	gb_fw_download_connection_exit(fw_core->download_connection);

	gb_connection_destroy(fw_core->mgmt_connection);
	gb_connection_destroy(fw_core->cap_connection);
	gb_connection_destroy(fw_core->spi_connection);
	gb_connection_destroy(fw_core->download_connection);

	kfree(fw_core);
}
256
/* Match any bundle of the firmware-management device class. */
static const struct greybus_bundle_id gb_fw_core_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_FW_MANAGEMENT) },
	{ }
};

static struct greybus_driver gb_fw_core_driver = {
	.name		= "gb-firmware",
	.probe		= gb_fw_core_probe,
	.disconnect	= gb_fw_core_disconnect,
	.id_table	= gb_fw_core_id_table,
};
268
269static int fw_core_init(void)
270{
271 int ret;
272
273 ret = fw_mgmt_init();
274 if (ret) {
275 pr_err("Failed to initialize fw-mgmt core (%d)\n", ret);
276 return ret;
277 }
278
279 ret = cap_init();
280 if (ret) {
281 pr_err("Failed to initialize component authentication core (%d)\n",
282 ret);
283 goto fw_mgmt_exit;
284 }
285
286 ret = greybus_register(&gb_fw_core_driver);
287 if (ret)
288 goto cap_exit;
289
290 return 0;
291
292cap_exit:
293 cap_exit();
294fw_mgmt_exit:
295 fw_mgmt_exit();
296
297 return ret;
298}
299module_init(fw_core_init);
300
/*
 * Module exit: deregister the bundle driver first so no new probes run,
 * then shut down the protocol cores in reverse init order.
 */
static void __exit fw_core_exit(void)
{
	greybus_deregister(&gb_fw_core_driver);
	cap_exit();
	fw_mgmt_exit();
}
308
309MODULE_ALIAS("greybus:firmware");
310MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
311MODULE_DESCRIPTION("Greybus Firmware Bundle Driver");
312MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
new file mode 100644
index 000000000000..2d7246887547
--- /dev/null
+++ b/drivers/staging/greybus/fw-download.c
@@ -0,0 +1,465 @@
1/*
2 * Greybus Firmware Download Protocol Driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/firmware.h>
11#include <linux/jiffies.h>
12#include <linux/mutex.h>
13#include <linux/workqueue.h>
14#include "firmware.h"
15#include "greybus.h"
16
17/* Estimated minimum buffer size, actual size can be smaller than this */
18#define MIN_FETCH_SIZE 512
19/* Timeout, in jiffies, within which fetch or release firmware must be called */
20#define NEXT_REQ_TIMEOUT_J msecs_to_jiffies(1000)
21
/* State for a single outstanding firmware-download request. */
struct fw_request {
	u8 firmware_id;			/* id handed to the module, 1-255 */
	bool disabled;			/* removed from the fw_requests list */
	bool timedout;			/* timed out; id stays blocked */
	char name[FW_NAME_SIZE];	/* firmware blob name on disk */
	const struct firmware *fw;	/* loaded firmware blob */
	struct list_head node;		/* entry in fw_download->fw_requests */

	struct delayed_work dwork;	/* per-request inactivity timeout */
	/* Timeout, in jiffies, within which the firmware shall download */
	unsigned long release_timeout_j;
	struct kref kref;		/* see get_fw_req()/put_fw_req() */
	struct fw_download *fw_download;	/* owning per-connection state */
};
36
/* Per-connection state for the firmware-download protocol. */
struct fw_download {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct list_head fw_requests;	/* outstanding fw_request entries */
	struct ida id_map;		/* firmware-id allocator (1-255) */
	struct mutex mutex;		/* protects the fw_requests list */
};
44
45static void fw_req_release(struct kref *kref)
46{
47 struct fw_request *fw_req = container_of(kref, struct fw_request, kref);
48
49 dev_dbg(fw_req->fw_download->parent, "firmware %s released\n",
50 fw_req->name);
51
52 release_firmware(fw_req->fw);
53
54 /*
55 * The request timed out and the module may send a fetch-fw or
56 * release-fw request later. Lets block the id we allocated for this
57 * request, so that the AP doesn't refer to a later fw-request (with
58 * same firmware_id) for the old timedout fw-request.
59 *
60 * NOTE:
61 *
62 * This also means that after 255 timeouts we will fail to service new
63 * firmware downloads. But what else can we do in that case anyway? Lets
64 * just hope that it never happens.
65 */
66 if (!fw_req->timedout)
67 ida_simple_remove(&fw_req->fw_download->id_map,
68 fw_req->firmware_id);
69
70 kfree(fw_req);
71}
72
/*
 * Incoming requests are serialized for a connection, and the only race possible
 * is between the timeout handler freeing this and an incoming request.
 *
 * The operations on the fw-request list are protected by the mutex and
 * get_fw_req() increments the reference count before returning a fw_req pointer
 * to the users.
 *
 * free_firmware() also takes the mutex while removing an entry from the list,
 * it guarantees that every user of fw_req has taken a kref-reference by now and
 * we wouldn't have any new users.
 *
 * Once the last user drops the reference, the fw_req structure is freed.
 */
static void put_fw_req(struct fw_request *fw_req)
{
	/* Last reference dropped -> fw_req_release() frees the request */
	kref_put(&fw_req->kref, fw_req_release);
}
91
92/* Caller must call put_fw_req() after using struct fw_request */
93static struct fw_request *get_fw_req(struct fw_download *fw_download,
94 u8 firmware_id)
95{
96 struct fw_request *fw_req;
97
98 mutex_lock(&fw_download->mutex);
99
100 list_for_each_entry(fw_req, &fw_download->fw_requests, node) {
101 if (fw_req->firmware_id == firmware_id) {
102 kref_get(&fw_req->kref);
103 goto unlock;
104 }
105 }
106
107 fw_req = NULL;
108
109unlock:
110 mutex_unlock(&fw_download->mutex);
111
112 return fw_req;
113}
114
/*
 * Remove a request from the fw_requests list and drop the list's reference.
 * Idempotent: the first caller marks the request disabled; later calls are
 * no-ops.
 *
 * NOTE(review): ->disabled is read without holding the mutex; the callers
 * appear to rely on per-connection request serialization and on
 * cancel_delayed_work_sync() to order accesses -- confirm.
 */
static void free_firmware(struct fw_download *fw_download,
			  struct fw_request *fw_req)
{
	/* Already disabled from timeout handlers */
	if (fw_req->disabled)
		return;

	mutex_lock(&fw_download->mutex);
	list_del(&fw_req->node);
	mutex_unlock(&fw_download->mutex);

	fw_req->disabled = true;
	put_fw_req(fw_req);
}
129
/*
 * Delayed-work handler: the module failed to send the next fetch/release
 * request within NEXT_REQ_TIMEOUT_J. Mark the request timed out (so
 * fw_req_release() keeps its id blocked) and drop it from the list.
 */
static void fw_request_timedout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct fw_request *fw_req = container_of(dwork, struct fw_request, dwork);
	struct fw_download *fw_download = fw_req->fw_download;

	dev_err(fw_download->parent,
		"Timed out waiting for fetch / release firmware requests: %u\n",
		fw_req->firmware_id);

	/* timedout must be set before the request is freed from the list */
	fw_req->timedout = true;
	free_firmware(fw_download, fw_req);
}
143
144static int exceeds_release_timeout(struct fw_request *fw_req)
145{
146 struct fw_download *fw_download = fw_req->fw_download;
147
148 if (time_before(jiffies, fw_req->release_timeout_j))
149 return 0;
150
151 dev_err(fw_download->parent,
152 "Firmware download didn't finish in time, abort: %d\n",
153 fw_req->firmware_id);
154
155 fw_req->timedout = true;
156 free_firmware(fw_download, fw_req);
157
158 return -ETIMEDOUT;
159}
160
/*
 * Allocate a fw_request, assign it a firmware id, load the matching firmware
 * blob from disk and arm the inactivity timeout.
 *
 * The blob name is built from the interface's DDBL1 ids, vendor/product ids
 * and the caller-supplied tag. Returns the new request (holding its initial
 * kref reference and linked on the fw_requests list) or an ERR_PTR.
 */
static struct fw_request *find_firmware(struct fw_download *fw_download,
					const char *tag)
{
	struct gb_interface *intf = fw_download->connection->bundle->intf;
	struct fw_request *fw_req;
	int ret, req_count;

	fw_req = kzalloc(sizeof(*fw_req), GFP_KERNEL);
	if (!fw_req)
		return ERR_PTR(-ENOMEM);

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_download->parent,
			"failed to allocate firmware id (%d)\n", ret);
		goto err_free_req;
	}
	fw_req->firmware_id = ret;

	snprintf(fw_req->name, sizeof(fw_req->name),
		 FW_NAME_PREFIX "%08x_%08x_%08x_%08x_%s.tftf",
		 intf->ddbl1_manufacturer_id, intf->ddbl1_product_id,
		 intf->vendor_id, intf->product_id, tag);

	dev_info(fw_download->parent, "Requested firmware package '%s'\n",
		 fw_req->name);

	ret = request_firmware(&fw_req->fw, fw_req->name, fw_download->parent);
	if (ret) {
		dev_err(fw_download->parent,
			"firmware request failed for %s (%d)\n", fw_req->name,
			ret);
		goto err_free_id;
	}

	fw_req->fw_download = fw_download;
	kref_init(&fw_req->kref);

	mutex_lock(&fw_download->mutex);
	list_add(&fw_req->node, &fw_download->fw_requests);
	mutex_unlock(&fw_download->mutex);

	/* Timeout, in jiffies, within which firmware should get loaded */
	req_count = DIV_ROUND_UP(fw_req->fw->size, MIN_FETCH_SIZE);
	fw_req->release_timeout_j = jiffies + req_count * NEXT_REQ_TIMEOUT_J;

	/* Arm the per-request inactivity timeout last, once fully set up */
	INIT_DELAYED_WORK(&fw_req->dwork, fw_request_timedout);
	schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);

	return fw_req;

err_free_id:
	ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
err_free_req:
	kfree(fw_req);

	return ERR_PTR(ret);
}
221
/*
 * Handle a find-firmware request from the module: validate the payload,
 * locate the firmware blob matching the supplied tag, and reply with the
 * assigned firmware id and blob size.
 */
static int fw_download_find_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_find_firmware_request *request;
	struct gb_fw_download_find_firmware_response *response;
	struct fw_request *fw_req;
	const char *tag;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"illegal size of find firmware request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	tag = (const char *)request->firmware_tag;

	/* firmware_tag must be null-terminated */
	if (strnlen(tag, GB_FIRMWARE_TAG_MAX_SIZE) == GB_FIRMWARE_TAG_MAX_SIZE) {
		dev_err(fw_download->parent,
			"firmware-tag is not null-terminated\n");
		return -EINVAL;
	}

	fw_req = find_firmware(fw_download, tag);
	if (IS_ERR(fw_req))
		return PTR_ERR(fw_req);

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL)) {
		dev_err(fw_download->parent, "error allocating response\n");
		/* Undo find_firmware(): unlink and drop the reference */
		free_firmware(fw_download, fw_req);
		return -ENOMEM;
	}

	response = op->response->payload;
	response->firmware_id = fw_req->firmware_id;
	response->size = cpu_to_le32(fw_req->fw->size);

	dev_dbg(fw_download->parent,
		"firmware size is %zu bytes\n", fw_req->fw->size);

	return 0;
}
267
/*
 * Handle a fetch-firmware request: validate the requested window against the
 * blob, copy the requested bytes into the response and re-arm the inactivity
 * timeout. Fails with -ETIMEDOUT if the request already timed out or the
 * overall download window has elapsed.
 */
static int fw_download_fetch_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_fetch_firmware_request *request;
	struct gb_fw_download_fetch_firmware_response *response;
	struct fw_request *fw_req;
	const struct firmware *fw;
	unsigned int offset, size;
	u8 firmware_id;
	int ret = 0;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"Illegal size of fetch firmware request (%zu %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	offset = le32_to_cpu(request->offset);
	size = le32_to_cpu(request->size);
	firmware_id = request->firmware_id;

	fw_req = get_fw_req(fw_download, firmware_id);
	if (!fw_req) {
		dev_err(fw_download->parent,
			"firmware not available for id: %02u\n", firmware_id);
		return -EINVAL;
	}

	/* Make sure work handler isn't running in parallel */
	cancel_delayed_work_sync(&fw_req->dwork);

	/* We timed-out before reaching here ? */
	if (fw_req->disabled) {
		ret = -ETIMEDOUT;
		goto put_fw;
	}

	/*
	 * Firmware download must finish within a limited time interval. If it
	 * doesn't, then we might have a buggy Module on the other side. Abort
	 * download.
	 */
	ret = exceeds_release_timeout(fw_req);
	if (ret)
		goto put_fw;

	fw = fw_req->fw;

	/* Reject windows outside the blob (also catches offset+size wrap) */
	if (offset >= fw->size || size > fw->size - offset) {
		dev_err(fw_download->parent,
			"bad fetch firmware request (offs = %u, size = %u)\n",
			offset, size);
		ret = -EINVAL;
		goto put_fw;
	}

	if (!gb_operation_response_alloc(op, sizeof(*response) + size,
					 GFP_KERNEL)) {
		dev_err(fw_download->parent,
			"error allocating fetch firmware response\n");
		ret = -ENOMEM;
		goto put_fw;
	}

	response = op->response->payload;
	memcpy(response->data, fw->data + offset, size);

	dev_dbg(fw_download->parent,
		"responding with firmware (offs = %u, size = %u)\n", offset,
		size);

	/* Refresh timeout */
	schedule_delayed_work(&fw_req->dwork, NEXT_REQ_TIMEOUT_J);

put_fw:
	put_fw_req(fw_req);

	return ret;
}
350
/*
 * Handle a release-firmware request: the module is done with the blob, so
 * cancel the inactivity timeout, unlink the request and drop the references
 * (the list's via free_firmware() and ours via put_fw_req()).
 */
static int fw_download_release_firmware(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_download *fw_download = gb_connection_get_data(connection);
	struct gb_fw_download_release_firmware_request *request;
	struct fw_request *fw_req;
	u8 firmware_id;

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_download->parent,
			"Illegal size of release firmware request (%zu %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;
	firmware_id = request->firmware_id;

	fw_req = get_fw_req(fw_download, firmware_id);
	if (!fw_req) {
		dev_err(fw_download->parent,
			"firmware not available for id: %02u\n", firmware_id);
		return -EINVAL;
	}

	cancel_delayed_work_sync(&fw_req->dwork);

	free_firmware(fw_download, fw_req);
	put_fw_req(fw_req);

	dev_dbg(fw_download->parent, "release firmware\n");

	return 0;
}
385
386int gb_fw_download_request_handler(struct gb_operation *op)
387{
388 u8 type = op->type;
389
390 switch (type) {
391 case GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE:
392 return fw_download_find_firmware(op);
393 case GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE:
394 return fw_download_fetch_firmware(op);
395 case GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE:
396 return fw_download_release_firmware(op);
397 default:
398 dev_err(&op->connection->bundle->dev,
399 "unsupported request: %u\n", type);
400 return -EINVAL;
401 }
402}
403
/*
 * Set up per-connection firmware-download state and enable the connection.
 * A NULL connection (bundle without a fw-download CPort) is treated as
 * success. Returns 0 or a negative errno.
 */
int gb_fw_download_connection_init(struct gb_connection *connection)
{
	struct fw_download *fw_download;
	int ret;

	if (!connection)
		return 0;

	fw_download = kzalloc(sizeof(*fw_download), GFP_KERNEL);
	if (!fw_download)
		return -ENOMEM;

	fw_download->parent = &connection->bundle->dev;
	INIT_LIST_HEAD(&fw_download->fw_requests);
	ida_init(&fw_download->id_map);
	/* State must be fully set before the connection is enabled */
	gb_connection_set_data(connection, fw_download);
	fw_download->connection = connection;
	mutex_init(&fw_download->mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_destroy_id_map;

	return 0;

err_destroy_id_map:
	ida_destroy(&fw_download->id_map);
	kfree(fw_download);

	return ret;
}
435
/*
 * Disable the connection and release every pending firmware request, then
 * free the per-connection state. Safe against the per-request timeout
 * handler: references are taken under the mutex before the list is drained.
 */
void gb_fw_download_connection_exit(struct gb_connection *connection)
{
	struct fw_download *fw_download;
	struct fw_request *fw_req, *tmp;

	if (!connection)
		return;

	fw_download = gb_connection_get_data(connection);
	gb_connection_disable(fw_download->connection);

	/*
	 * Make sure we have a reference to the pending requests, before they
	 * are freed from the timeout handler.
	 */
	mutex_lock(&fw_download->mutex);
	list_for_each_entry(fw_req, &fw_download->fw_requests, node)
		kref_get(&fw_req->kref);
	mutex_unlock(&fw_download->mutex);

	/* Release pending firmware packages */
	list_for_each_entry_safe(fw_req, tmp, &fw_download->fw_requests, node) {
		cancel_delayed_work_sync(&fw_req->dwork);
		free_firmware(fw_download, fw_req);
		put_fw_req(fw_req);
	}

	ida_destroy(&fw_download->id_map);
	kfree(fw_download);
}
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
new file mode 100644
index 000000000000..3cd6cf0a656b
--- /dev/null
+++ b/drivers/staging/greybus/fw-management.c
@@ -0,0 +1,721 @@
1/*
2 * Greybus Firmware Management Protocol Driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/cdev.h>
11#include <linux/completion.h>
12#include <linux/firmware.h>
13#include <linux/fs.h>
14#include <linux/idr.h>
15#include <linux/ioctl.h>
16#include <linux/uaccess.h>
17
18#include "firmware.h"
19#include "greybus_firmware.h"
20#include "greybus.h"
21
22#define FW_MGMT_TIMEOUT_MS 1000
23
/* Per-connection firmware-management state, exposed via a char device. */
struct fw_mgmt {
	struct device *parent;		/* bundle device, used for logging */
	struct gb_connection *connection;
	struct kref kref;		/* see get_fw_mgmt()/put_fw_mgmt() */
	struct list_head node;		/* entry in the global fw_mgmt_list */

	/* Common id-map for interface and backend firmware requests */
	struct ida id_map;
	struct mutex mutex;		/* serializes ioctls, guards ->disabled */
	struct completion completion;	/* completed by loaded/updated events */
	struct cdev cdev;		/* char device for userspace ioctls */
	struct device *class_device;	/* sysfs node under fw_mgmt_class */
	dev_t dev_num;
	unsigned int timeout_jiffies;	/* see FW_MGMT_IOC_SET_TIMEOUT_MS */
	bool disabled; /* connection getting disabled */

	/* Interface Firmware specific fields */
	bool mode_switch_started;
	bool intf_fw_loaded;
	u8 intf_fw_request_id;
	u8 intf_fw_status;
	u16 intf_fw_major;
	u16 intf_fw_minor;

	/* Backend Firmware specific fields */
	u8 backend_fw_request_id;
	u8 backend_fw_status;
};
52
/*
 * Number of minor devices this driver supports.
 * There will be exactly one required per Interface.
 */
#define NUM_MINORS		U8_MAX

static struct class *fw_mgmt_class;	/* class for gb-fw-mgmt-<N> nodes */
static dev_t fw_mgmt_dev_num;		/* base of the char-dev region */
static DEFINE_IDA(fw_mgmt_minors_map);	/* allocator for char-dev minors */
static LIST_HEAD(fw_mgmt_list);		/* all live fw_mgmt instances */
static DEFINE_MUTEX(list_mutex);	/* protects fw_mgmt_list */
64
65static void fw_mgmt_kref_release(struct kref *kref)
66{
67 struct fw_mgmt *fw_mgmt = container_of(kref, struct fw_mgmt, kref);
68
69 ida_destroy(&fw_mgmt->id_map);
70 kfree(fw_mgmt);
71}
72
/*
 * All users of fw_mgmt take a reference (from within list_mutex lock), before
 * they get a pointer to play with. And the structure will be freed only after
 * the last user has put the reference to it.
 */
static void put_fw_mgmt(struct fw_mgmt *fw_mgmt)
{
	/* Last reference dropped -> fw_mgmt_kref_release() frees it */
	kref_put(&fw_mgmt->kref, fw_mgmt_kref_release);
}
82
83/* Caller must call put_fw_mgmt() after using struct fw_mgmt */
84static struct fw_mgmt *get_fw_mgmt(struct cdev *cdev)
85{
86 struct fw_mgmt *fw_mgmt;
87
88 mutex_lock(&list_mutex);
89
90 list_for_each_entry(fw_mgmt, &fw_mgmt_list, node) {
91 if (&fw_mgmt->cdev == cdev) {
92 kref_get(&fw_mgmt->kref);
93 goto unlock;
94 }
95 }
96
97 fw_mgmt = NULL;
98
99unlock:
100 mutex_unlock(&list_mutex);
101
102 return fw_mgmt;
103}
104
/*
 * Query the running interface firmware's tag and version over the fw-mgmt
 * connection and fill @fw_info (used by the FW_MGMT_IOC_GET_INTF_FW ioctl).
 */
static int fw_mgmt_interface_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_intf_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_interface_fw_version_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION, NULL, 0,
				&response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent,
			"failed to get interface firmware version (%d)\n", ret);
		return ret;
	}

	fw_info->major = le16_to_cpu(response.major);
	fw_info->minor = le16_to_cpu(response.minor);

	strncpy(fw_info->firmware_tag, response.firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error but
	 * don't fail.
	 */
	if (fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent,
			"fw-version: firmware-tag is not NULL terminated\n");
		fw_info->firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] = '\0';
	}

	return 0;
}
139
/*
 * Ask the Interface to load and validate the firmware identified by @tag via
 * @load_method (UniPro or internal). On success the Interface replies later
 * with an asynchronous loaded-fw request, matched through the allocated
 * request id (see fw_mgmt_interface_fw_loaded_operation()).
 */
static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
					       u8 load_method, const char *tag)
{
	struct gb_fw_mgmt_load_and_validate_fw_request request;
	int ret;

	if (load_method != GB_FW_LOAD_METHOD_UNIPRO &&
	    load_method != GB_FW_LOAD_METHOD_INTERNAL) {
		dev_err(fw_mgmt->parent,
			"invalid load-method (%d)\n", load_method);
		return -EINVAL;
	}

	request.load_method = load_method;
	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "load-and-validate: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->intf_fw_request_id = ret;
	fw_mgmt->intf_fw_loaded = false;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		/* Undo the id allocation so it can be reused */
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->intf_fw_request_id);
		fw_mgmt->intf_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"load and validate firmware request failed (%d)\n",
			ret);
		return ret;
	}

	return 0;
}
192
/*
 * Request handler for the Interface's asynchronous loaded-fw notification;
 * records status/version and completes the pending
 * FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE ioctl waiter.
 */
static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_loaded_fw_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent,
			"unexpected firmware loaded request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of firmware loaded request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->intf_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for firmware loaded request (%02u != %02u)\n",
			fw_mgmt->intf_fw_request_id, request->request_id);
		return -ENODEV;
	}

	/* Request fulfilled: release the id and latch the reported state */
	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
	fw_mgmt->intf_fw_request_id = 0;
	fw_mgmt->intf_fw_status = request->status;
	fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
	fw_mgmt->intf_fw_minor = le16_to_cpu(request->minor);

	if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to load interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else if (fw_mgmt->intf_fw_status == GB_FW_LOAD_STATUS_VALIDATION_FAILED)
		dev_err(fw_mgmt->parent,
			"failed to validate interface firmware, status:%02x\n",
			fw_mgmt->intf_fw_status);
	else
		fw_mgmt->intf_fw_loaded = true;

	complete(&fw_mgmt->completion);

	return 0;
}
242
/*
 * Query the version of the backend firmware identified by the tag in
 * @fw_info. The major/minor fields are only meaningful when status is
 * GB_FW_BACKEND_VERSION_STATUS_SUCCESS; they are zeroed otherwise.
 */
static int fw_mgmt_backend_fw_version_operation(struct fw_mgmt *fw_mgmt,
		struct fw_mgmt_ioc_get_backend_version *fw_info)
{
	struct gb_connection *connection = fw_mgmt->connection;
	struct gb_fw_mgmt_backend_fw_version_request request;
	struct gb_fw_mgmt_backend_fw_version_response response;
	int ret;

	strncpy(request.firmware_tag, fw_info->firmware_tag,
		GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-version: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	ret = gb_operation_sync(connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_VERSION, &request,
				sizeof(request), &response, sizeof(response));
	if (ret) {
		dev_err(fw_mgmt->parent, "failed to get version of %s backend firmware (%d)\n",
			fw_info->firmware_tag, ret);
		return ret;
	}

	fw_info->status = response.status;

	/* Reset version as that should be non-zero only for success case */
	fw_info->major = 0;
	fw_info->minor = 0;

	switch (fw_info->status) {
	case GB_FW_BACKEND_VERSION_STATUS_SUCCESS:
		fw_info->major = le16_to_cpu(response.major);
		fw_info->minor = le16_to_cpu(response.minor);
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE:
	case GB_FW_BACKEND_VERSION_STATUS_RETRY:
		break;
	case GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED:
		dev_err(fw_mgmt->parent,
			"Firmware with tag %s is not supported by Interface\n",
			fw_info->firmware_tag);
		break;
	default:
		/* Unknown status is reported but still returned to userspace */
		dev_err(fw_mgmt->parent, "Invalid status received: %u\n",
			fw_info->status);
	}

	return 0;
}
298
/*
 * Start an update of the backend firmware identified by @tag. The Interface
 * replies later with an asynchronous updated request, matched through the
 * allocated request id (see fw_mgmt_backend_fw_updated_operation()).
 */
static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
					       char *tag)
{
	struct gb_fw_mgmt_backend_fw_update_request request;
	int ret;

	strncpy(request.firmware_tag, tag, GB_FIRMWARE_TAG_MAX_SIZE);

	/*
	 * The firmware-tag should be NULL terminated, otherwise throw error and
	 * fail.
	 */
	if (request.firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE - 1] != '\0') {
		dev_err(fw_mgmt->parent, "backend-update: firmware-tag is not NULL terminated\n");
		return -EINVAL;
	}

	/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
	ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
	if (ret < 0) {
		dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
			ret);
		return ret;
	}

	fw_mgmt->backend_fw_request_id = ret;
	request.request_id = ret;

	ret = gb_operation_sync(fw_mgmt->connection,
				GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
				sizeof(request), NULL, 0);
	if (ret) {
		/* Undo the id allocation so it can be reused */
		ida_simple_remove(&fw_mgmt->id_map,
				  fw_mgmt->backend_fw_request_id);
		fw_mgmt->backend_fw_request_id = 0;
		dev_err(fw_mgmt->parent,
			"backend %s firmware update request failed (%d)\n", tag,
			ret);
		return ret;
	}

	return 0;
}
342
/*
 * Request handler for the Interface's asynchronous backend-fw-updated
 * notification; records the status and completes the pending
 * FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE ioctl waiter.
 */
static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct fw_mgmt *fw_mgmt = gb_connection_get_data(connection);
	struct gb_fw_mgmt_backend_fw_updated_request *request;

	/* No pending load and validate request ? */
	if (!fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "unexpected backend firmware updated request received\n");
		return -ENODEV;
	}

	if (op->request->payload_size != sizeof(*request)) {
		dev_err(fw_mgmt->parent, "illegal size of backend firmware updated request (%zu != %zu)\n",
			op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Invalid request-id ? */
	if (request->request_id != fw_mgmt->backend_fw_request_id) {
		dev_err(fw_mgmt->parent, "invalid request id for backend firmware updated request (%02u != %02u)\n",
			fw_mgmt->backend_fw_request_id, request->request_id);
		return -ENODEV;
	}

	/* Request fulfilled: release the id and latch the reported status */
	ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
	fw_mgmt->backend_fw_request_id = 0;
	fw_mgmt->backend_fw_status = request->status;

	if ((fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_SUCCESS) &&
	    (fw_mgmt->backend_fw_status != GB_FW_BACKEND_FW_STATUS_RETRY))
		dev_err(fw_mgmt->parent,
			"failed to load backend firmware: %02x\n",
			fw_mgmt->backend_fw_status);

	complete(&fw_mgmt->completion);

	return 0;
}
384
385/* Char device fops */
386
387static int fw_mgmt_open(struct inode *inode, struct file *file)
388{
389 struct fw_mgmt *fw_mgmt = get_fw_mgmt(inode->i_cdev);
390
391 /* fw_mgmt structure can't get freed until file descriptor is closed */
392 if (fw_mgmt) {
393 file->private_data = fw_mgmt;
394 return 0;
395 }
396
397 return -ENODEV;
398}
399
400static int fw_mgmt_release(struct inode *inode, struct file *file)
401{
402 struct fw_mgmt *fw_mgmt = file->private_data;
403
404 put_fw_mgmt(fw_mgmt);
405 return 0;
406}
407
/*
 * Dispatch one fw-mgmt ioctl. Called with fw_mgmt->mutex held (see
 * fw_mgmt_ioctl_unlocked()), so ioctls are fully serialized per device.
 * Returns 0 on success, a negative errno on failure, -ENOTTY for unknown
 * commands.
 */
static int fw_mgmt_ioctl(struct fw_mgmt *fw_mgmt, unsigned int cmd,
			 void __user *buf)
{
	struct fw_mgmt_ioc_get_intf_version intf_fw_info;
	struct fw_mgmt_ioc_get_backend_version backend_fw_info;
	struct fw_mgmt_ioc_intf_load_and_validate intf_load;
	struct fw_mgmt_ioc_backend_fw_update backend_update;
	unsigned int timeout;
	int ret;

	/* Reject any operations after mode-switch has started */
	if (fw_mgmt->mode_switch_started)
		return -EBUSY;

	switch (cmd) {
	case FW_MGMT_IOC_GET_INTF_FW:
		ret = fw_mgmt_interface_fw_version_operation(fw_mgmt,
							     &intf_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &intf_fw_info, sizeof(intf_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_GET_BACKEND_FW:
		if (copy_from_user(&backend_fw_info, buf,
				   sizeof(backend_fw_info)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_version_operation(fw_mgmt,
							   &backend_fw_info);
		if (ret)
			return ret;

		if (copy_to_user(buf, &backend_fw_info,
				 sizeof(backend_fw_info)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE:
		if (copy_from_user(&intf_load, buf, sizeof(intf_load)))
			return -EFAULT;

		ret = fw_mgmt_load_and_validate_operation(fw_mgmt,
				intf_load.load_method, intf_load.firmware_tag);
		if (ret)
			return ret;

		/* Wait for the async loaded-fw notification to complete us */
		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for firmware load and validation to finish\n");
			return -ETIMEDOUT;
		}

		intf_load.status = fw_mgmt->intf_fw_status;
		intf_load.major = fw_mgmt->intf_fw_major;
		intf_load.minor = fw_mgmt->intf_fw_minor;

		if (copy_to_user(buf, &intf_load, sizeof(intf_load)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE:
		if (copy_from_user(&backend_update, buf,
				   sizeof(backend_update)))
			return -EFAULT;

		ret = fw_mgmt_backend_fw_update_operation(fw_mgmt,
				backend_update.firmware_tag);
		if (ret)
			return ret;

		/* Wait for the async updated notification to complete us */
		if (!wait_for_completion_timeout(&fw_mgmt->completion,
						 fw_mgmt->timeout_jiffies)) {
			dev_err(fw_mgmt->parent, "timed out waiting for backend firmware update to finish\n");
			return -ETIMEDOUT;
		}

		backend_update.status = fw_mgmt->backend_fw_status;

		if (copy_to_user(buf, &backend_update, sizeof(backend_update)))
			return -EFAULT;

		return 0;
	case FW_MGMT_IOC_SET_TIMEOUT_MS:
		if (get_user(timeout, (unsigned int __user *)buf))
			return -EFAULT;

		if (!timeout) {
			dev_err(fw_mgmt->parent, "timeout can't be zero\n");
			return -EINVAL;
		}

		fw_mgmt->timeout_jiffies = msecs_to_jiffies(timeout);

		return 0;
	case FW_MGMT_IOC_MODE_SWITCH:
		if (!fw_mgmt->intf_fw_loaded) {
			dev_err(fw_mgmt->parent,
				"Firmware not loaded for mode-switch\n");
			return -EPERM;
		}

		/*
		 * Disallow new ioctls as the fw-core bundle driver is going to
		 * get disconnected soon and the character device will get
		 * removed.
		 */
		fw_mgmt->mode_switch_started = true;

		ret = gb_interface_request_mode_switch(fw_mgmt->connection->intf);
		if (ret) {
			dev_err(fw_mgmt->parent, "Mode-switch failed: %d\n",
				ret);
			fw_mgmt->mode_switch_started = false;
			return ret;
		}

		return 0;
	default:
		return -ENOTTY;
	}
}
532
/*
 * unlocked_ioctl file operation: serializes ioctls per device, rejects new
 * operations once teardown has started (->disabled), and brackets the actual
 * work with runtime-PM get/put on the bundle.
 */
static long fw_mgmt_ioctl_unlocked(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	struct fw_mgmt *fw_mgmt = file->private_data;
	struct gb_bundle *bundle = fw_mgmt->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do few operations in parallel. For example,
	 * updating Interface firmware in parallel for the same Interface. There
	 * is no need to do things in parallel for speed and we can avoid having
	 * complicated code for now.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&fw_mgmt->mutex);
	if (!fw_mgmt->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = fw_mgmt_ioctl(fw_mgmt, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&fw_mgmt->mutex);

	return ret;
}
564
/* File operations for the gb-fw-mgmt-<N> char devices */
static const struct file_operations fw_mgmt_fops = {
	.owner = THIS_MODULE,
	.open = fw_mgmt_open,
	.release = fw_mgmt_release,
	.unlocked_ioctl = fw_mgmt_ioctl_unlocked,
};
571
572int gb_fw_mgmt_request_handler(struct gb_operation *op)
573{
574 u8 type = op->type;
575
576 switch (type) {
577 case GB_FW_MGMT_TYPE_LOADED_FW:
578 return fw_mgmt_interface_fw_loaded_operation(op);
579 case GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED:
580 return fw_mgmt_backend_fw_updated_operation(op);
581 default:
582 dev_err(&op->connection->bundle->dev,
583 "unsupported request: %u\n", type);
584 return -EINVAL;
585 }
586}
587
/*
 * Set up fw-mgmt for a connection: enable it and create the gb-fw-mgmt-<N>
 * char device for userspace control. A NULL connection is treated as
 * success. On failure the setup is unwound in reverse order and the final
 * put_fw_mgmt() frees the structure.
 */
int gb_fw_mgmt_connection_init(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;
	int ret, minor;

	if (!connection)
		return 0;

	fw_mgmt = kzalloc(sizeof(*fw_mgmt), GFP_KERNEL);
	if (!fw_mgmt)
		return -ENOMEM;

	fw_mgmt->parent = &connection->bundle->dev;
	fw_mgmt->timeout_jiffies = msecs_to_jiffies(FW_MGMT_TIMEOUT_MS);
	fw_mgmt->connection = connection;

	/* State must be fully set before the connection is enabled */
	gb_connection_set_data(connection, fw_mgmt);
	init_completion(&fw_mgmt->completion);
	ida_init(&fw_mgmt->id_map);
	mutex_init(&fw_mgmt->mutex);
	kref_init(&fw_mgmt->kref);

	mutex_lock(&list_mutex);
	list_add(&fw_mgmt->node, &fw_mgmt_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with fw-mgmt */
	fw_mgmt->dev_num = MKDEV(MAJOR(fw_mgmt_dev_num), minor);
	cdev_init(&fw_mgmt->cdev, &fw_mgmt_fops);

	ret = cdev_add(&fw_mgmt->cdev, fw_mgmt->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	fw_mgmt->class_device = device_create(fw_mgmt_class, fw_mgmt->parent,
					      fw_mgmt->dev_num, NULL,
					      "gb-fw-mgmt-%d", minor);
	if (IS_ERR(fw_mgmt->class_device)) {
		ret = PTR_ERR(fw_mgmt->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&fw_mgmt->cdev);
err_remove_ida:
	ida_simple_remove(&fw_mgmt_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	put_fw_mgmt(fw_mgmt);

	return ret;
}
658
/*
 * Tear down fw-mgmt for a connection: remove the char device, block new
 * ioctls, disable the connection and drop our reference. The structure is
 * freed once the last file-descriptor holder drops its reference.
 */
void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
{
	struct fw_mgmt *fw_mgmt;

	if (!connection)
		return;

	fw_mgmt = gb_connection_get_data(connection);

	device_destroy(fw_mgmt_class, fw_mgmt->dev_num);
	cdev_del(&fw_mgmt->cdev);
	ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&fw_mgmt->mutex);
	fw_mgmt->disabled = true;
	mutex_unlock(&fw_mgmt->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(fw_mgmt->connection);

	/* Disallow new users to get access to the fw_mgmt structure */
	mutex_lock(&list_mutex);
	list_del(&fw_mgmt->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of fw_mgmt have taken a reference to it by now;
	 * drop our reference and let the last user free fw_mgmt.
	 */
	put_fw_mgmt(fw_mgmt);
}
695
696int fw_mgmt_init(void)
697{
698 int ret;
699
700 fw_mgmt_class = class_create(THIS_MODULE, "gb_fw_mgmt");
701 if (IS_ERR(fw_mgmt_class))
702 return PTR_ERR(fw_mgmt_class);
703
704 ret = alloc_chrdev_region(&fw_mgmt_dev_num, 0, NUM_MINORS,
705 "gb_fw_mgmt");
706 if (ret)
707 goto err_remove_class;
708
709 return 0;
710
711err_remove_class:
712 class_destroy(fw_mgmt_class);
713 return ret;
714}
715
/* Undo fw_mgmt_init() in reverse order and drain the minors allocator. */
void fw_mgmt_exit(void)
{
	unregister_chrdev_region(fw_mgmt_dev_num, NUM_MINORS);
	class_destroy(fw_mgmt_class);
	ida_destroy(&fw_mgmt_minors_map);
}
diff --git a/drivers/staging/greybus/gb-camera.h b/drivers/staging/greybus/gb-camera.h
new file mode 100644
index 000000000000..d45dabc5b367
--- /dev/null
+++ b/drivers/staging/greybus/gb-camera.h
@@ -0,0 +1,127 @@
1/*
2 * Greybus Camera protocol driver.
3 *
4 * Copyright 2015 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8#ifndef __GB_CAMERA_H
9#define __GB_CAMERA_H
10
11#include <linux/v4l2-mediabus.h>
12
13/* Input flags need to be set from the caller */
14#define GB_CAMERA_IN_FLAG_TEST (1 << 0)
15/* Output flags returned */
16#define GB_CAMERA_OUT_FLAG_ADJUSTED (1 << 0)
17
18/**
19 * struct gb_camera_stream - Represents greybus camera stream.
20 * @width: Stream width in pixels.
21 * @height: Stream height in pixels.
22 * @pixel_code: Media bus pixel code.
23 * @vc: MIPI CSI virtual channel.
24 * @dt: MIPI CSI data types. Most formats use a single data type, in which case
25 * the second element will be ignored.
26 * @max_size: Maximum size of a frame in bytes. The camera module guarantees
27 * that all data between the Frame Start and Frame End packet for
28 * the associated virtual channel and data type(s) will not exceed
29 * this size.
30 */
struct gb_camera_stream {
	unsigned int width;			/* in pixels */
	unsigned int height;			/* in pixels */
	enum v4l2_mbus_pixelcode pixel_code;
	unsigned int vc;			/* MIPI CSI virtual channel */
	unsigned int dt[2];			/* MIPI CSI data type(s) */
	unsigned int max_size;			/* max frame size in bytes */
};
39
40/**
41 * struct gb_camera_csi_params - CSI configuration parameters
42 * @num_lanes: number of CSI data lanes
43 * @clk_freq: CSI clock frequency in Hz
44 */
struct gb_camera_csi_params {
	unsigned int num_lanes;		/* number of CSI data lanes */
	unsigned int clk_freq;		/* CSI clock frequency in Hz */
};
49
50/**
51 * struct gb_camera_ops - Greybus camera operations, used by the Greybus camera
52 * driver to expose operations to the host camera driver.
53 * @capabilities: Retrieve camera capabilities and store them in the buffer
54 * 'buf' capabilities. The buffer maximum size is specified by
55 * the caller in the 'size' parameter, and the effective
56 * capabilities size is returned from the function. If the buffer
57 * size is too small to hold the capabilities an error is
58 * returned and the buffer is left untouched.
59 *
60 * @configure_streams: Negotiate configuration and prepare the module for video
61 * capture. The caller specifies the number of streams it
62 * requests in the 'nstreams' argument and the associated
63 * streams configurations in the 'streams' argument. The
64 * GB_CAMERA_IN_FLAG_TEST 'flag' can be set to test a
65 * configuration without applying it, otherwise the
66 * configuration is applied by the module. The module can
67 * decide to modify the requested configuration, including
68 * using a different number of streams. In that case the
69 * modified configuration won't be applied, the
70 * GB_CAMERA_OUT_FLAG_ADJUSTED 'flag' will be set upon
71 * return, and the modified configuration and number of
72 * streams stored in 'streams' and 'array'. The module
73 * returns its CSI-2 bus parameters in the 'csi_params'
74 * structure in all cases.
75 *
76 * @capture: Submit a capture request. The supplied 'request_id' must be unique
77 * and higher than the IDs of all the previously submitted requests.
78 * The 'streams' argument specifies which streams are affected by the
79 * request in the form of a bitmask, with bits corresponding to the
80 * configured streams indexes. If the request contains settings, the
81 * 'settings' argument points to the settings buffer and its size is
82 * specified by the 'settings_size' argument. Otherwise the 'settings'
83 * argument should be set to NULL and 'settings_size' to 0.
84 *
85 * @flush: Flush the capture requests queue. Return the ID of the last request
86 * that will processed by the device before it stops transmitting video
87 * frames. All queued capture requests with IDs higher than the returned
88 * ID will be dropped without being processed.
89 */
/* Operation semantics are documented in the kernel-doc comment above. */
struct gb_camera_ops {
	ssize_t (*capabilities)(void *priv, char *buf, size_t len);
	int (*configure_streams)(void *priv, unsigned int *nstreams,
			unsigned int *flags, struct gb_camera_stream *streams,
			struct gb_camera_csi_params *csi_params);
	int (*capture)(void *priv, u32 request_id,
		       unsigned int streams, unsigned int num_frames,
		       size_t settings_size, const void *settings);
	int (*flush)(void *priv, u32 *request_id);
};
100
101/**
102 * struct gb_camera_module - Represents greybus camera module.
103 * @priv: Module private data, passed to all camera operations.
104 * @ops: Greybus camera operation callbacks.
105 * @interface_id: Interface id of the module.
106 * @refcount: Reference counting object.
107 * @release: Module release function.
108 * @list: List entry in the camera modules list.
109 */
struct gb_camera_module {
	void *priv;			/* passed as first argument to all ops */
	const struct gb_camera_ops *ops;

	unsigned int interface_id;
	struct kref refcount;		/* released through @release */
	void (*release)(struct kref *kref);
	struct list_head list; /* Global list */
};
119
/*
 * Invoke camera operation @op on module @f, passing the module's private
 * data as the first argument.  Returns -ENODEV when @f is NULL and
 * -ENOIOCTLCMD when the module does not implement @op.
 * Note: @f is evaluated more than once — do not pass an expression with
 * side effects.
 */
#define gb_camera_call(f, op, args...)      \
	(!(f) ? -ENODEV : (((f)->ops->op) ? \
	(f)->ops->op((f)->priv, ##args) : -ENOIOCTLCMD))

int gb_camera_register(struct gb_camera_module *module);
int gb_camera_unregister(struct gb_camera_module *module);

#endif /* __GB_CAMERA_H */
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
new file mode 100644
index 000000000000..bcde7c9a0f17
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.c
@@ -0,0 +1,360 @@
1/*
2 * Greybus Bridged-Phy Bus driver
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/kernel.h>
16#include <linux/slab.h>
17#include <linux/device.h>
18
19#include "greybus.h"
20#include "gbphy.h"
21
22#define GB_GBPHY_AUTOSUSPEND_MS 3000
23
/* Per-bundle state: tracks every gbphy child device created for a bundle. */
struct gbphy_host {
	struct gb_bundle *bundle;
	struct list_head devices;	/* list of struct gbphy_device.list */
};

/* Allocator of unique ids for the "gbphy%d" device names */
static DEFINE_IDA(gbphy_id);
30
31static ssize_t protocol_id_show(struct device *dev,
32 struct device_attribute *attr, char *buf)
33{
34 struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
35
36 return sprintf(buf, "0x%02x\n", gbphy_dev->cport_desc->protocol_id);
37}
38static DEVICE_ATTR_RO(protocol_id);
39
/* Default sysfs attributes attached to every gbphy device. */
static struct attribute *gbphy_dev_attrs[] = {
	&dev_attr_protocol_id.attr,
	NULL,
};

ATTRIBUTE_GROUPS(gbphy_dev);	/* defines gbphy_dev_groups */
46
/*
 * Device-core release callback: runs when the last reference to the gbphy
 * device is dropped; returns the id to the ida and frees the structure.
 */
static void gbphy_dev_release(struct device *dev)
{
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);

	ida_simple_remove(&gbphy_id, gbphy_dev->id);
	kfree(gbphy_dev);
}
54
#ifdef CONFIG_PM
/*
 * Runtime-PM idle callback: request a delayed autosuspend rather than
 * suspending immediately.
 */
static int gb_gbphy_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);
	return 0;
}
#endif

static const struct dev_pm_ops gb_gbphy_pm_ops = {
	/* SET_RUNTIME_PM_OPS expands to nothing when CONFIG_PM is not set */
	SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
			   pm_generic_runtime_resume,
			   gb_gbphy_idle)
};

static struct device_type greybus_gbphy_dev_type = {
	.name	 =	"gbphy_device",
	.release =	gbphy_dev_release,
	.pm	=	&gb_gbphy_pm_ops,
};
75
/*
 * Emit uevent variables locating this device in the greybus topology
 * (bus/module/interface/bundle) plus its CPort protocol, so userspace can
 * match on any level.
 */
static int gbphy_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
	struct greybus_descriptor_cport *cport_desc = gbphy_dev->cport_desc;
	struct gb_bundle *bundle = gbphy_dev->bundle;
	struct gb_interface *intf = bundle->intf;
	struct gb_module *module = intf->module;
	struct gb_host_device *hd = intf->hd;

	if (add_uevent_var(env, "BUS=%u", hd->bus_id))
		return -ENOMEM;
	if (add_uevent_var(env, "MODULE=%u", module->module_id))
		return -ENOMEM;
	if (add_uevent_var(env, "INTERFACE=%u", intf->interface_id))
		return -ENOMEM;
	if (add_uevent_var(env, "GREYBUS_ID=%08x/%08x",
			   intf->vendor_id, intf->product_id))
		return -ENOMEM;
	if (add_uevent_var(env, "BUNDLE=%u", gbphy_dev->bundle->id))
		return -ENOMEM;
	if (add_uevent_var(env, "BUNDLE_CLASS=%02x", bundle->class))
		return -ENOMEM;
	if (add_uevent_var(env, "GBPHY=%u", gbphy_dev->id))
		return -ENOMEM;
	if (add_uevent_var(env, "PROTOCOL_ID=%02x", cport_desc->protocol_id))
		return -ENOMEM;

	return 0;
}
105
106static const struct gbphy_device_id *
107gbphy_dev_match_id(struct gbphy_device *gbphy_dev, struct gbphy_driver *gbphy_drv)
108{
109 const struct gbphy_device_id *id = gbphy_drv->id_table;
110
111 if (!id)
112 return NULL;
113
114 for (; id->protocol_id; id++)
115 if (id->protocol_id == gbphy_dev->cport_desc->protocol_id)
116 return id;
117
118 return NULL;
119}
120
121static int gbphy_dev_match(struct device *dev, struct device_driver *drv)
122{
123 struct gbphy_driver *gbphy_drv = to_gbphy_driver(drv);
124 struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
125 const struct gbphy_device_id *id;
126
127 id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
128 if (id)
129 return 1;
130
131 return 0;
132}
133
/*
 * Bus probe callback: re-check the id match, set up runtime PM for the
 * new child device, and call the driver's probe.  The PM call ordering
 * below is load-bearing; do not reorder.
 */
static int gbphy_dev_probe(struct device *dev)
{
	struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
	const struct gbphy_device_id *id;
	int ret;

	id = gbphy_dev_match_id(gbphy_dev, gbphy_drv);
	if (!id)
		return -ENODEV;

	/* for old kernels we need get_sync to resume parent devices */
	ret = gb_pm_runtime_get_sync(gbphy_dev->bundle);
	if (ret < 0)
		return ret;

	/* Mark the device active and held before enabling runtime PM */
	pm_runtime_set_autosuspend_delay(dev, GB_GBPHY_AUTOSUSPEND_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/*
	 * Drivers should call put on the gbphy dev before returning
	 * from probe if they support runtime pm.
	 */
	ret = gbphy_drv->probe(gbphy_dev, id);
	if (ret) {
		/* Unwind the runtime-PM setup done above */
		pm_runtime_disable(dev);
		pm_runtime_set_suspended(dev);
		pm_runtime_put_noidle(dev);
		pm_runtime_dont_use_autosuspend(dev);
	}

	gb_pm_runtime_put_autosuspend(gbphy_dev->bundle);

	return ret;
}
172
/*
 * Bus remove callback: call the driver's remove, then undo the runtime-PM
 * setup performed in gbphy_dev_probe().
 */
static int gbphy_dev_remove(struct device *dev)
{
	struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
	struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);

	gbphy_drv->remove(gbphy_dev);

	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_dont_use_autosuspend(dev);

	return 0;
}
187
/* The "gbphy" bus: one child device per bridged-PHY CPort of a bundle. */
static struct bus_type gbphy_bus_type = {
	.name =		"gbphy",
	.match =	gbphy_dev_match,
	.probe =	gbphy_dev_probe,
	.remove =	gbphy_dev_remove,
	.uevent =	gbphy_dev_uevent,
};
195
196int gb_gbphy_register_driver(struct gbphy_driver *driver,
197 struct module *owner, const char *mod_name)
198{
199 int retval;
200
201 if (greybus_disabled())
202 return -ENODEV;
203
204 driver->driver.bus = &gbphy_bus_type;
205 driver->driver.name = driver->name;
206 driver->driver.owner = owner;
207 driver->driver.mod_name = mod_name;
208
209 retval = driver_register(&driver->driver);
210 if (retval)
211 return retval;
212
213 pr_info("registered new driver %s\n", driver->name);
214 return 0;
215}
216EXPORT_SYMBOL_GPL(gb_gbphy_register_driver);
217
/* Unregister a driver previously registered via gb_gbphy_register_driver(). */
void gb_gbphy_deregister_driver(struct gbphy_driver *driver)
{
	driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(gb_gbphy_deregister_driver);
223
/*
 * Allocate and register one gbphy child device for the given bundle CPort.
 * Returns the new device or an ERR_PTR.
 *
 * Once device_register() has been called, the device core owns the
 * structure: on failure we must drop it with put_device() — which ends up
 * in gbphy_dev_release() — rather than kfree() directly.
 */
static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
				struct greybus_descriptor_cport *cport_desc)
{
	struct gbphy_device *gbphy_dev;
	int retval;
	int id;

	/* ids start at 1; used to build the "gbphy%d" device name */
	id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
	if (id < 0)
		return ERR_PTR(id);

	gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
	if (!gbphy_dev) {
		ida_simple_remove(&gbphy_id, id);
		return ERR_PTR(-ENOMEM);
	}

	gbphy_dev->id = id;
	gbphy_dev->bundle = bundle;
	gbphy_dev->cport_desc = cport_desc;
	gbphy_dev->dev.parent = &bundle->dev;
	gbphy_dev->dev.bus = &gbphy_bus_type;
	gbphy_dev->dev.type = &greybus_gbphy_dev_type;
	gbphy_dev->dev.groups = gbphy_dev_groups;
	gbphy_dev->dev.dma_mask = bundle->dev.dma_mask;
	dev_set_name(&gbphy_dev->dev, "gbphy%d", id);

	retval = device_register(&gbphy_dev->dev);
	if (retval) {
		/* release callback frees gbphy_dev and returns the id */
		put_device(&gbphy_dev->dev);
		return ERR_PTR(retval);
	}

	return gbphy_dev;
}
259
/*
 * Bundle disconnect: unregister every child gbphy device and free the
 * per-bundle host structure.
 *
 * NOTE(review): the runtime-PM reference taken here (get_sync, or
 * get_noresume when resume fails) is never dropped — presumably fine since
 * the bundle is going away; confirm against the greybus core PM model.
 */
static void gb_gbphy_disconnect(struct gb_bundle *bundle)
{
	struct gbphy_host *gbphy_host = greybus_get_drvdata(bundle);
	struct gbphy_device *gbphy_dev, *temp;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		gb_pm_runtime_get_noresume(bundle);

	list_for_each_entry_safe(gbphy_dev, temp, &gbphy_host->devices, list) {
		list_del(&gbphy_dev->list);
		device_unregister(&gbphy_dev->dev);
	}

	kfree(gbphy_host);
}
277
/*
 * Bundle probe: create one gbphy child device per CPort so the bridged-phy
 * drivers can bind to them.  Returns 0 on success or a negative errno.
 */
static int gb_gbphy_probe(struct gb_bundle *bundle,
			  const struct greybus_bundle_id *id)
{
	struct gbphy_host *gbphy_host;
	struct gbphy_device *gbphy_dev;
	int i;

	if (bundle->num_cports == 0)
		return -ENODEV;

	gbphy_host = kzalloc(sizeof(*gbphy_host), GFP_KERNEL);
	if (!gbphy_host)
		return -ENOMEM;

	gbphy_host->bundle = bundle;
	INIT_LIST_HEAD(&gbphy_host->devices);
	greybus_set_drvdata(bundle, gbphy_host);

	/*
	 * Create a bunch of children devices, one per cport, and bind the
	 * bridged phy drivers to them.
	 */
	for (i = 0; i < bundle->num_cports; ++i) {
		gbphy_dev = gb_gbphy_create_dev(bundle, &bundle->cport_desc[i]);
		if (IS_ERR(gbphy_dev)) {
			/*
			 * NOTE(review): error cleanup reuses the disconnect
			 * path, which takes its own runtime-PM reference —
			 * verify the PM refcounting balances on this path.
			 */
			gb_gbphy_disconnect(bundle);
			return PTR_ERR(gbphy_dev);
		}
		list_add(&gbphy_dev->list, &gbphy_host->devices);
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}
313
/* Bind to any greybus bundle of the bridged-PHY class. */
static const struct greybus_bundle_id gb_gbphy_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_BRIDGED_PHY) },
	{ },
};
MODULE_DEVICE_TABLE(greybus, gb_gbphy_id_table);

static struct greybus_driver gb_gbphy_driver = {
	.name		= "gbphy",
	.probe		= gb_gbphy_probe,
	.disconnect	= gb_gbphy_disconnect,
	.id_table	= gb_gbphy_id_table,
};
326
327static int __init gbphy_init(void)
328{
329 int retval;
330
331 retval = bus_register(&gbphy_bus_type);
332 if (retval) {
333 pr_err("gbphy bus register failed (%d)\n", retval);
334 return retval;
335 }
336
337 retval = greybus_register(&gb_gbphy_driver);
338 if (retval) {
339 pr_err("error registering greybus driver\n");
340 goto error_gbphy;
341 }
342
343 return 0;
344
345error_gbphy:
346 bus_unregister(&gbphy_bus_type);
347 ida_destroy(&gbphy_id);
348 return retval;
349}
350module_init(gbphy_init);
351
/* Module exit: reverse of gbphy_init(); also drains the id allocator. */
static void __exit gbphy_exit(void)
{
	greybus_deregister(&gb_gbphy_driver);
	bus_unregister(&gbphy_bus_type);
	ida_destroy(&gbphy_id);
}
module_exit(gbphy_exit);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/gbphy.h b/drivers/staging/greybus/gbphy.h
new file mode 100644
index 000000000000..8ee68055ccc4
--- /dev/null
+++ b/drivers/staging/greybus/gbphy.h
@@ -0,0 +1,110 @@
1/*
2 * Greybus Bridged-Phy Bus driver
3 *
4 * Copyright 2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#ifndef __GBPHY_H
10#define __GBPHY_H
11
/* One gbphy device per bridged-PHY CPort of a greybus bundle. */
struct gbphy_device {
	u32 id;			/* unique id, backs the "gbphy%d" name */
	struct greybus_descriptor_cport *cport_desc;
	struct gb_bundle *bundle;
	struct list_head list;	/* entry in the host's device list */
	struct device dev;
};
#define to_gbphy_dev(d) container_of(d, struct gbphy_device, dev)

/* Driver-private data accessors, backed by the embedded struct device. */
static inline void *gb_gbphy_get_data(struct gbphy_device *gdev)
{
	return dev_get_drvdata(&gdev->dev);
}

static inline void gb_gbphy_set_data(struct gbphy_device *gdev, void *data)
{
	dev_set_drvdata(&gdev->dev, data);
}
30
/* Entry in a driver's id table; a zeroed entry terminates the table. */
struct gbphy_device_id {
	__u8 protocol_id;
};

/* Convenience initializer for gbphy_device_id table entries. */
#define GBPHY_PROTOCOL(p)		\
	.protocol_id	= (p),

struct gbphy_driver {
	const char *name;
	int (*probe)(struct gbphy_device *,
		     const struct gbphy_device_id *id);
	void (*remove)(struct gbphy_device *);
	const struct gbphy_device_id *id_table;	/* zero-terminated */

	struct device_driver driver;
};
#define to_gbphy_driver(d) container_of(d, struct gbphy_driver, driver)
48
int gb_gbphy_register_driver(struct gbphy_driver *driver,
			     struct module *owner, const char *mod_name);
void gb_gbphy_deregister_driver(struct gbphy_driver *driver);

/* Preferred entry points: fill in the module owner/name automatically. */
#define gb_gbphy_register(driver) \
	gb_gbphy_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
#define gb_gbphy_deregister(driver) \
	gb_gbphy_deregister_driver(driver)

/**
 * module_gbphy_driver() - Helper macro for registering a gbphy driver
 * @__gbphy_driver: gbphy_driver structure
 *
 * Helper macro for gbphy drivers to set up proper module init / exit
 * functions. Replaces module_init() and module_exit() and keeps people from
 * printing pointless things to the kernel log when their driver is loaded.
 */
#define module_gbphy_driver(__gbphy_driver) \
	module_driver(__gbphy_driver, gb_gbphy_register, gb_gbphy_deregister)
68
#ifdef CONFIG_PM
/*
 * Resume the gbphy device (and its parents).  Returns 0 or a negative
 * errno; on failure the usage count is dropped again, so callers need no
 * special unwinding.
 */
static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev)
{
	struct device *dev = &gbphy_dev->dev;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret);
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}

/* Drop a usage reference and schedule a delayed autosuspend. */
static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev)
{
	struct device *dev = &gbphy_dev->dev;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}

/* Take a usage reference without resuming the device. */
static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev)
{
	pm_runtime_get_noresume(&gbphy_dev->dev);
}

/* Drop a usage reference without triggering idle handling. */
static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev)
{
	pm_runtime_put_noidle(&gbphy_dev->dev);
}
#else
/* !CONFIG_PM: the runtime-PM helpers compile away to no-ops. */
static inline int gbphy_runtime_get_sync(struct gbphy_device *gbphy_dev) { return 0; }
static inline void gbphy_runtime_put_autosuspend(struct gbphy_device *gbphy_dev) {}
static inline void gbphy_runtime_get_noresume(struct gbphy_device *gbphy_dev) {}
static inline void gbphy_runtime_put_noidle(struct gbphy_device *gbphy_dev) {}
#endif
108
109#endif /* __GBPHY_H */
110
diff --git a/drivers/staging/greybus/gpio.c b/drivers/staging/greybus/gpio.c
new file mode 100644
index 000000000000..ea8234abf185
--- /dev/null
+++ b/drivers/staging/greybus/gpio.c
@@ -0,0 +1,767 @@
1/*
2 * GPIO Greybus driver.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/gpio.h>
14#include <linux/irq.h>
15#include <linux/irqdomain.h>
16#include <linux/mutex.h>
17
18#include "greybus.h"
19#include "gbphy.h"
20
21struct gb_gpio_line {
22 /* The following has to be an array of line_max entries */
23 /* --> make them just a flags field */
24 u8 active: 1,
25 direction: 1, /* 0 = output, 1 = input */
26 value: 1; /* 0 = low, 1 = high */
27 u16 debounce_usec;
28
29 u8 irq_type;
30 bool irq_type_pending;
31 bool masked;
32 bool masked_pending;
33};
34
35struct gb_gpio_controller {
36 struct gbphy_device *gbphy_dev;
37 struct gb_connection *connection;
38 u8 line_max; /* max line number */
39 struct gb_gpio_line *lines;
40
41 struct gpio_chip chip;
42 struct irq_chip irqc;
43 struct irq_chip *irqchip;
44 struct irq_domain *irqdomain;
45 unsigned int irq_base;
46 irq_flow_handler_t irq_handler;
47 unsigned int irq_default_type;
48 struct mutex irq_lock;
49};
50#define gpio_chip_to_gb_gpio_controller(chip) \
51 container_of(chip, struct gb_gpio_controller, chip)
52#define irq_data_to_gpio_chip(d) (d->domain->host_data)
53
54static int gb_gpio_line_count_operation(struct gb_gpio_controller *ggc)
55{
56 struct gb_gpio_line_count_response response;
57 int ret;
58
59 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_LINE_COUNT,
60 NULL, 0, &response, sizeof(response));
61 if (!ret)
62 ggc->line_max = response.count;
63 return ret;
64}
65
66static int gb_gpio_activate_operation(struct gb_gpio_controller *ggc, u8 which)
67{
68 struct gb_gpio_activate_request request;
69 struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
70 int ret;
71
72 ret = gbphy_runtime_get_sync(gbphy_dev);
73 if (ret)
74 return ret;
75
76 request.which = which;
77 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_ACTIVATE,
78 &request, sizeof(request), NULL, 0);
79 if (ret) {
80 gbphy_runtime_put_autosuspend(gbphy_dev);
81 return ret;
82 }
83
84 ggc->lines[which].active = true;
85
86 return 0;
87}
88
89static void gb_gpio_deactivate_operation(struct gb_gpio_controller *ggc,
90 u8 which)
91{
92 struct gbphy_device *gbphy_dev = ggc->gbphy_dev;
93 struct device *dev = &gbphy_dev->dev;
94 struct gb_gpio_deactivate_request request;
95 int ret;
96
97 request.which = which;
98 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DEACTIVATE,
99 &request, sizeof(request), NULL, 0);
100 if (ret) {
101 dev_err(dev, "failed to deactivate gpio %u\n", which);
102 goto out_pm_put;
103 }
104
105 ggc->lines[which].active = false;
106
107out_pm_put:
108 gbphy_runtime_put_autosuspend(gbphy_dev);
109}
110
111static int gb_gpio_get_direction_operation(struct gb_gpio_controller *ggc,
112 u8 which)
113{
114 struct device *dev = &ggc->gbphy_dev->dev;
115 struct gb_gpio_get_direction_request request;
116 struct gb_gpio_get_direction_response response;
117 int ret;
118 u8 direction;
119
120 request.which = which;
121 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_DIRECTION,
122 &request, sizeof(request),
123 &response, sizeof(response));
124 if (ret)
125 return ret;
126
127 direction = response.direction;
128 if (direction && direction != 1) {
129 dev_warn(dev, "gpio %u direction was %u (should be 0 or 1)\n",
130 which, direction);
131 }
132 ggc->lines[which].direction = direction ? 1 : 0;
133 return 0;
134}
135
136static int gb_gpio_direction_in_operation(struct gb_gpio_controller *ggc,
137 u8 which)
138{
139 struct gb_gpio_direction_in_request request;
140 int ret;
141
142 request.which = which;
143 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_IN,
144 &request, sizeof(request), NULL, 0);
145 if (!ret)
146 ggc->lines[which].direction = 1;
147 return ret;
148}
149
150static int gb_gpio_direction_out_operation(struct gb_gpio_controller *ggc,
151 u8 which, bool value_high)
152{
153 struct gb_gpio_direction_out_request request;
154 int ret;
155
156 request.which = which;
157 request.value = value_high ? 1 : 0;
158 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_DIRECTION_OUT,
159 &request, sizeof(request), NULL, 0);
160 if (!ret)
161 ggc->lines[which].direction = 0;
162 return ret;
163}
164
165static int gb_gpio_get_value_operation(struct gb_gpio_controller *ggc,
166 u8 which)
167{
168 struct device *dev = &ggc->gbphy_dev->dev;
169 struct gb_gpio_get_value_request request;
170 struct gb_gpio_get_value_response response;
171 int ret;
172 u8 value;
173
174 request.which = which;
175 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_GET_VALUE,
176 &request, sizeof(request),
177 &response, sizeof(response));
178 if (ret) {
179 dev_err(dev, "failed to get value of gpio %u\n", which);
180 return ret;
181 }
182
183 value = response.value;
184 if (value && value != 1) {
185 dev_warn(dev, "gpio %u value was %u (should be 0 or 1)\n",
186 which, value);
187 }
188 ggc->lines[which].value = value ? 1 : 0;
189 return 0;
190}
191
192static void gb_gpio_set_value_operation(struct gb_gpio_controller *ggc,
193 u8 which, bool value_high)
194{
195 struct device *dev = &ggc->gbphy_dev->dev;
196 struct gb_gpio_set_value_request request;
197 int ret;
198
199 if (ggc->lines[which].direction == 1) {
200 dev_warn(dev, "refusing to set value of input gpio %u\n",
201 which);
202 return;
203 }
204
205 request.which = which;
206 request.value = value_high ? 1 : 0;
207 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_VALUE,
208 &request, sizeof(request), NULL, 0);
209 if (ret) {
210 dev_err(dev, "failed to set value of gpio %u\n", which);
211 return;
212 }
213
214 ggc->lines[which].value = request.value;
215}
216
217static int gb_gpio_set_debounce_operation(struct gb_gpio_controller *ggc,
218 u8 which, u16 debounce_usec)
219{
220 struct gb_gpio_set_debounce_request request;
221 int ret;
222
223 request.which = which;
224 request.usec = cpu_to_le16(debounce_usec);
225 ret = gb_operation_sync(ggc->connection, GB_GPIO_TYPE_SET_DEBOUNCE,
226 &request, sizeof(request), NULL, 0);
227 if (!ret)
228 ggc->lines[which].debounce_usec = debounce_usec;
229 return ret;
230}
231
232static void _gb_gpio_irq_mask(struct gb_gpio_controller *ggc, u8 hwirq)
233{
234 struct device *dev = &ggc->gbphy_dev->dev;
235 struct gb_gpio_irq_mask_request request;
236 int ret;
237
238 request.which = hwirq;
239 ret = gb_operation_sync(ggc->connection,
240 GB_GPIO_TYPE_IRQ_MASK,
241 &request, sizeof(request), NULL, 0);
242 if (ret)
243 dev_err(dev, "failed to mask irq: %d\n", ret);
244}
245
246static void _gb_gpio_irq_unmask(struct gb_gpio_controller *ggc, u8 hwirq)
247{
248 struct device *dev = &ggc->gbphy_dev->dev;
249 struct gb_gpio_irq_unmask_request request;
250 int ret;
251
252 request.which = hwirq;
253 ret = gb_operation_sync(ggc->connection,
254 GB_GPIO_TYPE_IRQ_UNMASK,
255 &request, sizeof(request), NULL, 0);
256 if (ret)
257 dev_err(dev, "failed to unmask irq: %d\n", ret);
258}
259
260static void _gb_gpio_irq_set_type(struct gb_gpio_controller *ggc,
261 u8 hwirq, u8 type)
262{
263 struct device *dev = &ggc->gbphy_dev->dev;
264 struct gb_gpio_irq_type_request request;
265 int ret;
266
267 request.which = hwirq;
268 request.type = type;
269
270 ret = gb_operation_sync(ggc->connection,
271 GB_GPIO_TYPE_IRQ_TYPE,
272 &request, sizeof(request), NULL, 0);
273 if (ret)
274 dev_err(dev, "failed to set irq type: %d\n", ret);
275}
276
/*
 * irqchip mask callback: only record the request here; the greybus
 * operation itself is issued from gb_gpio_irq_bus_sync_unlock().
 */
static void gb_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = true;
	line->masked_pending = true;
}

/* irqchip unmask callback: deferred exactly like gb_gpio_irq_mask(). */
static void gb_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	line->masked = false;
	line->masked_pending = true;
}
296
/*
 * irqchip set_type callback: translate the Linux trigger type to the
 * greybus encoding and latch it; the latched value is flushed to the
 * module in gb_gpio_irq_bus_sync_unlock().  Returns -EINVAL for trigger
 * types the protocol cannot express.
 */
static int gb_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];
	struct device *dev = &ggc->gbphy_dev->dev;
	u8 irq_type;

	switch (type) {
	case IRQ_TYPE_NONE:
		irq_type = GB_GPIO_IRQ_TYPE_NONE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		irq_type = GB_GPIO_IRQ_TYPE_EDGE_BOTH;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_LOW;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_type = GB_GPIO_IRQ_TYPE_LEVEL_HIGH;
		break;
	default:
		dev_err(dev, "unsupported irq type: %u\n", type);
		return -EINVAL;
	}

	line->irq_type = irq_type;
	line->irq_type_pending = true;

	return 0;
}
334
/*
 * Slow-bus locking pair: the mask/unmask/set_type callbacks above only
 * record state, because greybus operations may sleep; the recorded
 * changes are flushed to the module here, once the lock is released.
 */
static void gb_gpio_irq_bus_lock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);

	mutex_lock(&ggc->irq_lock);
}

static void gb_gpio_irq_bus_sync_unlock(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_to_gpio_chip(d);
	struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
	struct gb_gpio_line *line = &ggc->lines[d->hwirq];

	/* Push a pending trigger-type change first, then the mask state */
	if (line->irq_type_pending) {
		_gb_gpio_irq_set_type(ggc, d->hwirq, line->irq_type);
		line->irq_type_pending = false;
	}

	if (line->masked_pending) {
		if (line->masked)
			_gb_gpio_irq_mask(ggc, d->hwirq);
		else
			_gb_gpio_irq_unmask(ggc, d->hwirq);
		line->masked_pending = false;
	}

	mutex_unlock(&ggc->irq_lock);
}
364
/*
 * Unsolicited-request handler: the module sends GB_GPIO_TYPE_IRQ_EVENT
 * when a monitored line fires.  Validate the event and dispatch it to the
 * mapped Linux irq.  Returns 0 on success, -EINVAL for malformed or
 * unexpected requests.
 */
static int gb_gpio_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_gpio_controller *ggc = gb_connection_get_data(connection);
	struct device *dev = &ggc->gbphy_dev->dev;
	struct gb_message *request;
	struct gb_gpio_irq_event_request *event;
	u8 type = op->type;
	int irq;
	struct irq_desc *desc;

	if (type != GB_GPIO_TYPE_IRQ_EVENT) {
		dev_err(dev, "unsupported unsolicited request: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	/* Reject short (malformed) payloads before touching any fields */
	if (request->payload_size < sizeof(*event)) {
		dev_err(dev, "short event received (%zu < %zu)\n",
			request->payload_size, sizeof(*event));
		return -EINVAL;
	}

	event = request->payload;
	if (event->which > ggc->line_max) {
		dev_err(dev, "invalid hw irq: %d\n", event->which);
		return -EINVAL;
	}

	irq = irq_find_mapping(ggc->irqdomain, event->which);
	if (!irq) {
		dev_err(dev, "failed to find IRQ\n");
		return -EINVAL;
	}
	desc = irq_to_desc(irq);
	if (!desc) {
		dev_err(dev, "failed to look up irq\n");
		return -EINVAL;
	}

	/*
	 * NOTE(review): this runs from greybus (non-irq) context; irqs are
	 * disabled around the dispatch, presumably so the flow handler sees
	 * something resembling hard-irq context — confirm against genirq
	 * expectations.
	 */
	local_irq_disable();
	generic_handle_irq_desc(desc);
	local_irq_enable();

	return 0;
}
412
413static int gb_gpio_request(struct gpio_chip *chip, unsigned offset)
414{
415 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
416
417 return gb_gpio_activate_operation(ggc, (u8)offset);
418}
419
420static void gb_gpio_free(struct gpio_chip *chip, unsigned offset)
421{
422 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
423
424 gb_gpio_deactivate_operation(ggc, (u8)offset);
425}
426
427static int gb_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
428{
429 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
430 u8 which;
431 int ret;
432
433 which = (u8)offset;
434 ret = gb_gpio_get_direction_operation(ggc, which);
435 if (ret)
436 return ret;
437
438 return ggc->lines[which].direction ? 1 : 0;
439}
440
441static int gb_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
442{
443 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
444
445 return gb_gpio_direction_in_operation(ggc, (u8)offset);
446}
447
448static int gb_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
449 int value)
450{
451 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
452
453 return gb_gpio_direction_out_operation(ggc, (u8)offset, !!value);
454}
455
456static int gb_gpio_get(struct gpio_chip *chip, unsigned offset)
457{
458 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
459 u8 which;
460 int ret;
461
462 which = (u8)offset;
463 ret = gb_gpio_get_value_operation(ggc, which);
464 if (ret)
465 return ret;
466
467 return ggc->lines[which].value;
468}
469
470static void gb_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
471{
472 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
473
474 gb_gpio_set_value_operation(ggc, (u8)offset, !!value);
475}
476
477static int gb_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
478 unsigned debounce)
479{
480 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
481 u16 usec;
482
483 if (debounce > U16_MAX)
484 return -EINVAL;
485 usec = (u16)debounce;
486
487 return gb_gpio_set_debounce_operation(ggc, (u8)offset, usec);
488}
489
490static int gb_gpio_controller_setup(struct gb_gpio_controller *ggc)
491{
492 int ret;
493
494 /* Now find out how many lines there are */
495 ret = gb_gpio_line_count_operation(ggc);
496 if (ret)
497 return ret;
498
499 ggc->lines = kcalloc(ggc->line_max + 1, sizeof(*ggc->lines),
500 GFP_KERNEL);
501 if (!ggc->lines)
502 return -ENOMEM;
503
504 return ret;
505}
506
507/**
508 * gb_gpio_irq_map() - maps an IRQ into a GB gpio irqchip
509 * @d: the irqdomain used by this irqchip
510 * @irq: the global irq number used by this GB gpio irqchip irq
511 * @hwirq: the local IRQ/GPIO line offset on this GB gpio
512 *
513 * This function will set up the mapping for a certain IRQ line on a
514 * GB gpio by assigning the GB gpio as chip data, and using the irqchip
515 * stored inside the GB gpio.
516 */
517static int gb_gpio_irq_map(struct irq_domain *domain, unsigned int irq,
518 irq_hw_number_t hwirq)
519{
520 struct gpio_chip *chip = domain->host_data;
521 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
522
523 irq_set_chip_data(irq, ggc);
524 irq_set_chip_and_handler(irq, ggc->irqchip, ggc->irq_handler);
525 irq_set_noprobe(irq);
526 /*
527 * No set-up of the hardware will happen if IRQ_TYPE_NONE
528 * is passed as default type.
529 */
530 if (ggc->irq_default_type != IRQ_TYPE_NONE)
531 irq_set_irq_type(irq, ggc->irq_default_type);
532
533 return 0;
534}
535
/* Undo gb_gpio_irq_map(): detach the handler and chip data from @irq. */
static void gb_gpio_irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
541
/* irqdomain callbacks: map/unmap one hwirq (GPIO line) to a Linux irq. */
static const struct irq_domain_ops gb_gpio_domain_ops = {
	.map	= gb_gpio_irq_map,
	.unmap	= gb_gpio_irq_unmap,
};
546
547/**
548 * gb_gpio_irqchip_remove() - removes an irqchip added to a gb_gpio_controller
549 * @ggc: the gb_gpio_controller to remove the irqchip from
550 *
551 * This is called only from gb_gpio_remove()
552 */
553static void gb_gpio_irqchip_remove(struct gb_gpio_controller *ggc)
554{
555 unsigned int offset;
556
557 /* Remove all IRQ mappings and delete the domain */
558 if (ggc->irqdomain) {
559 for (offset = 0; offset < (ggc->line_max + 1); offset++)
560 irq_dispose_mapping(irq_find_mapping(ggc->irqdomain, offset));
561 irq_domain_remove(ggc->irqdomain);
562 }
563
564 if (ggc->irqchip) {
565 ggc->irqchip = NULL;
566 }
567}
568
569/**
570 * gb_gpio_irqchip_add() - adds an irqchip to a gpio chip
571 * @chip: the gpio chip to add the irqchip to
572 * @irqchip: the irqchip to add to the adapter
573 * @first_irq: if not dynamically assigned, the base (first) IRQ to
574 * allocate gpio irqs from
575 * @handler: the irq handler to use (often a predefined irq core function)
576 * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
577 * to have the core avoid setting up any default type in the hardware.
578 *
579 * This function closely associates a certain irqchip with a certain
580 * gpio chip, providing an irq domain to translate the local IRQs to
581 * global irqs, and making sure that the gpio chip
582 * is passed as chip data to all related functions. Driver callbacks
583 * need to use container_of() to get their local state containers back
584 * from the gpio chip passed as chip data. An irqdomain will be stored
585 * in the gpio chip that shall be used by the driver to handle IRQ number
586 * translation. The gpio chip will need to be initialized and registered
587 * before calling this function.
588 */
589static int gb_gpio_irqchip_add(struct gpio_chip *chip,
590 struct irq_chip *irqchip,
591 unsigned int first_irq,
592 irq_flow_handler_t handler,
593 unsigned int type)
594{
595 struct gb_gpio_controller *ggc;
596 unsigned int offset;
597 unsigned irq_base;
598
599 if (!chip || !irqchip)
600 return -EINVAL;
601
602 ggc = gpio_chip_to_gb_gpio_controller(chip);
603
604 ggc->irqchip = irqchip;
605 ggc->irq_handler = handler;
606 ggc->irq_default_type = type;
607 ggc->irqdomain = irq_domain_add_simple(NULL,
608 ggc->line_max + 1, first_irq,
609 &gb_gpio_domain_ops, chip);
610 if (!ggc->irqdomain) {
611 ggc->irqchip = NULL;
612 return -EINVAL;
613 }
614
615 /*
616 * Prepare the mapping since the irqchip shall be orthogonal to
617 * any gpio calls. If the first_irq was zero, this is
618 * necessary to allocate descriptors for all IRQs.
619 */
620 for (offset = 0; offset < (ggc->line_max + 1); offset++) {
621 irq_base = irq_create_mapping(ggc->irqdomain, offset);
622 if (offset == 0)
623 ggc->irq_base = irq_base;
624 }
625
626 return 0;
627}
628
629static int gb_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
630{
631 struct gb_gpio_controller *ggc = gpio_chip_to_gb_gpio_controller(chip);
632
633 return irq_find_mapping(ggc->irqdomain, offset);
634}
635
/*
 * Probe a GPIO gbphy device: create the connection, query the number of
 * GPIO lines, set up the irqchip and gpio_chip, register the gpio chip,
 * and finally drop the runtime-PM reference taken by the gbphy core.
 */
static int gb_gpio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_gpio_controller *ggc;
	struct gpio_chip *gpio;
	struct irq_chip *irqc;
	int ret;

	ggc = kzalloc(sizeof(*ggc), GFP_KERNEL);
	if (!ggc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_gpio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_ggc_free;
	}

	/* Cross-link controller, connection and gbphy device. */
	ggc->connection = connection;
	gb_connection_set_data(connection, ggc);
	ggc->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, ggc);

	/*
	 * Enable the transmit side only for now, so the line-count query
	 * below can be sent before incoming requests are handled.
	 */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	/* Query line count and allocate per-line state (ggc->lines). */
	ret = gb_gpio_controller_setup(ggc);
	if (ret)
		goto exit_connection_disable;

	irqc = &ggc->irqc;
	irqc->irq_mask = gb_gpio_irq_mask;
	irqc->irq_unmask = gb_gpio_irq_unmask;
	irqc->irq_set_type = gb_gpio_irq_set_type;
	irqc->irq_bus_lock = gb_gpio_irq_bus_lock;
	irqc->irq_bus_sync_unlock = gb_gpio_irq_bus_sync_unlock;
	irqc->name = "greybus_gpio";

	mutex_init(&ggc->irq_lock);

	gpio = &ggc->chip;

	gpio->label = "greybus_gpio";
	gpio->parent = &gbphy_dev->dev;
	gpio->owner = THIS_MODULE;

	gpio->request = gb_gpio_request;
	gpio->free = gb_gpio_free;
	gpio->get_direction = gb_gpio_get_direction;
	gpio->direction_input = gb_gpio_direction_input;
	gpio->direction_output = gb_gpio_direction_output;
	gpio->get = gb_gpio_get;
	gpio->set = gb_gpio_set;
	gpio->set_debounce = gb_gpio_set_debounce;
	gpio->to_irq = gb_gpio_to_irq;
	gpio->base = -1;		/* Allocate base dynamically */
	gpio->ngpio = ggc->line_max + 1;
	gpio->can_sleep = true;		/* accessors go over the connection */

	/* Fully enable the connection (rx included) before going live. */
	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_line_free;

	ret = gb_gpio_irqchip_add(gpio, irqc, 0,
				  handle_level_irq, IRQ_TYPE_NONE);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to add irq chip: %d\n", ret);
		goto exit_line_free;
	}

	ret = gpiochip_add(gpio);
	if (ret) {
		dev_err(&connection->bundle->dev,
			"failed to add gpio chip: %d\n", ret);
		goto exit_gpio_irqchip_remove;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

	/* Error labels unwind in reverse acquisition order (fall through). */
exit_gpio_irqchip_remove:
	gb_gpio_irqchip_remove(ggc);
exit_line_free:
	kfree(ggc->lines);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_ggc_free:
	kfree(ggc);
	return ret;
}
733
/* Tear down a GPIO gbphy device; reverse of gb_gpio_probe(). */
static void gb_gpio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_gpio_controller *ggc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = ggc->connection;
	int ret;

	/* Resume the device; if resume fails, still balance the PM count. */
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	/* Stop incoming requests before unregistering the gpio chip. */
	gb_connection_disable_rx(connection);
	gpiochip_remove(&ggc->chip);
	gb_gpio_irqchip_remove(ggc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(ggc->lines);
	kfree(ggc);
}
752
/* Match any gbphy device speaking the greybus GPIO protocol. */
static const struct gbphy_device_id gb_gpio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_GPIO) },
	{ },	/* terminating entry */
};
MODULE_DEVICE_TABLE(gbphy, gb_gpio_id_table);
758
/* gbphy driver glue: binds gb_gpio_probe()/gb_gpio_remove() to the bus. */
static struct gbphy_driver gpio_driver = {
	.name		= "gpio",
	.probe		= gb_gpio_probe,
	.remove		= gb_gpio_remove,
	.id_table	= gb_gpio_id_table,
};

module_gbphy_driver(gpio_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/greybus.h b/drivers/staging/greybus/greybus.h
new file mode 100644
index 000000000000..12526887ae2e
--- /dev/null
+++ b/drivers/staging/greybus/greybus.h
@@ -0,0 +1,154 @@
1/*
2 * Greybus driver and device API
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __LINUX_GREYBUS_H
11#define __LINUX_GREYBUS_H
12
13#ifdef __KERNEL__
14
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/list.h>
18#include <linux/slab.h>
19#include <linux/device.h>
20#include <linux/module.h>
21#include <linux/pm_runtime.h>
22#include <linux/idr.h>
23
24#include "greybus_id.h"
25#include "greybus_manifest.h"
26#include "greybus_protocols.h"
27#include "manifest.h"
28#include "hd.h"
29#include "svc.h"
30#include "control.h"
31#include "module.h"
32#include "interface.h"
33#include "bundle.h"
34#include "connection.h"
35#include "operation.h"
36#include "timesync.h"
37
38/* Matches up with the Greybus Protocol specification document */
39#define GREYBUS_VERSION_MAJOR 0x00
40#define GREYBUS_VERSION_MINOR 0x01
41
42#define GREYBUS_ID_MATCH_DEVICE \
43 (GREYBUS_ID_MATCH_VENDOR | GREYBUS_ID_MATCH_PRODUCT)
44
45#define GREYBUS_DEVICE(v, p) \
46 .match_flags = GREYBUS_ID_MATCH_DEVICE, \
47 .vendor = (v), \
48 .product = (p),
49
50#define GREYBUS_DEVICE_CLASS(c) \
51 .match_flags = GREYBUS_ID_MATCH_CLASS, \
52 .class = (c),
53
54/* Maximum number of CPorts */
55#define CPORT_ID_MAX 4095 /* UniPro max id is 4095 */
56#define CPORT_ID_BAD U16_MAX
57
/*
 * A greybus driver matches bundles via id_table; the core invokes
 * probe() on match and disconnect() on removal.
 */
struct greybus_driver {
	const char *name;

	int (*probe)(struct gb_bundle *bundle,
		     const struct greybus_bundle_id *id);
	void (*disconnect)(struct gb_bundle *bundle);

	const struct greybus_bundle_id *id_table;

	struct device_driver driver;	/* embedded driver-core object */
};
/* Recover the greybus_driver from its embedded device_driver. */
#define to_greybus_driver(d) container_of(d, struct greybus_driver, driver)
70
/* Stash driver-private data on the bundle's struct device. */
static inline void greybus_set_drvdata(struct gb_bundle *bundle, void *data)
{
	dev_set_drvdata(&bundle->dev, data);
}

/* Retrieve data previously stored with greybus_set_drvdata(). */
static inline void *greybus_get_drvdata(struct gb_bundle *bundle)
{
	return dev_get_drvdata(&bundle->dev);
}
80
81/* Don't call these directly, use the module_greybus_driver() macro instead */
82int greybus_register_driver(struct greybus_driver *driver,
83 struct module *module, const char *mod_name);
84void greybus_deregister_driver(struct greybus_driver *driver);
85
86/* define to get proper THIS_MODULE and KBUILD_MODNAME values */
87#define greybus_register(driver) \
88 greybus_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
89#define greybus_deregister(driver) \
90 greybus_deregister_driver(driver)
91
92/**
93 * module_greybus_driver() - Helper macro for registering a Greybus driver
94 * @__greybus_driver: greybus_driver structure
95 *
96 * Helper macro for Greybus drivers to set up proper module init / exit
97 * functions. Replaces module_init() and module_exit() and keeps people from
98 * printing pointless things to the kernel log when their driver is loaded.
99 */
100#define module_greybus_driver(__greybus_driver) \
101 module_driver(__greybus_driver, greybus_register, greybus_deregister)
102
103int greybus_disabled(void);
104
105void gb_debugfs_init(void);
106void gb_debugfs_cleanup(void);
107struct dentry *gb_debugfs_get(void);
108
109extern struct bus_type greybus_bus_type;
110
111extern struct device_type greybus_hd_type;
112extern struct device_type greybus_module_type;
113extern struct device_type greybus_interface_type;
114extern struct device_type greybus_control_type;
115extern struct device_type greybus_bundle_type;
116extern struct device_type greybus_svc_type;
117
/*
 * Device-type predicates: each returns non-zero iff @dev is of the
 * corresponding greybus device type (compared by device_type pointer).
 */
static inline int is_gb_host_device(const struct device *dev)
{
	return dev->type == &greybus_hd_type;
}

static inline int is_gb_module(const struct device *dev)
{
	return dev->type == &greybus_module_type;
}

static inline int is_gb_interface(const struct device *dev)
{
	return dev->type == &greybus_interface_type;
}

static inline int is_gb_control(const struct device *dev)
{
	return dev->type == &greybus_control_type;
}

static inline int is_gb_bundle(const struct device *dev)
{
	return dev->type == &greybus_bundle_type;
}

static inline int is_gb_svc(const struct device *dev)
{
	return dev->type == &greybus_svc_type;
}
147
148static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id)
149{
150 return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports;
151}
152
153#endif /* __KERNEL__ */
154#endif /* __LINUX_GREYBUS_H */
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
new file mode 100644
index 000000000000..4784ed98e8a3
--- /dev/null
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -0,0 +1,120 @@
1/*
2 * Greybus Component Authentication User Header
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Google Inc. All rights reserved.
10 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License version 2 for more details.
20 *
21 * BSD LICENSE
22 *
23 * Copyright(c) 2016 Google Inc. All rights reserved.
24 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 *
30 * * Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * * Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
37 * its contributors may be used to endorse or promote products
38 * derived from this software without specific prior written
39 * permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
45 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
46 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
47 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
48 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
49 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef __GREYBUS_AUTHENTICATION_USER_H
55#define __GREYBUS_AUTHENTICATION_USER_H
56
57#include <linux/ioctl.h>
58#include <linux/types.h>
59
60#define CAP_CERTIFICATE_MAX_SIZE 1600
61#define CAP_SIGNATURE_MAX_SIZE 320
62
63/* Certificate class types */
64#define CAP_CERT_IMS_EAPC 0x00000001
65#define CAP_CERT_IMS_EASC 0x00000002
66#define CAP_CERT_IMS_EARC 0x00000003
67#define CAP_CERT_IMS_IAPC 0x00000004
68#define CAP_CERT_IMS_IASC 0x00000005
69#define CAP_CERT_IMS_IARC 0x00000006
70
71/* IMS Certificate response result codes */
72#define CAP_IMS_RESULT_CERT_FOUND 0x00
73#define CAP_IMS_RESULT_CERT_CLASS_INVAL 0x01
74#define CAP_IMS_RESULT_CERT_CORRUPT 0x02
75#define CAP_IMS_RESULT_CERT_NOT_FOUND 0x03
76
77/* Authentication types */
78#define CAP_AUTH_IMS_PRI 0x00000001
79#define CAP_AUTH_IMS_SEC 0x00000002
80#define CAP_AUTH_IMS_RSA 0x00000003
81
82/* Authenticate response result codes */
83#define CAP_AUTH_RESULT_CR_SUCCESS 0x00
84#define CAP_AUTH_RESULT_CR_BAD_TYPE 0x01
85#define CAP_AUTH_RESULT_CR_WRONG_EP 0x02
86#define CAP_AUTH_RESULT_CR_NO_KEY 0x03
87#define CAP_AUTH_RESULT_CR_SIG_FAIL 0x04
88
89
/* IOCTL support */

/* CAP_IOC_GET_ENDPOINT_UID payload: the 8-byte endpoint unique id. */
struct cap_ioc_get_endpoint_uid {
	__u8 uid[8];
} __attribute__ ((__packed__));

/*
 * CAP_IOC_GET_IMS_CERTIFICATE payload.  result_code is one of the
 * CAP_IMS_RESULT_* codes; cert_size presumably gives the number of
 * valid bytes in certificate[] — confirm against the protocol spec.
 */
struct cap_ioc_get_ims_certificate {
	__u32 certificate_class;	/* one of CAP_CERT_IMS_* */
	__u32 certificate_id;

	__u8 result_code;
	__u32 cert_size;
	__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
} __attribute__ ((__packed__));

/*
 * CAP_IOC_AUTHENTICATE payload.  auth_type is one of CAP_AUTH_IMS_*;
 * result_code is one of CAP_AUTH_RESULT_CR_*; signature_size presumably
 * gives the number of valid bytes in signature[] — confirm against spec.
 */
struct cap_ioc_authenticate {
	__u32 auth_type;
	__u8 uid[8];
	__u8 challenge[32];

	__u8 result_code;
	__u8 response[64];
	__u32 signature_size;
	__u8 signature[CAP_SIGNATURE_MAX_SIZE];
} __attribute__ ((__packed__));
114
115#define CAP_IOCTL_BASE 'C'
116#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
117#define CAP_IOC_GET_IMS_CERTIFICATE _IOWR(CAP_IOCTL_BASE, 1, struct cap_ioc_get_ims_certificate)
118#define CAP_IOC_AUTHENTICATE _IOWR(CAP_IOCTL_BASE, 2, struct cap_ioc_authenticate)
119
120#endif /* __GREYBUS_AUTHENTICATION_USER_H */
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
new file mode 100644
index 000000000000..277a2acce6fd
--- /dev/null
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -0,0 +1,120 @@
1/*
2 * Greybus Firmware Management User Header
3 *
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
6 *
7 * GPL LICENSE SUMMARY
8 *
9 * Copyright(c) 2016 Google Inc. All rights reserved.
10 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License version 2 for more details.
20 *
21 * BSD LICENSE
22 *
23 * Copyright(c) 2016 Google Inc. All rights reserved.
24 * Copyright(c) 2016 Linaro Ltd. All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 *
30 * * Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * * Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in
34 * the documentation and/or other materials provided with the
35 * distribution.
36 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
37 * its contributors may be used to endorse or promote products
38 * derived from this software without specific prior written
39 * permission.
40 *
41 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
42 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
43 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
44 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
45 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
46 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
47 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
48 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
49 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
50 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
51 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
52 */
53
54#ifndef __GREYBUS_FIRMWARE_USER_H
55#define __GREYBUS_FIRMWARE_USER_H
56
57#include <linux/ioctl.h>
58#include <linux/types.h>
59
60#define GB_FIRMWARE_U_TAG_MAX_SIZE 10
61
62#define GB_FW_U_LOAD_METHOD_UNIPRO 0x01
63#define GB_FW_U_LOAD_METHOD_INTERNAL 0x02
64
65#define GB_FW_U_LOAD_STATUS_FAILED 0x00
66#define GB_FW_U_LOAD_STATUS_UNVALIDATED 0x01
67#define GB_FW_U_LOAD_STATUS_VALIDATED 0x02
68#define GB_FW_U_LOAD_STATUS_VALIDATION_FAILED 0x03
69
70#define GB_FW_U_BACKEND_FW_STATUS_SUCCESS 0x01
71#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FIND 0x02
72#define GB_FW_U_BACKEND_FW_STATUS_FAIL_FETCH 0x03
73#define GB_FW_U_BACKEND_FW_STATUS_FAIL_WRITE 0x04
74#define GB_FW_U_BACKEND_FW_STATUS_INT 0x05
75#define GB_FW_U_BACKEND_FW_STATUS_RETRY 0x06
76#define GB_FW_U_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
77
78#define GB_FW_U_BACKEND_VERSION_STATUS_SUCCESS 0x01
79#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
80#define GB_FW_U_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
81#define GB_FW_U_BACKEND_VERSION_STATUS_RETRY 0x04
82#define GB_FW_U_BACKEND_VERSION_STATUS_FAIL_INT 0x05
83
/* IOCTL support */

/* FW_MGMT_IOC_GET_INTF_FW payload: interface firmware tag and version. */
struct fw_mgmt_ioc_get_intf_version {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u16 major;
	__u16 minor;
} __attribute__ ((__packed__));

/*
 * FW_MGMT_IOC_GET_BACKEND_FW payload; status is one of the
 * GB_FW_U_BACKEND_VERSION_STATUS_* codes.
 */
struct fw_mgmt_ioc_get_backend_version {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u16 major;
	__u16 minor;
	__u8 status;
} __attribute__ ((__packed__));

/*
 * FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE payload; load_method is one of
 * GB_FW_U_LOAD_METHOD_* and status one of GB_FW_U_LOAD_STATUS_*.
 */
struct fw_mgmt_ioc_intf_load_and_validate {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u8 load_method;
	__u8 status;
	__u16 major;
	__u16 minor;
} __attribute__ ((__packed__));

/*
 * FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE payload; status is one of the
 * GB_FW_U_BACKEND_FW_STATUS_* codes.
 */
struct fw_mgmt_ioc_backend_fw_update {
	__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
	__u8 status;
} __attribute__ ((__packed__));
110
111#define FW_MGMT_IOCTL_BASE 'F'
112#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
113#define FW_MGMT_IOC_GET_BACKEND_FW _IOWR(FW_MGMT_IOCTL_BASE, 1, struct fw_mgmt_ioc_get_backend_version)
114#define FW_MGMT_IOC_INTF_LOAD_AND_VALIDATE _IOWR(FW_MGMT_IOCTL_BASE, 2, struct fw_mgmt_ioc_intf_load_and_validate)
115#define FW_MGMT_IOC_INTF_BACKEND_FW_UPDATE _IOWR(FW_MGMT_IOCTL_BASE, 3, struct fw_mgmt_ioc_backend_fw_update)
116#define FW_MGMT_IOC_SET_TIMEOUT_MS _IOW(FW_MGMT_IOCTL_BASE, 4, unsigned int)
117#define FW_MGMT_IOC_MODE_SWITCH _IO(FW_MGMT_IOCTL_BASE, 5)
118
119#endif /* __GREYBUS_FIRMWARE_USER_H */
120
diff --git a/drivers/staging/greybus/greybus_id.h b/drivers/staging/greybus/greybus_id.h
new file mode 100644
index 000000000000..4bb1fc1b811d
--- /dev/null
+++ b/drivers/staging/greybus/greybus_id.h
@@ -0,0 +1,26 @@
1/* FIXME
2 * move this to include/linux/mod_devicetable.h when merging
3 */
4
5#ifndef __LINUX_GREYBUS_ID_H
6#define __LINUX_GREYBUS_ID_H
7
8#include <linux/types.h>
9#include <linux/mod_devicetable.h>
10
11
/*
 * Bundle match-table entry.  match_flags selects which of the fields
 * below participate in matching (see the GREYBUS_ID_MATCH_* bits).
 */
struct greybus_bundle_id {
	__u16 match_flags;
	__u32 vendor;
	__u32 product;
	__u8 class;

	/* opaque value handed back to the matching driver */
	kernel_ulong_t driver_info __aligned(sizeof(kernel_ulong_t));
};
20
21/* Used to match the greybus_bundle_id */
22#define GREYBUS_ID_MATCH_VENDOR BIT(0)
23#define GREYBUS_ID_MATCH_PRODUCT BIT(1)
24#define GREYBUS_ID_MATCH_CLASS BIT(2)
25
26#endif /* __LINUX_GREYBUS_ID_H */
diff --git a/drivers/staging/greybus/greybus_manifest.h b/drivers/staging/greybus/greybus_manifest.h
new file mode 100644
index 000000000000..d135945cefe1
--- /dev/null
+++ b/drivers/staging/greybus/greybus_manifest.h
@@ -0,0 +1,177 @@
1/*
2 * Greybus manifest definition
3 *
4 * See "Greybus Application Protocol" document (version 0.1) for
5 * details on these values and structures.
6 *
7 * Copyright 2014-2015 Google Inc.
8 * Copyright 2014-2015 Linaro Ltd.
9 *
10 * Released under the GPLv2 and BSD licenses.
11 */
12
13#ifndef __GREYBUS_MANIFEST_H
14#define __GREYBUS_MANIFEST_H
15
/* Descriptor types that may appear in a greybus manifest. */
enum greybus_descriptor_type {
	GREYBUS_TYPE_INVALID		= 0x00,
	GREYBUS_TYPE_INTERFACE		= 0x01,
	GREYBUS_TYPE_STRING		= 0x02,
	GREYBUS_TYPE_BUNDLE		= 0x03,
	GREYBUS_TYPE_CPORT		= 0x04,
};
23
/* Protocol numbers carried in greybus_descriptor_cport.protocol_id. */
enum greybus_protocol {
	GREYBUS_PROTOCOL_CONTROL	= 0x00,
	/* 0x01 is unused */
	GREYBUS_PROTOCOL_GPIO		= 0x02,
	GREYBUS_PROTOCOL_I2C		= 0x03,
	GREYBUS_PROTOCOL_UART		= 0x04,
	GREYBUS_PROTOCOL_HID		= 0x05,
	GREYBUS_PROTOCOL_USB		= 0x06,
	GREYBUS_PROTOCOL_SDIO		= 0x07,
	GREYBUS_PROTOCOL_POWER_SUPPLY	= 0x08,
	GREYBUS_PROTOCOL_PWM		= 0x09,
	/* 0x0a is unused */
	GREYBUS_PROTOCOL_SPI		= 0x0b,
	GREYBUS_PROTOCOL_DISPLAY	= 0x0c,
	GREYBUS_PROTOCOL_CAMERA_MGMT	= 0x0d,
	GREYBUS_PROTOCOL_SENSOR		= 0x0e,
	GREYBUS_PROTOCOL_LIGHTS		= 0x0f,
	GREYBUS_PROTOCOL_VIBRATOR	= 0x10,
	GREYBUS_PROTOCOL_LOOPBACK	= 0x11,
	GREYBUS_PROTOCOL_AUDIO_MGMT	= 0x12,
	GREYBUS_PROTOCOL_AUDIO_DATA	= 0x13,
	GREYBUS_PROTOCOL_SVC		= 0x14,
	GREYBUS_PROTOCOL_BOOTROM	= 0x15,
	GREYBUS_PROTOCOL_CAMERA_DATA	= 0x16,
	GREYBUS_PROTOCOL_FW_DOWNLOAD	= 0x17,
	GREYBUS_PROTOCOL_FW_MANAGEMENT	= 0x18,
	GREYBUS_PROTOCOL_AUTHENTICATION	= 0x19,
	GREYBUS_PROTOCOL_LOG		= 0x1a,
	/* ... */
	GREYBUS_PROTOCOL_RAW		= 0xfe,
	GREYBUS_PROTOCOL_VENDOR		= 0xff,
};
56
/* Class numbers carried in greybus_descriptor_bundle.class. */
enum greybus_class_type {
	GREYBUS_CLASS_CONTROL		= 0x00,
	/* 0x01 is unused */
	/* 0x02 is unused */
	/* 0x03 is unused */
	/* 0x04 is unused */
	GREYBUS_CLASS_HID		= 0x05,
	/* 0x06 is unused */
	/* 0x07 is unused */
	GREYBUS_CLASS_POWER_SUPPLY	= 0x08,
	/* 0x09 is unused */
	GREYBUS_CLASS_BRIDGED_PHY	= 0x0a,
	/* 0x0b is unused */
	GREYBUS_CLASS_DISPLAY		= 0x0c,
	GREYBUS_CLASS_CAMERA		= 0x0d,
	GREYBUS_CLASS_SENSOR		= 0x0e,
	GREYBUS_CLASS_LIGHTS		= 0x0f,
	GREYBUS_CLASS_VIBRATOR		= 0x10,
	GREYBUS_CLASS_LOOPBACK		= 0x11,
	GREYBUS_CLASS_AUDIO		= 0x12,
	/* 0x13 is unused */
	/* 0x14 is unused */
	GREYBUS_CLASS_BOOTROM		= 0x15,
	GREYBUS_CLASS_FW_MANAGEMENT	= 0x16,
	GREYBUS_CLASS_LOG		= 0x17,
	/* ... */
	GREYBUS_CLASS_RAW		= 0xfe,
	GREYBUS_CLASS_VENDOR		= 0xff,
};
86
/* Feature flags for greybus_descriptor_interface.features. */
enum {
	GREYBUS_INTERFACE_FEATURE_TIMESYNC = BIT(0),
};
90
/*
 * The string in a string descriptor is not NUL-terminated. The
 * size of the descriptor will be rounded up to a multiple of 4
 * bytes, by padding the string with 0x00 bytes if necessary.
 *
 * NOTE(review): string[0] is a GNU zero-length array rather than a C99
 * flexible array member; a flexible array member would not be legal
 * here because this struct is embedded in the union inside
 * struct greybus_descriptor below.
 */
struct greybus_descriptor_string {
	__u8 length;	/* string length in bytes (no NUL included) */
	__u8 id;	/* id other descriptors use to reference this string */
	__u8 string[0];
} __packed;
101
/*
 * An interface descriptor describes information about an interface as a whole,
 * *not* the functions within it.
 */
struct greybus_descriptor_interface {
	__u8 vendor_stringid;	/* string descriptor id (vendor) */
	__u8 product_stringid;	/* string descriptor id (product) */
	__u8 features;		/* GREYBUS_INTERFACE_FEATURE_* flags */
	__u8 pad;		/* reserved, pads struct to 4 bytes */
} __packed;
112
/*
 * A bundle descriptor defines an identification number and a class for
 * each bundle.
 *
 * @id: Uniquely identifies a bundle within an interface; its sole purpose
 * is to allow CPort descriptors to specify which bundle they are associated
 * with. The first bundle will have id 0, the second will have 1, and so on.
 *
 * The largest CPort id associated with a bundle (defined by a
 * CPort descriptor in the manifest) is used to determine how to
 * encode the device id and module number in UniPro packets
 * that use the bundle.
 *
 * @class: Used by the kernel to know the functionality provided by the
 * bundle, and matched against a driver's declared functionality while
 * probing the greybus driver. It should contain one of the values defined
 * in 'enum greybus_class_type'.
 *
 */
struct greybus_descriptor_bundle {
	__u8 id;	/* interface-relative id (0..) */
	__u8 class;	/* enum greybus_class_type */
	__u8 pad[2];	/* reserved, pads struct to 4 bytes */
} __packed;
137
/*
 * A CPort descriptor indicates the id of the bundle within the
 * module it's associated with, along with the CPort id used to
 * address the CPort. The protocol id defines the format of messages
 * exchanged using the CPort.
 */
struct greybus_descriptor_cport {
	__le16 id;		/* CPort id, little-endian */
	__u8 bundle;		/* id of the owning bundle */
	__u8 protocol_id;	/* enum greybus_protocol */
} __packed;
149
/* Common header at the start of every descriptor. */
struct greybus_descriptor_header {
	__le16 size;	/* descriptor size in bytes, little-endian */
	__u8 type;	/* enum greybus_descriptor_type */
	__u8 pad;	/* reserved */
} __packed;
155
/*
 * A full descriptor: common header followed by one of the type-specific
 * bodies, selected by header.type.
 */
struct greybus_descriptor {
	struct greybus_descriptor_header header;
	union {
		struct greybus_descriptor_string string;
		struct greybus_descriptor_interface interface;
		struct greybus_descriptor_bundle bundle;
		struct greybus_descriptor_cport cport;
	};
} __packed;
165
/* Header at the start of every manifest blob. */
struct greybus_manifest_header {
	__le16 size;		/* little-endian size field */
	__u8 version_major;	/* manifest format version */
	__u8 version_minor;
} __packed;
171
/*
 * A manifest: header followed by a packed sequence of descriptors.
 * descriptors[0] is a GNU zero-length array, consistent with the other
 * trailing-array usage in this header.
 */
struct greybus_manifest {
	struct greybus_manifest_header header;
	struct greybus_descriptor descriptors[0];
} __packed;
176
177#endif /* __GREYBUS_MANIFEST_H */
diff --git a/drivers/staging/greybus/greybus_protocols.h b/drivers/staging/greybus/greybus_protocols.h
new file mode 100644
index 000000000000..639578309c2a
--- /dev/null
+++ b/drivers/staging/greybus/greybus_protocols.h
@@ -0,0 +1,2268 @@
1/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
8 * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License version 2 for more details.
18 *
19 * BSD LICENSE
20 *
21 * Copyright(c) 2014 - 2015 Google Inc. All rights reserved.
22 * Copyright(c) 2014 - 2015 Linaro Ltd. All rights reserved.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 *
28 * * Redistributions of source code must retain the above copyright
29 * notice, this list of conditions and the following disclaimer.
30 * * Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in
32 * the documentation and/or other materials provided with the
33 * distribution.
34 * * Neither the name of Google Inc. or Linaro Ltd. nor the names of
35 * its contributors may be used to endorse or promote products
36 * derived from this software without specific prior written
37 * permission.
38 *
39 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
40 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
41 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
42 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GOOGLE INC. OR
43 * LINARO LTD. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
44 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
45 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
46 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
47 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
48 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
49 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
50 */
51
52#ifndef __GREYBUS_PROTOCOLS_H
53#define __GREYBUS_PROTOCOLS_H
54
55/* Fixed IDs for control/svc protocols */
56
57/* SVC switch-port device ids */
58#define GB_SVC_DEVICE_ID_SVC 0
59#define GB_SVC_DEVICE_ID_AP 1
60#define GB_SVC_DEVICE_ID_MIN 2
61#define GB_SVC_DEVICE_ID_MAX 31
62
63#define GB_SVC_CPORT_ID 0
64#define GB_CONTROL_BUNDLE_ID 0
65#define GB_CONTROL_CPORT_ID 0
66
67
68/*
69 * All operation messages (both requests and responses) begin with
70 * a header that encodes the size of the message (header included).
71 * This header also contains a unique identifier, that associates a
72 * response message with its operation. The header contains an
73 * operation type field, whose interpretation is dependent on what
74 * type of protocol is used over the connection. The high bit
75 * (0x80) of the operation type field is used to indicate whether
76 * the message is a request (clear) or a response (set).
77 *
78 * Response messages include an additional result byte, which
79 * communicates the result of the corresponding request. A zero
80 * result value means the operation completed successfully. Any
81 * other value indicates an error; in this case, the payload of the
82 * response message (if any) is ignored. The result byte must be
83 * zero in the header for a request message.
84 *
85 * The wire format for all numeric fields in the header is little
86 * endian. Any operation-specific data begins immediately after the
87 * header.
88 */
89struct gb_operation_msg_hdr {
90 __le16 size; /* Size in bytes of header + payload */
91 __le16 operation_id; /* Operation unique id */
92 __u8 type; /* E.g GB_I2C_TYPE_* or GB_GPIO_TYPE_* */
93 __u8 result; /* Result of request (in responses only) */
94 __u8 pad[2]; /* must be zero (ignore when read) */
95} __packed;
96
97
98/* Generic request types */
99#define GB_REQUEST_TYPE_CPORT_SHUTDOWN 0x00
100#define GB_REQUEST_TYPE_INVALID 0x7f
101
 /*
  * Payload for GB_REQUEST_TYPE_CPORT_SHUTDOWN.
  * NOTE(review): the meaning of 'phase' is not defined in this header —
  * confirm its values against the connection shutdown code.
  */
102struct gb_cport_shutdown_request {
103 __u8 phase;
104} __packed;
105
106
107/* Control Protocol */
108
109/* Greybus control request types */
110#define GB_CONTROL_TYPE_VERSION 0x01
111#define GB_CONTROL_TYPE_PROBE_AP 0x02
112#define GB_CONTROL_TYPE_GET_MANIFEST_SIZE 0x03
113#define GB_CONTROL_TYPE_GET_MANIFEST 0x04
114#define GB_CONTROL_TYPE_CONNECTED 0x05
115#define GB_CONTROL_TYPE_DISCONNECTED 0x06
116#define GB_CONTROL_TYPE_TIMESYNC_ENABLE 0x07
117#define GB_CONTROL_TYPE_TIMESYNC_DISABLE 0x08
118#define GB_CONTROL_TYPE_TIMESYNC_AUTHORITATIVE 0x09
119/* Unused 0x0a */
120#define GB_CONTROL_TYPE_BUNDLE_VERSION 0x0b
121#define GB_CONTROL_TYPE_DISCONNECTING 0x0c
122#define GB_CONTROL_TYPE_TIMESYNC_GET_LAST_EVENT 0x0d
123#define GB_CONTROL_TYPE_MODE_SWITCH 0x0e
124#define GB_CONTROL_TYPE_BUNDLE_SUSPEND 0x0f
125#define GB_CONTROL_TYPE_BUNDLE_RESUME 0x10
126#define GB_CONTROL_TYPE_BUNDLE_DEACTIVATE 0x11
127#define GB_CONTROL_TYPE_BUNDLE_ACTIVATE 0x12
128#define GB_CONTROL_TYPE_INTF_SUSPEND_PREPARE 0x13
129#define GB_CONTROL_TYPE_INTF_DEACTIVATE_PREPARE 0x14
130#define GB_CONTROL_TYPE_INTF_HIBERNATE_ABORT 0x15
131
132struct gb_control_version_request {
133 __u8 major;
134 __u8 minor;
135} __packed;
136
137struct gb_control_version_response {
138 __u8 major;
139 __u8 minor;
140} __packed;
141
142struct gb_control_bundle_version_request {
143 __u8 bundle_id;
144} __packed;
145
146struct gb_control_bundle_version_response {
147 __u8 major;
148 __u8 minor;
149} __packed;
150
151/* Control protocol manifest get size request has no payload */
152struct gb_control_get_manifest_size_response {
153 __le16 size;
154} __packed;
155
156/* Control protocol manifest get request has no payload */
157struct gb_control_get_manifest_response {
158 __u8 data[0];
159} __packed;
160
161/* Control protocol [dis]connected request */
162struct gb_control_connected_request {
163 __le16 cport_id;
164} __packed;
165
166struct gb_control_disconnecting_request {
167 __le16 cport_id;
168} __packed;
169/* disconnecting response has no payload */
170
171struct gb_control_disconnected_request {
172 __le16 cport_id;
173} __packed;
174/* Control protocol [dis]connected response has no payload */
175
176#define GB_TIMESYNC_MAX_STROBES 0x04
177
178struct gb_control_timesync_enable_request {
179 __u8 count;
180 __le64 frame_time;
181 __le32 strobe_delay;
182 __le32 refclk;
183} __packed;
184/* timesync enable response has no payload */
185
186struct gb_control_timesync_authoritative_request {
187 __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
188} __packed;
189/* timesync authoritative response has no payload */
190
191/* timesync get_last_event_request has no payload */
192struct gb_control_timesync_get_last_event_response {
193 __le64 frame_time;
194} __packed;
195
196/*
197 * All Bundle power management operations use the same request and response
198 * layout and status codes.
199 */
200
201#define GB_CONTROL_BUNDLE_PM_OK 0x00
202#define GB_CONTROL_BUNDLE_PM_INVAL 0x01
203#define GB_CONTROL_BUNDLE_PM_BUSY 0x02
204#define GB_CONTROL_BUNDLE_PM_FAIL 0x03
205#define GB_CONTROL_BUNDLE_PM_NA 0x04
206
207struct gb_control_bundle_pm_request {
208 __u8 bundle_id;
209} __packed;
210
211struct gb_control_bundle_pm_response {
212 __u8 status;
213} __packed;
214
215/*
216 * Interface Suspend Prepare and Deactivate Prepare operations use the same
217 * response layout and error codes. Define a single response structure and reuse
218 * it. Both operations have no payload.
219 */
220
221#define GB_CONTROL_INTF_PM_OK 0x00
222#define GB_CONTROL_INTF_PM_BUSY 0x01
223#define GB_CONTROL_INTF_PM_NA 0x02
224
225struct gb_control_intf_pm_response {
226 __u8 status;
227} __packed;
228
229/* APBridge protocol */
230
231/* request APB1 log */
232#define GB_APB_REQUEST_LOG 0x02
233
234/* request to map a cport to bulk in and bulk out endpoints */
235#define GB_APB_REQUEST_EP_MAPPING 0x03
236
237/* request to get the number of cports available */
238#define GB_APB_REQUEST_CPORT_COUNT 0x04
239
240/* request to reset a cport state */
241#define GB_APB_REQUEST_RESET_CPORT 0x05
242
243/* request to time the latency of messages on a given cport */
244#define GB_APB_REQUEST_LATENCY_TAG_EN 0x06
245#define GB_APB_REQUEST_LATENCY_TAG_DIS 0x07
246
247/* request to control the CSI transmitter */
248#define GB_APB_REQUEST_CSI_TX_CONTROL 0x08
249
250/* request to control audio streaming */
251#define GB_APB_REQUEST_AUDIO_CONTROL 0x09
252
253/* TimeSync requests */
254#define GB_APB_REQUEST_TIMESYNC_ENABLE 0x0d
255#define GB_APB_REQUEST_TIMESYNC_DISABLE 0x0e
256#define GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE 0x0f
257#define GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT 0x10
258
259/* requests to set Greybus CPort flags */
260#define GB_APB_REQUEST_CPORT_FLAGS 0x11
261
262/* ARPC request */
263#define GB_APB_REQUEST_ARPC_RUN 0x12
264
 /*
  * Payload for GB_APB_REQUEST_CPORT_FLAGS; 'flags' is a bitwise OR of
  * the GB_APB_CPORT_FLAG_* values below (defined inline next to the
  * field, per this file's convention).
  */
265struct gb_apb_request_cport_flags {
266 __le32 flags;
267#define GB_APB_CPORT_FLAG_CONTROL 0x01
268#define GB_APB_CPORT_FLAG_HIGH_PRIO 0x02
269} __packed;
270
271
272/* Firmware Download Protocol */
273
274/* Request Types */
275#define GB_FW_DOWNLOAD_TYPE_FIND_FIRMWARE 0x01
276#define GB_FW_DOWNLOAD_TYPE_FETCH_FIRMWARE 0x02
277#define GB_FW_DOWNLOAD_TYPE_RELEASE_FIRMWARE 0x03
278
279#define GB_FIRMWARE_TAG_MAX_SIZE 10
280
281/* firmware download find firmware request/response */
282struct gb_fw_download_find_firmware_request {
283 __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
284} __packed;
285
286struct gb_fw_download_find_firmware_response {
287 __u8 firmware_id;
288 __le32 size;
289} __packed;
290
291/* firmware download fetch firmware request/response */
292struct gb_fw_download_fetch_firmware_request {
293 __u8 firmware_id;
294 __le32 offset;
295 __le32 size;
296} __packed;
297
298struct gb_fw_download_fetch_firmware_response {
299 __u8 data[0];
300} __packed;
301
302/* firmware download release firmware request */
303struct gb_fw_download_release_firmware_request {
304 __u8 firmware_id;
305} __packed;
306/* firmware download release firmware response has no payload */
307
308
309/* Firmware Management Protocol */
310
311/* Request Types */
312#define GB_FW_MGMT_TYPE_INTERFACE_FW_VERSION 0x01
313#define GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW 0x02
314#define GB_FW_MGMT_TYPE_LOADED_FW 0x03
315#define GB_FW_MGMT_TYPE_BACKEND_FW_VERSION 0x04
316#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE 0x05
317#define GB_FW_MGMT_TYPE_BACKEND_FW_UPDATED 0x06
318
319#define GB_FW_LOAD_METHOD_UNIPRO 0x01
320#define GB_FW_LOAD_METHOD_INTERNAL 0x02
321
322#define GB_FW_LOAD_STATUS_FAILED 0x00
323#define GB_FW_LOAD_STATUS_UNVALIDATED 0x01
324#define GB_FW_LOAD_STATUS_VALIDATED 0x02
325#define GB_FW_LOAD_STATUS_VALIDATION_FAILED 0x03
326
327#define GB_FW_BACKEND_FW_STATUS_SUCCESS 0x01
328#define GB_FW_BACKEND_FW_STATUS_FAIL_FIND 0x02
329#define GB_FW_BACKEND_FW_STATUS_FAIL_FETCH 0x03
330#define GB_FW_BACKEND_FW_STATUS_FAIL_WRITE 0x04
331#define GB_FW_BACKEND_FW_STATUS_INT 0x05
332#define GB_FW_BACKEND_FW_STATUS_RETRY 0x06
333#define GB_FW_BACKEND_FW_STATUS_NOT_SUPPORTED 0x07
334
335#define GB_FW_BACKEND_VERSION_STATUS_SUCCESS 0x01
336#define GB_FW_BACKEND_VERSION_STATUS_NOT_AVAILABLE 0x02
337#define GB_FW_BACKEND_VERSION_STATUS_NOT_SUPPORTED 0x03
338#define GB_FW_BACKEND_VERSION_STATUS_RETRY 0x04
339#define GB_FW_BACKEND_VERSION_STATUS_FAIL_INT 0x05
340
341/* firmware management interface firmware version request has no payload */
342struct gb_fw_mgmt_interface_fw_version_response {
343 __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
344 __le16 major;
345 __le16 minor;
346} __packed;
347
348/* firmware management load and validate firmware request/response */
349struct gb_fw_mgmt_load_and_validate_fw_request {
350 __u8 request_id;
351 __u8 load_method;
352 __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
353} __packed;
354/* firmware management load and validate firmware response has no payload */
355
356/* firmware management loaded firmware request */
357struct gb_fw_mgmt_loaded_fw_request {
358 __u8 request_id;
359 __u8 status;
360 __le16 major;
361 __le16 minor;
362} __packed;
363/* firmware management loaded firmware response has no payload */
364
365/* firmware management backend firmware version request/response */
366struct gb_fw_mgmt_backend_fw_version_request {
367 __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
368} __packed;
369
370struct gb_fw_mgmt_backend_fw_version_response {
371 __le16 major;
372 __le16 minor;
373 __u8 status;
374} __packed;
375
376/* firmware management backend firmware update request */
377struct gb_fw_mgmt_backend_fw_update_request {
378 __u8 request_id;
379 __u8 firmware_tag[GB_FIRMWARE_TAG_MAX_SIZE];
380} __packed;
381/* firmware management backend firmware update response has no payload */
382
383/* firmware management backend firmware updated request */
384struct gb_fw_mgmt_backend_fw_updated_request {
385 __u8 request_id;
386 __u8 status;
387} __packed;
388/* firmware management backend firmware updated response has no payload */
389
390
391/* Component Authentication Protocol (CAP) */
392
393/* Request Types */
394#define GB_CAP_TYPE_GET_ENDPOINT_UID 0x01
395#define GB_CAP_TYPE_GET_IMS_CERTIFICATE 0x02
396#define GB_CAP_TYPE_AUTHENTICATE 0x03
397
398/* CAP get endpoint uid request has no payload */
399struct gb_cap_get_endpoint_uid_response {
400 __u8 uid[8];
401} __packed;
402
403/* CAP get endpoint ims certificate request/response */
404struct gb_cap_get_ims_certificate_request {
405 __le32 certificate_class;
406 __le32 certificate_id;
407} __packed;
408
409struct gb_cap_get_ims_certificate_response {
410 __u8 result_code;
411 __u8 certificate[0];
412} __packed;
413
414/* CAP authenticate request/response */
415struct gb_cap_authenticate_request {
416 __le32 auth_type;
417 __u8 uid[8];
418 __u8 challenge[32];
419} __packed;
420
421struct gb_cap_authenticate_response {
422 __u8 result_code;
423 __u8 response[64];
424 __u8 signature[0];
425} __packed;
426
427
428/* Bootrom Protocol */
429
430/* Version of the Greybus bootrom protocol we support */
431#define GB_BOOTROM_VERSION_MAJOR 0x00
432#define GB_BOOTROM_VERSION_MINOR 0x01
433
434/* Greybus bootrom request types */
435#define GB_BOOTROM_TYPE_VERSION 0x01
436#define GB_BOOTROM_TYPE_FIRMWARE_SIZE 0x02
437#define GB_BOOTROM_TYPE_GET_FIRMWARE 0x03
438#define GB_BOOTROM_TYPE_READY_TO_BOOT 0x04
439#define GB_BOOTROM_TYPE_AP_READY 0x05 /* Request with no-payload */
440#define GB_BOOTROM_TYPE_GET_VID_PID 0x06 /* Request with no-payload */
441
442/* Greybus bootrom boot stages */
443#define GB_BOOTROM_BOOT_STAGE_ONE 0x01 /* Reserved for the boot ROM */
444#define GB_BOOTROM_BOOT_STAGE_TWO 0x02 /* Bootrom package to be loaded by the boot ROM */
445#define GB_BOOTROM_BOOT_STAGE_THREE 0x03 /* Module personality package loaded by Stage 2 firmware */
446
447/* Greybus bootrom ready to boot status */
448#define GB_BOOTROM_BOOT_STATUS_INVALID 0x00 /* Firmware blob could not be validated */
449#define GB_BOOTROM_BOOT_STATUS_INSECURE 0x01 /* Firmware blob is valid but insecure */
450#define GB_BOOTROM_BOOT_STATUS_SECURE 0x02 /* Firmware blob is valid and secure */
451
452/* Max bootrom data fetch size in bytes */
453#define GB_BOOTROM_FETCH_MAX 2000
454
455struct gb_bootrom_version_request {
456 __u8 major;
457 __u8 minor;
458} __packed;
459
460struct gb_bootrom_version_response {
461 __u8 major;
462 __u8 minor;
463} __packed;
464
465/* Bootrom protocol firmware size request/response */
466struct gb_bootrom_firmware_size_request {
467 __u8 stage;
468} __packed;
469
470struct gb_bootrom_firmware_size_response {
471 __le32 size;
472} __packed;
473
474/* Bootrom protocol get firmware request/response */
475struct gb_bootrom_get_firmware_request {
476 __le32 offset;
477 __le32 size;
478} __packed;
479
480struct gb_bootrom_get_firmware_response {
481 __u8 data[0];
482} __packed;
483
484/* Bootrom protocol Ready to boot request */
485struct gb_bootrom_ready_to_boot_request {
486 __u8 status;
487} __packed;
488/* Bootrom protocol Ready to boot response has no payload */
489
490/* Bootrom protocol get VID/PID request has no payload */
491struct gb_bootrom_get_vid_pid_response {
492 __le32 vendor_id;
493 __le32 product_id;
494} __packed;
495
496
497/* Power Supply */
498
499/* Greybus power supply request types */
500#define GB_POWER_SUPPLY_TYPE_GET_SUPPLIES 0x02
501#define GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION 0x03
502#define GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS 0x04
503#define GB_POWER_SUPPLY_TYPE_GET_PROPERTY 0x05
504#define GB_POWER_SUPPLY_TYPE_SET_PROPERTY 0x06
505#define GB_POWER_SUPPLY_TYPE_EVENT 0x07
506
507/* Greybus power supply battery technologies types */
508#define GB_POWER_SUPPLY_TECH_UNKNOWN 0x0000
509#define GB_POWER_SUPPLY_TECH_NiMH 0x0001
510#define GB_POWER_SUPPLY_TECH_LION 0x0002
511#define GB_POWER_SUPPLY_TECH_LIPO 0x0003
512#define GB_POWER_SUPPLY_TECH_LiFe 0x0004
513#define GB_POWER_SUPPLY_TECH_NiCd 0x0005
514#define GB_POWER_SUPPLY_TECH_LiMn 0x0006
515
516/* Greybus power supply types */
517#define GB_POWER_SUPPLY_UNKNOWN_TYPE 0x0000
518#define GB_POWER_SUPPLY_BATTERY_TYPE 0x0001
519#define GB_POWER_SUPPLY_UPS_TYPE 0x0002
520#define GB_POWER_SUPPLY_MAINS_TYPE 0x0003
521#define GB_POWER_SUPPLY_USB_TYPE 0x0004
522#define GB_POWER_SUPPLY_USB_DCP_TYPE 0x0005
523#define GB_POWER_SUPPLY_USB_CDP_TYPE 0x0006
524#define GB_POWER_SUPPLY_USB_ACA_TYPE 0x0007
525
526/* Greybus power supply health values */
527#define GB_POWER_SUPPLY_HEALTH_UNKNOWN 0x0000
528#define GB_POWER_SUPPLY_HEALTH_GOOD 0x0001
529#define GB_POWER_SUPPLY_HEALTH_OVERHEAT 0x0002
530#define GB_POWER_SUPPLY_HEALTH_DEAD 0x0003
531#define GB_POWER_SUPPLY_HEALTH_OVERVOLTAGE 0x0004
532#define GB_POWER_SUPPLY_HEALTH_UNSPEC_FAILURE 0x0005
533#define GB_POWER_SUPPLY_HEALTH_COLD 0x0006
534#define GB_POWER_SUPPLY_HEALTH_WATCHDOG_TIMER_EXPIRE 0x0007
535#define GB_POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE 0x0008
536
537/* Greybus power supply status values */
538#define GB_POWER_SUPPLY_STATUS_UNKNOWN 0x0000
539#define GB_POWER_SUPPLY_STATUS_CHARGING 0x0001
540#define GB_POWER_SUPPLY_STATUS_DISCHARGING 0x0002
541#define GB_POWER_SUPPLY_STATUS_NOT_CHARGING 0x0003
542#define GB_POWER_SUPPLY_STATUS_FULL 0x0004
543
544/* Greybus power supply capacity level values */
545#define GB_POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN 0x0000
546#define GB_POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL 0x0001
547#define GB_POWER_SUPPLY_CAPACITY_LEVEL_LOW 0x0002
548#define GB_POWER_SUPPLY_CAPACITY_LEVEL_NORMAL 0x0003
549#define GB_POWER_SUPPLY_CAPACITY_LEVEL_HIGH 0x0004
550#define GB_POWER_SUPPLY_CAPACITY_LEVEL_FULL 0x0005
551
552/* Greybus power supply scope values */
553#define GB_POWER_SUPPLY_SCOPE_UNKNOWN 0x0000
554#define GB_POWER_SUPPLY_SCOPE_SYSTEM 0x0001
555#define GB_POWER_SUPPLY_SCOPE_DEVICE 0x0002
556
557struct gb_power_supply_get_supplies_response {
558 __u8 supplies_count;
559} __packed;
560
561struct gb_power_supply_get_description_request {
562 __u8 psy_id;
563} __packed;
564
565struct gb_power_supply_get_description_response {
566 __u8 manufacturer[32];
567 __u8 model[32];
568 __u8 serial_number[32];
569 __le16 type;
570 __u8 properties_count;
571} __packed;
572
573struct gb_power_supply_props_desc {
574 __u8 property;
575#define GB_POWER_SUPPLY_PROP_STATUS 0x00
576#define GB_POWER_SUPPLY_PROP_CHARGE_TYPE 0x01
577#define GB_POWER_SUPPLY_PROP_HEALTH 0x02
578#define GB_POWER_SUPPLY_PROP_PRESENT 0x03
579#define GB_POWER_SUPPLY_PROP_ONLINE 0x04
580#define GB_POWER_SUPPLY_PROP_AUTHENTIC 0x05
581#define GB_POWER_SUPPLY_PROP_TECHNOLOGY 0x06
582#define GB_POWER_SUPPLY_PROP_CYCLE_COUNT 0x07
583#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX 0x08
584#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN 0x09
585#define GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN 0x0A
586#define GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN 0x0B
587#define GB_POWER_SUPPLY_PROP_VOLTAGE_NOW 0x0C
588#define GB_POWER_SUPPLY_PROP_VOLTAGE_AVG 0x0D
589#define GB_POWER_SUPPLY_PROP_VOLTAGE_OCV 0x0E
590#define GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT 0x0F
591#define GB_POWER_SUPPLY_PROP_CURRENT_MAX 0x10
592#define GB_POWER_SUPPLY_PROP_CURRENT_NOW 0x11
593#define GB_POWER_SUPPLY_PROP_CURRENT_AVG 0x12
594#define GB_POWER_SUPPLY_PROP_CURRENT_BOOT 0x13
595#define GB_POWER_SUPPLY_PROP_POWER_NOW 0x14
596#define GB_POWER_SUPPLY_PROP_POWER_AVG 0x15
597#define GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN 0x16
598#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN 0x17
599#define GB_POWER_SUPPLY_PROP_CHARGE_FULL 0x18
600#define GB_POWER_SUPPLY_PROP_CHARGE_EMPTY 0x19
601#define GB_POWER_SUPPLY_PROP_CHARGE_NOW 0x1A
602#define GB_POWER_SUPPLY_PROP_CHARGE_AVG 0x1B
603#define GB_POWER_SUPPLY_PROP_CHARGE_COUNTER 0x1C
604#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT 0x1D
605#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX 0x1E
606#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE 0x1F
607#define GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX 0x20
608#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT 0x21
609#define GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX 0x22
610#define GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT 0x23
611#define GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN 0x24
612#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN 0x25
613#define GB_POWER_SUPPLY_PROP_ENERGY_FULL 0x26
614#define GB_POWER_SUPPLY_PROP_ENERGY_EMPTY 0x27
615#define GB_POWER_SUPPLY_PROP_ENERGY_NOW 0x28
616#define GB_POWER_SUPPLY_PROP_ENERGY_AVG 0x29
617#define GB_POWER_SUPPLY_PROP_CAPACITY 0x2A
618#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN 0x2B
619#define GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX 0x2C
620#define GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL 0x2D
621#define GB_POWER_SUPPLY_PROP_TEMP 0x2E
622#define GB_POWER_SUPPLY_PROP_TEMP_MAX 0x2F
623#define GB_POWER_SUPPLY_PROP_TEMP_MIN 0x30
624#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN 0x31
625#define GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX 0x32
626#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT 0x33
627#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN 0x34
628#define GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX 0x35
629#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW 0x36
630#define GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG 0x37
631#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW 0x38
632#define GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG 0x39
633#define GB_POWER_SUPPLY_PROP_TYPE 0x3A
634#define GB_POWER_SUPPLY_PROP_SCOPE 0x3B
635#define GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT 0x3C
636#define GB_POWER_SUPPLY_PROP_CALIBRATE 0x3D
637 __u8 is_writeable;
638} __packed;
639
640struct gb_power_supply_get_property_descriptors_request {
641 __u8 psy_id;
642} __packed;
643
644struct gb_power_supply_get_property_descriptors_response {
645 __u8 properties_count;
646 struct gb_power_supply_props_desc props[];
647} __packed;
648
649struct gb_power_supply_get_property_request {
650 __u8 psy_id;
651 __u8 property;
652} __packed;
653
654struct gb_power_supply_get_property_response {
655 __le32 prop_val;
656};
657
658struct gb_power_supply_set_property_request {
659 __u8 psy_id;
660 __u8 property;
661 __le32 prop_val;
662} __packed;
663
 /*
  * Unsolicited event for GB_POWER_SUPPLY_TYPE_EVENT; 'event' carries
  * the GB_POWER_SUPPLY_UPDATE bit defined inline below, per this
  * file's convention.
  */
664struct gb_power_supply_event_request {
665 __u8 psy_id;
666 __u8 event;
667#define GB_POWER_SUPPLY_UPDATE 0x01
668} __packed;
669
670
671/* HID */
672
673/* Greybus HID operation types */
674#define GB_HID_TYPE_GET_DESC 0x02
675#define GB_HID_TYPE_GET_REPORT_DESC 0x03
676#define GB_HID_TYPE_PWR_ON 0x04
677#define GB_HID_TYPE_PWR_OFF 0x05
678#define GB_HID_TYPE_GET_REPORT 0x06
679#define GB_HID_TYPE_SET_REPORT 0x07
680#define GB_HID_TYPE_IRQ_EVENT 0x08
681
682/* Report type */
683#define GB_HID_INPUT_REPORT 0
684#define GB_HID_OUTPUT_REPORT 1
685#define GB_HID_FEATURE_REPORT 2
686
687/* Different request/response structures */
688/* HID get descriptor response */
689struct gb_hid_desc_response {
690 __u8 bLength;
691 __le16 wReportDescLength;
692 __le16 bcdHID;
693 __le16 wProductID;
694 __le16 wVendorID;
695 __u8 bCountryCode;
696} __packed;
697
698/* HID get report request/response */
699struct gb_hid_get_report_request {
700 __u8 report_type;
701 __u8 report_id;
702} __packed;
703
704/* HID set report request */
705struct gb_hid_set_report_request {
706 __u8 report_type;
707 __u8 report_id;
708 __u8 report[0];
709} __packed;
710
711/* HID input report request, via interrupt pipe */
712struct gb_hid_input_report_request {
713 __u8 report[0];
714} __packed;
715
716
717/* I2C */
718
719/* Greybus i2c request types */
720#define GB_I2C_TYPE_FUNCTIONALITY 0x02
721#define GB_I2C_TYPE_TRANSFER 0x05
722
723/* functionality request has no payload */
724struct gb_i2c_functionality_response {
725 __le32 functionality;
726} __packed;
727
728/*
729 * Outgoing data immediately follows the op count and ops array.
730 * The data for each write (master -> slave) op in the array is sent
731 * in order, with no (e.g. pad) bytes separating them.
732 *
733 * Short reads cause the entire transfer request to fail So response
734 * payload consists only of bytes read, and the number of bytes is
735 * exactly what was specified in the corresponding op. Like
736 * outgoing data, the incoming data is in order and contiguous.
737 */
738struct gb_i2c_transfer_op {
739 __le16 addr;
740 __le16 flags;
741 __le16 size;
742} __packed;
743
744struct gb_i2c_transfer_request {
745 __le16 op_count;
746 struct gb_i2c_transfer_op ops[0]; /* op_count of these */
747} __packed;
748struct gb_i2c_transfer_response {
749 __u8 data[0]; /* inbound data */
750} __packed;
751
752
753/* GPIO */
754
755/* Greybus GPIO request types */
756#define GB_GPIO_TYPE_LINE_COUNT 0x02
757#define GB_GPIO_TYPE_ACTIVATE 0x03
758#define GB_GPIO_TYPE_DEACTIVATE 0x04
759#define GB_GPIO_TYPE_GET_DIRECTION 0x05
760#define GB_GPIO_TYPE_DIRECTION_IN 0x06
761#define GB_GPIO_TYPE_DIRECTION_OUT 0x07
762#define GB_GPIO_TYPE_GET_VALUE 0x08
763#define GB_GPIO_TYPE_SET_VALUE 0x09
764#define GB_GPIO_TYPE_SET_DEBOUNCE 0x0a
765#define GB_GPIO_TYPE_IRQ_TYPE 0x0b
766#define GB_GPIO_TYPE_IRQ_MASK 0x0c
767#define GB_GPIO_TYPE_IRQ_UNMASK 0x0d
768#define GB_GPIO_TYPE_IRQ_EVENT 0x0e
769
770#define GB_GPIO_IRQ_TYPE_NONE 0x00
771#define GB_GPIO_IRQ_TYPE_EDGE_RISING 0x01
772#define GB_GPIO_IRQ_TYPE_EDGE_FALLING 0x02
773#define GB_GPIO_IRQ_TYPE_EDGE_BOTH 0x03
774#define GB_GPIO_IRQ_TYPE_LEVEL_HIGH 0x04
775#define GB_GPIO_IRQ_TYPE_LEVEL_LOW 0x08
776
777/* line count request has no payload */
778struct gb_gpio_line_count_response {
779 __u8 count;
780} __packed;
781
782struct gb_gpio_activate_request {
783 __u8 which;
784} __packed;
785/* activate response has no payload */
786
787struct gb_gpio_deactivate_request {
788 __u8 which;
789} __packed;
790/* deactivate response has no payload */
791
792struct gb_gpio_get_direction_request {
793 __u8 which;
794} __packed;
795struct gb_gpio_get_direction_response {
796 __u8 direction;
797} __packed;
798
799struct gb_gpio_direction_in_request {
800 __u8 which;
801} __packed;
802/* direction in response has no payload */
803
804struct gb_gpio_direction_out_request {
805 __u8 which;
806 __u8 value;
807} __packed;
808/* direction out response has no payload */
809
810struct gb_gpio_get_value_request {
811 __u8 which;
812} __packed;
813struct gb_gpio_get_value_response {
814 __u8 value;
815} __packed;
816
817struct gb_gpio_set_value_request {
818 __u8 which;
819 __u8 value;
820} __packed;
821/* set value response has no payload */
822
823struct gb_gpio_set_debounce_request {
824 __u8 which;
825 __le16 usec;
826} __packed;
827/* debounce response has no payload */
828
829struct gb_gpio_irq_type_request {
830 __u8 which;
831 __u8 type;
832} __packed;
833/* irq type response has no payload */
834
835struct gb_gpio_irq_mask_request {
836 __u8 which;
837} __packed;
838/* irq mask response has no payload */
839
840struct gb_gpio_irq_unmask_request {
841 __u8 which;
842} __packed;
843/* irq unmask response has no payload */
844
845/* irq event requests originate on another module and are handled on the AP */
846struct gb_gpio_irq_event_request {
847 __u8 which;
848} __packed;
849/* irq event has no response */
850
851
852/* PWM */
853
854/* Greybus PWM operation types */
855#define GB_PWM_TYPE_PWM_COUNT 0x02
856#define GB_PWM_TYPE_ACTIVATE 0x03
857#define GB_PWM_TYPE_DEACTIVATE 0x04
858#define GB_PWM_TYPE_CONFIG 0x05
859#define GB_PWM_TYPE_POLARITY 0x06
860#define GB_PWM_TYPE_ENABLE 0x07
861#define GB_PWM_TYPE_DISABLE 0x08
862
863/* pwm count request has no payload */
864struct gb_pwm_count_response {
865 __u8 count;
866} __packed;
867
868struct gb_pwm_activate_request {
869 __u8 which;
870} __packed;
871
872struct gb_pwm_deactivate_request {
873 __u8 which;
874} __packed;
875
876struct gb_pwm_config_request {
877 __u8 which;
878 __le32 duty;
879 __le32 period;
880} __packed;
881
882struct gb_pwm_polarity_request {
883 __u8 which;
884 __u8 polarity;
885} __packed;
886
887struct gb_pwm_enable_request {
888 __u8 which;
889} __packed;
890
891struct gb_pwm_disable_request {
892 __u8 which;
893} __packed;
894
895/* SPI */
896
897/* Should match up with modes in linux/spi/spi.h */
898#define GB_SPI_MODE_CPHA 0x01 /* clock phase */
899#define GB_SPI_MODE_CPOL 0x02 /* clock polarity */
900#define GB_SPI_MODE_MODE_0 (0|0) /* (original MicroWire) */
901#define GB_SPI_MODE_MODE_1 (0|GB_SPI_MODE_CPHA)
902#define GB_SPI_MODE_MODE_2 (GB_SPI_MODE_CPOL|0)
903#define GB_SPI_MODE_MODE_3 (GB_SPI_MODE_CPOL|GB_SPI_MODE_CPHA)
904#define GB_SPI_MODE_CS_HIGH 0x04 /* chipselect active high? */
905#define GB_SPI_MODE_LSB_FIRST 0x08 /* per-word bits-on-wire */
906#define GB_SPI_MODE_3WIRE 0x10 /* SI/SO signals shared */
907#define GB_SPI_MODE_LOOP 0x20 /* loopback mode */
908#define GB_SPI_MODE_NO_CS 0x40 /* 1 dev/bus, no chipselect */
909#define GB_SPI_MODE_READY 0x80 /* slave pulls low to pause */
910
911/* Should match up with flags in linux/spi/spi.h */
912#define GB_SPI_FLAG_HALF_DUPLEX BIT(0) /* can't do full duplex */
913#define GB_SPI_FLAG_NO_RX BIT(1) /* can't do buffer read */
914#define GB_SPI_FLAG_NO_TX BIT(2) /* can't do buffer write */
915
916/* Greybus spi operation types */
917#define GB_SPI_TYPE_MASTER_CONFIG 0x02
918#define GB_SPI_TYPE_DEVICE_CONFIG 0x03
919#define GB_SPI_TYPE_TRANSFER 0x04
920
921/* mode request has no payload */
922struct gb_spi_master_config_response {
923 __le32 bits_per_word_mask;
924 __le32 min_speed_hz;
925 __le32 max_speed_hz;
926 __le16 mode;
927 __le16 flags;
928 __u8 num_chipselect;
929} __packed;
930
931struct gb_spi_device_config_request {
932 __u8 chip_select;
933} __packed;
934
935struct gb_spi_device_config_response {
936 __le16 mode;
937 __u8 bits_per_word;
938 __le32 max_speed_hz;
939 __u8 device_type;
940#define GB_SPI_SPI_DEV 0x00
941#define GB_SPI_SPI_NOR 0x01
942#define GB_SPI_SPI_MODALIAS 0x02
943 __u8 name[32];
944} __packed;
945
946/**
947 * struct gb_spi_transfer - a read/write buffer pair
948 * @speed_hz: Select a speed other than the device default for this transfer. If
949 * 0 the default (from @spi_device) is used.
950 * @len: size of rx and tx buffers (in bytes)
951 * @delay_usecs: microseconds to delay after this transfer before (optionally)
952 * changing the chipselect status, then starting the next transfer or
953 * completing this spi_message.
954 * @cs_change: affects chipselect after this transfer completes
955 * @bits_per_word: select a bits_per_word other than the device default for this
956 * transfer. If 0 the default (from @spi_device) is used.
957 */
958struct gb_spi_transfer {
959 __le32 speed_hz;
960 __le32 len;
961 __le16 delay_usecs;
962 __u8 cs_change;
963 __u8 bits_per_word;
964 __u8 xfer_flags;
965#define GB_SPI_XFER_READ 0x01
966#define GB_SPI_XFER_WRITE 0x02
967#define GB_SPI_XFER_INPROGRESS 0x04
968} __packed;
969
/*
 * A transfer request carries @count transfer descriptors; write data
 * presumably follows the descriptor array on the wire — confirm against
 * the spi driver's message assembly.
 */
struct gb_spi_transfer_request {
	__u8			chip_select;	/* of the spi device */
	__u8			mode;		/* of the spi device */
	__le16			count;
	struct gb_spi_transfer	transfers[0];	/* count of these */
} __packed;
976
977struct gb_spi_transfer_response {
978 __u8 data[0]; /* inbound data */
979} __packed;
980
981/* Version of the Greybus SVC protocol we support */
982#define GB_SVC_VERSION_MAJOR 0x00
983#define GB_SVC_VERSION_MINOR 0x01
984
985/* Greybus SVC request types */
986#define GB_SVC_TYPE_PROTOCOL_VERSION 0x01
987#define GB_SVC_TYPE_SVC_HELLO 0x02
988#define GB_SVC_TYPE_INTF_DEVICE_ID 0x03
989#define GB_SVC_TYPE_INTF_RESET 0x06
990#define GB_SVC_TYPE_CONN_CREATE 0x07
991#define GB_SVC_TYPE_CONN_DESTROY 0x08
992#define GB_SVC_TYPE_DME_PEER_GET 0x09
993#define GB_SVC_TYPE_DME_PEER_SET 0x0a
994#define GB_SVC_TYPE_ROUTE_CREATE 0x0b
995#define GB_SVC_TYPE_ROUTE_DESTROY 0x0c
996#define GB_SVC_TYPE_TIMESYNC_ENABLE 0x0d
997#define GB_SVC_TYPE_TIMESYNC_DISABLE 0x0e
998#define GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE 0x0f
999#define GB_SVC_TYPE_INTF_SET_PWRM 0x10
1000#define GB_SVC_TYPE_INTF_EJECT 0x11
1001#define GB_SVC_TYPE_PING 0x13
1002#define GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET 0x14
1003#define GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET 0x15
1004#define GB_SVC_TYPE_PWRMON_SAMPLE_GET 0x16
1005#define GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET 0x17
1006#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE 0x18
1007#define GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE 0x19
1008#define GB_SVC_TYPE_TIMESYNC_PING 0x1a
1009#define GB_SVC_TYPE_MODULE_INSERTED 0x1f
1010#define GB_SVC_TYPE_MODULE_REMOVED 0x20
1011#define GB_SVC_TYPE_INTF_VSYS_ENABLE 0x21
1012#define GB_SVC_TYPE_INTF_VSYS_DISABLE 0x22
1013#define GB_SVC_TYPE_INTF_REFCLK_ENABLE 0x23
1014#define GB_SVC_TYPE_INTF_REFCLK_DISABLE 0x24
1015#define GB_SVC_TYPE_INTF_UNIPRO_ENABLE 0x25
1016#define GB_SVC_TYPE_INTF_UNIPRO_DISABLE 0x26
1017#define GB_SVC_TYPE_INTF_ACTIVATE 0x27
1018#define GB_SVC_TYPE_INTF_RESUME 0x28
1019#define GB_SVC_TYPE_INTF_MAILBOX_EVENT 0x29
1020#define GB_SVC_TYPE_INTF_OOPS 0x2a
1021
1022/* Greybus SVC protocol status values */
1023#define GB_SVC_OP_SUCCESS 0x00
1024#define GB_SVC_OP_UNKNOWN_ERROR 0x01
1025#define GB_SVC_INTF_NOT_DETECTED 0x02
1026#define GB_SVC_INTF_NO_UPRO_LINK 0x03
1027#define GB_SVC_INTF_UPRO_NOT_DOWN 0x04
1028#define GB_SVC_INTF_UPRO_NOT_HIBERNATED 0x05
1029#define GB_SVC_INTF_NO_V_SYS 0x06
1030#define GB_SVC_INTF_V_CHG 0x07
1031#define GB_SVC_INTF_WAKE_BUSY 0x08
1032#define GB_SVC_INTF_NO_REFCLK 0x09
1033#define GB_SVC_INTF_RELEASING 0x0a
1034#define GB_SVC_INTF_NO_ORDER 0x0b
1035#define GB_SVC_INTF_MBOX_SET 0x0c
1036#define GB_SVC_INTF_BAD_MBOX 0x0d
1037#define GB_SVC_INTF_OP_TIMEOUT 0x0e
1038#define GB_SVC_PWRMON_OP_NOT_PRESENT 0x0f
1039
1040struct gb_svc_version_request {
1041 __u8 major;
1042 __u8 minor;
1043} __packed;
1044
1045struct gb_svc_version_response {
1046 __u8 major;
1047 __u8 minor;
1048} __packed;
1049
1050/* SVC protocol hello request */
1051struct gb_svc_hello_request {
1052 __le16 endo_id;
1053 __u8 interface_id;
1054} __packed;
1055/* hello response has no payload */
1056
1057struct gb_svc_intf_device_id_request {
1058 __u8 intf_id;
1059 __u8 device_id;
1060} __packed;
1061/* device id response has no payload */
1062
1063struct gb_svc_intf_reset_request {
1064 __u8 intf_id;
1065} __packed;
1066/* interface reset response has no payload */
1067
1068struct gb_svc_intf_eject_request {
1069 __u8 intf_id;
1070} __packed;
1071/* interface eject response has no payload */
1072
/*
 * Create a connection between CPort @cport1_id on interface @intf1_id
 * and CPort @cport2_id on interface @intf2_id.
 */
struct gb_svc_conn_create_request {
	__u8	intf1_id;
	__le16	cport1_id;
	__u8	intf2_id;
	__le16	cport2_id;
	__u8	tc;		/* NOTE(review): presumably UniPro traffic class — confirm */
	__u8	flags;
} __packed;
1081/* connection create response has no payload */
1082
1083struct gb_svc_conn_destroy_request {
1084 __u8 intf1_id;
1085 __le16 cport1_id;
1086 __u8 intf2_id;
1087 __le16 cport2_id;
1088} __packed;
1089/* connection destroy response has no payload */
1090
1091struct gb_svc_dme_peer_get_request {
1092 __u8 intf_id;
1093 __le16 attr;
1094 __le16 selector;
1095} __packed;
1096
1097struct gb_svc_dme_peer_get_response {
1098 __le16 result_code;
1099 __le32 attr_value;
1100} __packed;
1101
1102struct gb_svc_dme_peer_set_request {
1103 __u8 intf_id;
1104 __le16 attr;
1105 __le16 selector;
1106 __le32 value;
1107} __packed;
1108
1109struct gb_svc_dme_peer_set_response {
1110 __le16 result_code;
1111} __packed;
1112
1113/* Greybus init-status values, currently retrieved using DME peer gets. */
1114#define GB_INIT_SPI_BOOT_STARTED 0x02
1115#define GB_INIT_TRUSTED_SPI_BOOT_FINISHED 0x03
1116#define GB_INIT_UNTRUSTED_SPI_BOOT_FINISHED 0x04
1117#define GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED 0x06
1118#define GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED 0x09
1119#define GB_INIT_S2_LOADER_BOOT_STARTED 0x0D
1120
1121struct gb_svc_route_create_request {
1122 __u8 intf1_id;
1123 __u8 dev1_id;
1124 __u8 intf2_id;
1125 __u8 dev2_id;
1126} __packed;
1127/* route create response has no payload */
1128
1129struct gb_svc_route_destroy_request {
1130 __u8 intf1_id;
1131 __u8 intf2_id;
1132} __packed;
1133/* route destroy response has no payload */
1134
1135/* used for svc_intf_vsys_{enable,disable} */
1136struct gb_svc_intf_vsys_request {
1137 __u8 intf_id;
1138} __packed;
1139
1140struct gb_svc_intf_vsys_response {
1141 __u8 result_code;
1142#define GB_SVC_INTF_VSYS_OK 0x00
1143 /* 0x01 is reserved */
1144#define GB_SVC_INTF_VSYS_FAIL 0x02
1145} __packed;
1146
1147/* used for svc_intf_refclk_{enable,disable} */
1148struct gb_svc_intf_refclk_request {
1149 __u8 intf_id;
1150} __packed;
1151
1152struct gb_svc_intf_refclk_response {
1153 __u8 result_code;
1154#define GB_SVC_INTF_REFCLK_OK 0x00
1155 /* 0x01 is reserved */
1156#define GB_SVC_INTF_REFCLK_FAIL 0x02
1157} __packed;
1158
1159/* used for svc_intf_unipro_{enable,disable} */
1160struct gb_svc_intf_unipro_request {
1161 __u8 intf_id;
1162} __packed;
1163
1164struct gb_svc_intf_unipro_response {
1165 __u8 result_code;
1166#define GB_SVC_INTF_UNIPRO_OK 0x00
1167 /* 0x01 is reserved */
1168#define GB_SVC_INTF_UNIPRO_FAIL 0x02
1169#define GB_SVC_INTF_UNIPRO_NOT_OFF 0x03
1170} __packed;
1171
1172struct gb_svc_timesync_enable_request {
1173 __u8 count;
1174 __le64 frame_time;
1175 __le32 strobe_delay;
1176 __le32 refclk;
1177} __packed;
1178/* timesync enable response has no payload */
1179
1180/* timesync authoritative request has no payload */
1181struct gb_svc_timesync_authoritative_response {
1182 __le64 frame_time[GB_TIMESYNC_MAX_STROBES];
1183};
1184
1185struct gb_svc_timesync_wake_pins_acquire_request {
1186 __le32 strobe_mask;
1187};
1188
1189/* timesync wake pins acquire response has no payload */
1190
1191/* timesync wake pins release request has no payload */
1192/* timesync wake pins release response has no payload */
1193
1194/* timesync svc ping request has no payload */
1195struct gb_svc_timesync_ping_response {
1196 __le64 frame_time;
1197} __packed;
1198
1199#define GB_SVC_UNIPRO_FAST_MODE 0x01
1200#define GB_SVC_UNIPRO_SLOW_MODE 0x02
1201#define GB_SVC_UNIPRO_FAST_AUTO_MODE 0x04
1202#define GB_SVC_UNIPRO_SLOW_AUTO_MODE 0x05
1203#define GB_SVC_UNIPRO_MODE_UNCHANGED 0x07
1204#define GB_SVC_UNIPRO_HIBERNATE_MODE 0x11
1205#define GB_SVC_UNIPRO_OFF_MODE 0x12
1206
1207#define GB_SVC_SMALL_AMPLITUDE 0x01
1208#define GB_SVC_LARGE_AMPLITUDE 0x02
1209
1210#define GB_SVC_NO_DE_EMPHASIS 0x00
1211#define GB_SVC_SMALL_DE_EMPHASIS 0x01
1212#define GB_SVC_LARGE_DE_EMPHASIS 0x02
1213
1214#define GB_SVC_PWRM_RXTERMINATION 0x01
1215#define GB_SVC_PWRM_TXTERMINATION 0x02
1216#define GB_SVC_PWRM_LINE_RESET 0x04
1217#define GB_SVC_PWRM_SCRAMBLING 0x20
1218
1219#define GB_SVC_PWRM_QUIRK_HSSER 0x00000001
1220
1221#define GB_SVC_UNIPRO_HS_SERIES_A 0x01
1222#define GB_SVC_UNIPRO_HS_SERIES_B 0x02
1223
1224#define GB_SVC_SETPWRM_PWR_OK 0x00
1225#define GB_SVC_SETPWRM_PWR_LOCAL 0x01
1226#define GB_SVC_SETPWRM_PWR_REMOTE 0x02
1227#define GB_SVC_SETPWRM_PWR_BUSY 0x03
1228#define GB_SVC_SETPWRM_PWR_ERROR_CAP 0x04
1229#define GB_SVC_SETPWRM_PWR_FATAL_ERROR 0x05
1230
1231struct gb_svc_l2_timer_cfg {
1232 __le16 tsb_fc0_protection_timeout;
1233 __le16 tsb_tc0_replay_timeout;
1234 __le16 tsb_afc0_req_timeout;
1235 __le16 tsb_fc1_protection_timeout;
1236 __le16 tsb_tc1_replay_timeout;
1237 __le16 tsb_afc1_req_timeout;
1238 __le16 reserved_for_tc2[3];
1239 __le16 reserved_for_tc3[3];
1240} __packed;
1241
/*
 * Set the UniPro link power mode for interface @intf_id.  TX/RX mode,
 * gear, and lane counts use the GB_SVC_UNIPRO_*_MODE values defined
 * above; @flags uses GB_SVC_PWRM_* and @quirks GB_SVC_PWRM_QUIRK_*.
 */
struct gb_svc_intf_set_pwrm_request {
	__u8	intf_id;
	__u8	hs_series;	/* GB_SVC_UNIPRO_HS_SERIES_A or _B */
	__u8	tx_mode;	/* GB_SVC_UNIPRO_*_MODE */
	__u8	tx_gear;
	__u8	tx_nlanes;
	__u8	tx_amplitude;	/* GB_SVC_SMALL/LARGE_AMPLITUDE */
	__u8	tx_hs_equalizer;
	__u8	rx_mode;	/* GB_SVC_UNIPRO_*_MODE */
	__u8	rx_gear;
	__u8	rx_nlanes;
	__u8	flags;		/* GB_SVC_PWRM_* */
	__le32	quirks;		/* GB_SVC_PWRM_QUIRK_* */
	struct gb_svc_l2_timer_cfg local_l2timerdata, remote_l2timerdata;
} __packed;
1257
1258struct gb_svc_intf_set_pwrm_response {
1259 __u8 result_code;
1260} __packed;
1261
1262struct gb_svc_key_event_request {
1263 __le16 key_code;
1264#define GB_KEYCODE_ARA 0x00
1265
1266 __u8 key_event;
1267#define GB_SVC_KEY_RELEASED 0x00
1268#define GB_SVC_KEY_PRESSED 0x01
1269} __packed;
1270
1271#define GB_SVC_PWRMON_MAX_RAIL_COUNT 254
1272
1273struct gb_svc_pwrmon_rail_count_get_response {
1274 __u8 rail_count;
1275} __packed;
1276
#define GB_SVC_PWRMON_RAIL_NAME_BUFSIZE		32

/*
 * Response is a status byte followed by an array of fixed-size
 * (GB_SVC_PWRMON_RAIL_NAME_BUFSIZE) rail-name buffers, one per rail
 * reported by the rail-count operation.
 */
struct gb_svc_pwrmon_rail_names_get_response {
	__u8	status;
	__u8	name[0][GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];
} __packed;
1283
1284#define GB_SVC_PWRMON_TYPE_CURR 0x01
1285#define GB_SVC_PWRMON_TYPE_VOL 0x02
1286#define GB_SVC_PWRMON_TYPE_PWR 0x03
1287
1288#define GB_SVC_PWRMON_GET_SAMPLE_OK 0x00
1289#define GB_SVC_PWRMON_GET_SAMPLE_INVAL 0x01
1290#define GB_SVC_PWRMON_GET_SAMPLE_NOSUPP 0x02
1291#define GB_SVC_PWRMON_GET_SAMPLE_HWERR 0x03
1292
1293struct gb_svc_pwrmon_sample_get_request {
1294 __u8 rail_id;
1295 __u8 measurement_type;
1296} __packed;
1297
1298struct gb_svc_pwrmon_sample_get_response {
1299 __u8 result;
1300 __le32 measurement;
1301} __packed;
1302
1303struct gb_svc_pwrmon_intf_sample_get_request {
1304 __u8 intf_id;
1305 __u8 measurement_type;
1306} __packed;
1307
1308struct gb_svc_pwrmon_intf_sample_get_response {
1309 __u8 result;
1310 __le32 measurement;
1311} __packed;
1312
1313#define GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY 0x0001
1314
1315struct gb_svc_module_inserted_request {
1316 __u8 primary_intf_id;
1317 __u8 intf_count;
1318 __le16 flags;
1319} __packed;
1320/* module_inserted response has no payload */
1321
1322struct gb_svc_module_removed_request {
1323 __u8 primary_intf_id;
1324} __packed;
1325/* module_removed response has no payload */
1326
1327struct gb_svc_intf_activate_request {
1328 __u8 intf_id;
1329} __packed;
1330
1331#define GB_SVC_INTF_TYPE_UNKNOWN 0x00
1332#define GB_SVC_INTF_TYPE_DUMMY 0x01
1333#define GB_SVC_INTF_TYPE_UNIPRO 0x02
1334#define GB_SVC_INTF_TYPE_GREYBUS 0x03
1335
1336struct gb_svc_intf_activate_response {
1337 __u8 status;
1338 __u8 intf_type;
1339} __packed;
1340
1341struct gb_svc_intf_resume_request {
1342 __u8 intf_id;
1343} __packed;
1344
1345struct gb_svc_intf_resume_response {
1346 __u8 status;
1347} __packed;
1348
1349#define GB_SVC_INTF_MAILBOX_NONE 0x00
1350#define GB_SVC_INTF_MAILBOX_AP 0x01
1351#define GB_SVC_INTF_MAILBOX_GREYBUS 0x02
1352
1353struct gb_svc_intf_mailbox_event_request {
1354 __u8 intf_id;
1355 __le16 result_code;
1356 __le32 mailbox;
1357} __packed;
1358/* intf_mailbox_event response has no payload */
1359
1360struct gb_svc_intf_oops_request {
1361 __u8 intf_id;
1362 __u8 reason;
1363} __packed;
1364/* intf_oops response has no payload */
1365
1366
1367/* RAW */
1368
1369/* Greybus raw request types */
1370#define GB_RAW_TYPE_SEND 0x02
1371
1372struct gb_raw_send_request {
1373 __le32 len;
1374 __u8 data[0];
1375} __packed;
1376
1377
1378/* UART */
1379
1380/* Greybus UART operation types */
1381#define GB_UART_TYPE_SEND_DATA 0x02
1382#define GB_UART_TYPE_RECEIVE_DATA 0x03 /* Unsolicited data */
1383#define GB_UART_TYPE_SET_LINE_CODING 0x04
1384#define GB_UART_TYPE_SET_CONTROL_LINE_STATE 0x05
1385#define GB_UART_TYPE_SEND_BREAK 0x06
1386#define GB_UART_TYPE_SERIAL_STATE 0x07 /* Unsolicited data */
1387#define GB_UART_TYPE_RECEIVE_CREDITS 0x08
1388#define GB_UART_TYPE_FLUSH_FIFOS 0x09
1389
1390/* Represents data from AP -> Module */
1391struct gb_uart_send_data_request {
1392 __le16 size;
1393 __u8 data[0];
1394} __packed;
1395
1396/* recv-data-request flags */
1397#define GB_UART_RECV_FLAG_FRAMING 0x01 /* Framing error */
1398#define GB_UART_RECV_FLAG_PARITY 0x02 /* Parity error */
1399#define GB_UART_RECV_FLAG_OVERRUN 0x04 /* Overrun error */
1400#define GB_UART_RECV_FLAG_BREAK 0x08 /* Break */
1401
1402/* Represents data from Module -> AP */
1403struct gb_uart_recv_data_request {
1404 __le16 size;
1405 __u8 flags;
1406 __u8 data[0];
1407} __packed;
1408
1409struct gb_uart_receive_credits_request {
1410 __le16 count;
1411} __packed;
1412
1413struct gb_uart_set_line_coding_request {
1414 __le32 rate;
1415 __u8 format;
1416#define GB_SERIAL_1_STOP_BITS 0
1417#define GB_SERIAL_1_5_STOP_BITS 1
1418#define GB_SERIAL_2_STOP_BITS 2
1419
1420 __u8 parity;
1421#define GB_SERIAL_NO_PARITY 0
1422#define GB_SERIAL_ODD_PARITY 1
1423#define GB_SERIAL_EVEN_PARITY 2
1424#define GB_SERIAL_MARK_PARITY 3
1425#define GB_SERIAL_SPACE_PARITY 4
1426
1427 __u8 data_bits;
1428
1429 __u8 flow_control;
1430#define GB_SERIAL_AUTO_RTSCTS_EN 0x1
1431} __packed;
1432
1433/* output control lines */
1434#define GB_UART_CTRL_DTR 0x01
1435#define GB_UART_CTRL_RTS 0x02
1436
1437struct gb_uart_set_control_line_state_request {
1438 __u8 control;
1439} __packed;
1440
1441struct gb_uart_set_break_request {
1442 __u8 state;
1443} __packed;
1444
1445/* input control lines and line errors */
1446#define GB_UART_CTRL_DCD 0x01
1447#define GB_UART_CTRL_DSR 0x02
1448#define GB_UART_CTRL_RI 0x04
1449
1450struct gb_uart_serial_state_request {
1451 __u8 control;
1452} __packed;
1453
1454struct gb_uart_serial_flush_request {
1455 __u8 flags;
1456#define GB_SERIAL_FLAG_FLUSH_TRANSMITTER 0x01
1457#define GB_SERIAL_FLAG_FLUSH_RECEIVER 0x02
1458} __packed;
1459
1460/* Loopback */
1461
1462/* Greybus loopback request types */
1463#define GB_LOOPBACK_TYPE_PING 0x02
1464#define GB_LOOPBACK_TYPE_TRANSFER 0x03
1465#define GB_LOOPBACK_TYPE_SINK 0x04
1466
1467/*
1468 * Loopback request/response header format should be identical
1469 * to simplify bandwidth and data movement analysis.
1470 */
1471struct gb_loopback_transfer_request {
1472 __le32 len;
1473 __le32 reserved0;
1474 __le32 reserved1;
1475 __u8 data[0];
1476} __packed;
1477
1478struct gb_loopback_transfer_response {
1479 __le32 len;
1480 __le32 reserved0;
1481 __le32 reserved1;
1482 __u8 data[0];
1483} __packed;
1484
1485/* SDIO */
1486/* Greybus SDIO operation types */
1487#define GB_SDIO_TYPE_GET_CAPABILITIES 0x02
1488#define GB_SDIO_TYPE_SET_IOS 0x03
1489#define GB_SDIO_TYPE_COMMAND 0x04
1490#define GB_SDIO_TYPE_TRANSFER 0x05
1491#define GB_SDIO_TYPE_EVENT 0x06
1492
1493/* get caps response: request has no payload */
1494struct gb_sdio_get_caps_response {
1495 __le32 caps;
1496#define GB_SDIO_CAP_NONREMOVABLE 0x00000001
1497#define GB_SDIO_CAP_4_BIT_DATA 0x00000002
1498#define GB_SDIO_CAP_8_BIT_DATA 0x00000004
1499#define GB_SDIO_CAP_MMC_HS 0x00000008
1500#define GB_SDIO_CAP_SD_HS 0x00000010
1501#define GB_SDIO_CAP_ERASE 0x00000020
1502#define GB_SDIO_CAP_1_2V_DDR 0x00000040
1503#define GB_SDIO_CAP_1_8V_DDR 0x00000080
1504#define GB_SDIO_CAP_POWER_OFF_CARD 0x00000100
1505#define GB_SDIO_CAP_UHS_SDR12 0x00000200
1506#define GB_SDIO_CAP_UHS_SDR25 0x00000400
1507#define GB_SDIO_CAP_UHS_SDR50 0x00000800
1508#define GB_SDIO_CAP_UHS_SDR104 0x00001000
1509#define GB_SDIO_CAP_UHS_DDR50 0x00002000
1510#define GB_SDIO_CAP_DRIVER_TYPE_A 0x00004000
1511#define GB_SDIO_CAP_DRIVER_TYPE_C 0x00008000
1512#define GB_SDIO_CAP_DRIVER_TYPE_D 0x00010000
1513#define GB_SDIO_CAP_HS200_1_2V 0x00020000
1514#define GB_SDIO_CAP_HS200_1_8V 0x00040000
1515#define GB_SDIO_CAP_HS400_1_2V 0x00080000
1516#define GB_SDIO_CAP_HS400_1_8V 0x00100000
1517
1518 /* see possible values below at vdd */
1519 __le32 ocr;
1520 __le32 f_min;
1521 __le32 f_max;
1522 __le16 max_blk_count;
1523 __le16 max_blk_size;
1524} __packed;
1525
1526/* set ios request: response has no payload */
1527struct gb_sdio_set_ios_request {
1528 __le32 clock;
1529 __le32 vdd;
1530#define GB_SDIO_VDD_165_195 0x00000001
1531#define GB_SDIO_VDD_20_21 0x00000002
1532#define GB_SDIO_VDD_21_22 0x00000004
1533#define GB_SDIO_VDD_22_23 0x00000008
1534#define GB_SDIO_VDD_23_24 0x00000010
1535#define GB_SDIO_VDD_24_25 0x00000020
1536#define GB_SDIO_VDD_25_26 0x00000040
1537#define GB_SDIO_VDD_26_27 0x00000080
1538#define GB_SDIO_VDD_27_28 0x00000100
1539#define GB_SDIO_VDD_28_29 0x00000200
1540#define GB_SDIO_VDD_29_30 0x00000400
1541#define GB_SDIO_VDD_30_31 0x00000800
1542#define GB_SDIO_VDD_31_32 0x00001000
1543#define GB_SDIO_VDD_32_33 0x00002000
1544#define GB_SDIO_VDD_33_34 0x00004000
1545#define GB_SDIO_VDD_34_35 0x00008000
1546#define GB_SDIO_VDD_35_36 0x00010000
1547
1548 __u8 bus_mode;
1549#define GB_SDIO_BUSMODE_OPENDRAIN 0x00
1550#define GB_SDIO_BUSMODE_PUSHPULL 0x01
1551
1552 __u8 power_mode;
1553#define GB_SDIO_POWER_OFF 0x00
1554#define GB_SDIO_POWER_UP 0x01
1555#define GB_SDIO_POWER_ON 0x02
1556#define GB_SDIO_POWER_UNDEFINED 0x03
1557
1558 __u8 bus_width;
1559#define GB_SDIO_BUS_WIDTH_1 0x00
1560#define GB_SDIO_BUS_WIDTH_4 0x02
1561#define GB_SDIO_BUS_WIDTH_8 0x03
1562
1563 __u8 timing;
1564#define GB_SDIO_TIMING_LEGACY 0x00
1565#define GB_SDIO_TIMING_MMC_HS 0x01
1566#define GB_SDIO_TIMING_SD_HS 0x02
1567#define GB_SDIO_TIMING_UHS_SDR12 0x03
1568#define GB_SDIO_TIMING_UHS_SDR25 0x04
1569#define GB_SDIO_TIMING_UHS_SDR50 0x05
1570#define GB_SDIO_TIMING_UHS_SDR104 0x06
1571#define GB_SDIO_TIMING_UHS_DDR50 0x07
1572#define GB_SDIO_TIMING_MMC_DDR52 0x08
1573#define GB_SDIO_TIMING_MMC_HS200 0x09
1574#define GB_SDIO_TIMING_MMC_HS400 0x0A
1575
1576 __u8 signal_voltage;
1577#define GB_SDIO_SIGNAL_VOLTAGE_330 0x00
1578#define GB_SDIO_SIGNAL_VOLTAGE_180 0x01
1579#define GB_SDIO_SIGNAL_VOLTAGE_120 0x02
1580
1581 __u8 drv_type;
1582#define GB_SDIO_SET_DRIVER_TYPE_B 0x00
1583#define GB_SDIO_SET_DRIVER_TYPE_A 0x01
1584#define GB_SDIO_SET_DRIVER_TYPE_C 0x02
1585#define GB_SDIO_SET_DRIVER_TYPE_D 0x03
1586} __packed;
1587
/* command request */
/*
 * Issue an SD/MMC command.  The GB_SDIO_RSP_* and GB_SDIO_CMD_* values
 * presumably mirror the MMC core's MMC_RSP_* / MMC_CMD_* flags — confirm
 * against the sdio driver's translation code.
 */
struct gb_sdio_command_request {
	__u8	cmd;		/* command opcode */
	__u8	cmd_flags;	/* expected response format, GB_SDIO_RSP_* */
#define GB_SDIO_RSP_NONE		0x00
#define GB_SDIO_RSP_PRESENT		0x01
#define GB_SDIO_RSP_136			0x02
#define GB_SDIO_RSP_CRC			0x04
#define GB_SDIO_RSP_BUSY		0x08
#define GB_SDIO_RSP_OPCODE		0x10

	__u8	cmd_type;	/* command class, GB_SDIO_CMD_* */
#define GB_SDIO_CMD_AC		0x00
#define GB_SDIO_CMD_ADTC	0x01
#define GB_SDIO_CMD_BC		0x02
#define GB_SDIO_CMD_BCR		0x03

	__le32	cmd_arg;	/* command argument */
	__le16	data_blocks;	/* data phase: number of blocks */
	__le16	data_blksz;	/* data phase: block size in bytes */
} __packed;
1609
1610struct gb_sdio_command_response {
1611 __le32 resp[4];
1612} __packed;
1613
1614/* transfer request */
1615struct gb_sdio_transfer_request {
1616 __u8 data_flags;
1617#define GB_SDIO_DATA_WRITE 0x01
1618#define GB_SDIO_DATA_READ 0x02
1619#define GB_SDIO_DATA_STREAM 0x04
1620
1621 __le16 data_blocks;
1622 __le16 data_blksz;
1623 __u8 data[0];
1624} __packed;
1625
1626struct gb_sdio_transfer_response {
1627 __le16 data_blocks;
1628 __le16 data_blksz;
1629 __u8 data[0];
1630} __packed;
1631
1632/* event request: generated by module and is defined as unidirectional */
1633struct gb_sdio_event_request {
1634 __u8 event;
1635#define GB_SDIO_CARD_INSERTED 0x01
1636#define GB_SDIO_CARD_REMOVED 0x02
1637#define GB_SDIO_WP 0x04
1638} __packed;
1639
1640/* Camera */
1641
1642/* Greybus Camera request types */
1643#define GB_CAMERA_TYPE_CAPABILITIES 0x02
1644#define GB_CAMERA_TYPE_CONFIGURE_STREAMS 0x03
1645#define GB_CAMERA_TYPE_CAPTURE 0x04
1646#define GB_CAMERA_TYPE_FLUSH 0x05
1647#define GB_CAMERA_TYPE_METADATA 0x06
1648
1649#define GB_CAMERA_MAX_STREAMS 4
1650#define GB_CAMERA_MAX_SETTINGS_SIZE 8192
1651
1652/* Greybus Camera Configure Streams request payload */
1653struct gb_camera_stream_config_request {
1654 __le16 width;
1655 __le16 height;
1656 __le16 format;
1657 __le16 padding;
1658} __packed;
1659
1660struct gb_camera_configure_streams_request {
1661 __u8 num_streams;
1662 __u8 flags;
1663#define GB_CAMERA_CONFIGURE_STREAMS_TEST_ONLY 0x01
1664 __le16 padding;
1665 struct gb_camera_stream_config_request config[0];
1666} __packed;
1667
1668/* Greybus Camera Configure Streams response payload */
1669struct gb_camera_stream_config_response {
1670 __le16 width;
1671 __le16 height;
1672 __le16 format;
1673 __u8 virtual_channel;
1674 __u8 data_type[2];
1675 __le16 max_pkt_size;
1676 __u8 padding;
1677 __le32 max_size;
1678} __packed;
1679
1680struct gb_camera_configure_streams_response {
1681 __u8 num_streams;
1682#define GB_CAMERA_CONFIGURE_STREAMS_ADJUSTED 0x01
1683 __u8 flags;
1684 __u8 padding[2];
1685 __le32 data_rate;
1686 struct gb_camera_stream_config_response config[0];
1687};
1688
1689/* Greybus Camera Capture request payload - response has no payload */
1690struct gb_camera_capture_request {
1691 __le32 request_id;
1692 __u8 streams;
1693 __u8 padding;
1694 __le16 num_frames;
1695 __u8 settings[0];
1696} __packed;
1697
1698/* Greybus Camera Flush response payload - request has no payload */
1699struct gb_camera_flush_response {
1700 __le32 request_id;
1701} __packed;
1702
1703/* Greybus Camera Metadata request payload - operation has no response */
1704struct gb_camera_metadata_request {
1705 __le32 request_id;
1706 __le16 frame_number;
1707 __u8 stream;
1708 __u8 padding;
1709 __u8 metadata[0];
1710} __packed;
1711
1712/* Lights */
1713
1714/* Greybus Lights request types */
1715#define GB_LIGHTS_TYPE_GET_LIGHTS 0x02
1716#define GB_LIGHTS_TYPE_GET_LIGHT_CONFIG 0x03
1717#define GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG 0x04
1718#define GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG 0x05
1719#define GB_LIGHTS_TYPE_SET_BRIGHTNESS 0x06
1720#define GB_LIGHTS_TYPE_SET_BLINK 0x07
1721#define GB_LIGHTS_TYPE_SET_COLOR 0x08
1722#define GB_LIGHTS_TYPE_SET_FADE 0x09
1723#define GB_LIGHTS_TYPE_EVENT 0x0A
1724#define GB_LIGHTS_TYPE_SET_FLASH_INTENSITY 0x0B
1725#define GB_LIGHTS_TYPE_SET_FLASH_STROBE 0x0C
1726#define GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT 0x0D
1727#define GB_LIGHTS_TYPE_GET_FLASH_FAULT 0x0E
1728
1729/* Greybus Light modes */
1730
1731/*
1732 * if you add any specific mode below, update also the
1733 * GB_CHANNEL_MODE_DEFINED_RANGE value accordingly
1734 */
1735#define GB_CHANNEL_MODE_NONE 0x00000000
1736#define GB_CHANNEL_MODE_BATTERY 0x00000001
1737#define GB_CHANNEL_MODE_POWER 0x00000002
1738#define GB_CHANNEL_MODE_WIRELESS 0x00000004
1739#define GB_CHANNEL_MODE_BLUETOOTH 0x00000008
1740#define GB_CHANNEL_MODE_KEYBOARD 0x00000010
1741#define GB_CHANNEL_MODE_BUTTONS 0x00000020
1742#define GB_CHANNEL_MODE_NOTIFICATION 0x00000040
1743#define GB_CHANNEL_MODE_ATTENTION 0x00000080
1744#define GB_CHANNEL_MODE_FLASH 0x00000100
1745#define GB_CHANNEL_MODE_TORCH 0x00000200
1746#define GB_CHANNEL_MODE_INDICATOR 0x00000400
1747
1748/* Lights Mode valid bit values */
1749#define GB_CHANNEL_MODE_DEFINED_RANGE 0x000004FF
1750#define GB_CHANNEL_MODE_VENDOR_RANGE 0x00F00000
1751
1752/* Greybus Light Channels Flags */
1753#define GB_LIGHT_CHANNEL_MULTICOLOR 0x00000001
1754#define GB_LIGHT_CHANNEL_FADER 0x00000002
1755#define GB_LIGHT_CHANNEL_BLINK 0x00000004
1756
1757/* get count of lights in module */
1758struct gb_lights_get_lights_response {
1759 __u8 lights_count;
1760} __packed;
1761
1762/* light config request payload */
1763struct gb_lights_get_light_config_request {
1764 __u8 id;
1765} __packed;
1766
1767/* light config response payload */
1768struct gb_lights_get_light_config_response {
1769 __u8 channel_count;
1770 __u8 name[32];
1771} __packed;
1772
1773/* channel config request payload */
1774struct gb_lights_get_channel_config_request {
1775 __u8 light_id;
1776 __u8 channel_id;
1777} __packed;
1778
1779/* channel flash config request payload */
1780struct gb_lights_get_channel_flash_config_request {
1781 __u8 light_id;
1782 __u8 channel_id;
1783} __packed;
1784
1785/* channel config response payload */
1786struct gb_lights_get_channel_config_response {
1787 __u8 max_brightness;
1788 __le32 flags;
1789 __le32 color;
1790 __u8 color_name[32];
1791 __le32 mode;
1792 __u8 mode_name[32];
1793} __packed;
1794
1795/* channel flash config response payload */
1796struct gb_lights_get_channel_flash_config_response {
1797 __le32 intensity_min_uA;
1798 __le32 intensity_max_uA;
1799 __le32 intensity_step_uA;
1800 __le32 timeout_min_us;
1801 __le32 timeout_max_us;
1802 __le32 timeout_step_us;
1803} __packed;
1804
/* blink request payload: response has no payload */
1806struct gb_lights_blink_request {
1807 __u8 light_id;
1808 __u8 channel_id;
1809 __le16 time_on_ms;
1810 __le16 time_off_ms;
1811} __packed;
1812
/* set brightness request payload: response has no payload */
1814struct gb_lights_set_brightness_request {
1815 __u8 light_id;
1816 __u8 channel_id;
1817 __u8 brightness;
1818} __packed;
1819
1820/* set color request payload: response have no payload */
1821struct gb_lights_set_color_request {
1822 __u8 light_id;
1823 __u8 channel_id;
1824 __le32 color;
1825} __packed;
1826
1827/* set fade request payload: response have no payload */
1828struct gb_lights_set_fade_request {
1829 __u8 light_id;
1830 __u8 channel_id;
1831 __u8 fade_in;
1832 __u8 fade_out;
1833} __packed;
1834
1835/* event request: generated by module */
1836struct gb_lights_event_request {
1837 __u8 light_id;
1838 __u8 event;
1839#define GB_LIGHTS_LIGHT_CONFIG 0x01
1840} __packed;
1841
1842/* set flash intensity request payload: response have no payload */
1843struct gb_lights_set_flash_intensity_request {
1844 __u8 light_id;
1845 __u8 channel_id;
1846 __le32 intensity_uA;
1847} __packed;
1848
1849/* set flash strobe state request payload: response have no payload */
1850struct gb_lights_set_flash_strobe_request {
1851 __u8 light_id;
1852 __u8 channel_id;
1853 __u8 state;
1854} __packed;
1855
1856/* set flash timeout request payload: response have no payload */
1857struct gb_lights_set_flash_timeout_request {
1858 __u8 light_id;
1859 __u8 channel_id;
1860 __le32 timeout_us;
1861} __packed;
1862
1863/* get flash fault request payload */
1864struct gb_lights_get_flash_fault_request {
1865 __u8 light_id;
1866 __u8 channel_id;
1867} __packed;
1868
1869/* get flash fault response payload */
1870struct gb_lights_get_flash_fault_response {
1871 __le32 fault;
1872#define GB_LIGHTS_FLASH_FAULT_OVER_VOLTAGE 0x00000000
1873#define GB_LIGHTS_FLASH_FAULT_TIMEOUT 0x00000001
1874#define GB_LIGHTS_FLASH_FAULT_OVER_TEMPERATURE 0x00000002
1875#define GB_LIGHTS_FLASH_FAULT_SHORT_CIRCUIT 0x00000004
1876#define GB_LIGHTS_FLASH_FAULT_OVER_CURRENT 0x00000008
1877#define GB_LIGHTS_FLASH_FAULT_INDICATOR 0x00000010
1878#define GB_LIGHTS_FLASH_FAULT_UNDER_VOLTAGE 0x00000020
1879#define GB_LIGHTS_FLASH_FAULT_INPUT_VOLTAGE 0x00000040
1880#define GB_LIGHTS_FLASH_FAULT_LED_OVER_TEMPERATURE 0x00000080
1881} __packed;
1882
1883/* Audio */
1884
1885#define GB_AUDIO_TYPE_GET_TOPOLOGY_SIZE 0x02
1886#define GB_AUDIO_TYPE_GET_TOPOLOGY 0x03
1887#define GB_AUDIO_TYPE_GET_CONTROL 0x04
1888#define GB_AUDIO_TYPE_SET_CONTROL 0x05
1889#define GB_AUDIO_TYPE_ENABLE_WIDGET 0x06
1890#define GB_AUDIO_TYPE_DISABLE_WIDGET 0x07
1891#define GB_AUDIO_TYPE_GET_PCM 0x08
1892#define GB_AUDIO_TYPE_SET_PCM 0x09
1893#define GB_AUDIO_TYPE_SET_TX_DATA_SIZE 0x0a
1894 /* 0x0b unused */
1895#define GB_AUDIO_TYPE_ACTIVATE_TX 0x0c
1896#define GB_AUDIO_TYPE_DEACTIVATE_TX 0x0d
1897#define GB_AUDIO_TYPE_SET_RX_DATA_SIZE 0x0e
1898 /* 0x0f unused */
1899#define GB_AUDIO_TYPE_ACTIVATE_RX 0x10
1900#define GB_AUDIO_TYPE_DEACTIVATE_RX 0x11
1901#define GB_AUDIO_TYPE_JACK_EVENT 0x12
1902#define GB_AUDIO_TYPE_BUTTON_EVENT 0x13
1903#define GB_AUDIO_TYPE_STREAMING_EVENT 0x14
1904#define GB_AUDIO_TYPE_SEND_DATA 0x15
1905
1906/* Module must be able to buffer 10ms of audio data, minimum */
1907#define GB_AUDIO_SAMPLE_BUFFER_MIN_US 10000
1908
1909#define GB_AUDIO_PCM_NAME_MAX 32
1910#define AUDIO_DAI_NAME_MAX 32
1911#define AUDIO_CONTROL_NAME_MAX 32
1912#define AUDIO_CTL_ELEM_NAME_MAX 44
1913#define AUDIO_ENUM_NAME_MAX 64
1914#define AUDIO_WIDGET_NAME_MAX 32
1915
1916/* See SNDRV_PCM_FMTBIT_* in Linux source */
1917#define GB_AUDIO_PCM_FMT_S8 BIT(0)
1918#define GB_AUDIO_PCM_FMT_U8 BIT(1)
1919#define GB_AUDIO_PCM_FMT_S16_LE BIT(2)
1920#define GB_AUDIO_PCM_FMT_S16_BE BIT(3)
1921#define GB_AUDIO_PCM_FMT_U16_LE BIT(4)
1922#define GB_AUDIO_PCM_FMT_U16_BE BIT(5)
1923#define GB_AUDIO_PCM_FMT_S24_LE BIT(6)
1924#define GB_AUDIO_PCM_FMT_S24_BE BIT(7)
1925#define GB_AUDIO_PCM_FMT_U24_LE BIT(8)
1926#define GB_AUDIO_PCM_FMT_U24_BE BIT(9)
1927#define GB_AUDIO_PCM_FMT_S32_LE BIT(10)
1928#define GB_AUDIO_PCM_FMT_S32_BE BIT(11)
1929#define GB_AUDIO_PCM_FMT_U32_LE BIT(12)
1930#define GB_AUDIO_PCM_FMT_U32_BE BIT(13)
1931
1932/* See SNDRV_PCM_RATE_* in Linux source */
1933#define GB_AUDIO_PCM_RATE_5512 BIT(0)
1934#define GB_AUDIO_PCM_RATE_8000 BIT(1)
1935#define GB_AUDIO_PCM_RATE_11025 BIT(2)
1936#define GB_AUDIO_PCM_RATE_16000 BIT(3)
1937#define GB_AUDIO_PCM_RATE_22050 BIT(4)
1938#define GB_AUDIO_PCM_RATE_32000 BIT(5)
1939#define GB_AUDIO_PCM_RATE_44100 BIT(6)
1940#define GB_AUDIO_PCM_RATE_48000 BIT(7)
1941#define GB_AUDIO_PCM_RATE_64000 BIT(8)
1942#define GB_AUDIO_PCM_RATE_88200 BIT(9)
1943#define GB_AUDIO_PCM_RATE_96000 BIT(10)
1944#define GB_AUDIO_PCM_RATE_176400 BIT(11)
1945#define GB_AUDIO_PCM_RATE_192000 BIT(12)
1946
1947#define GB_AUDIO_STREAM_TYPE_CAPTURE 0x1
1948#define GB_AUDIO_STREAM_TYPE_PLAYBACK 0x2
1949
1950#define GB_AUDIO_CTL_ELEM_ACCESS_READ BIT(0)
1951#define GB_AUDIO_CTL_ELEM_ACCESS_WRITE BIT(1)
1952
1953/* See SNDRV_CTL_ELEM_TYPE_* in Linux source */
1954#define GB_AUDIO_CTL_ELEM_TYPE_BOOLEAN 0x01
1955#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER 0x02
1956#define GB_AUDIO_CTL_ELEM_TYPE_ENUMERATED 0x03
1957#define GB_AUDIO_CTL_ELEM_TYPE_INTEGER64 0x06
1958
1959/* See SNDRV_CTL_ELEM_IFACE_* in Linux source */
1960#define GB_AUDIO_CTL_ELEM_IFACE_CARD 0x00
1961#define GB_AUDIO_CTL_ELEM_IFACE_HWDEP 0x01
1962#define GB_AUDIO_CTL_ELEM_IFACE_MIXER 0x02
1963#define GB_AUDIO_CTL_ELEM_IFACE_PCM 0x03
1964#define GB_AUDIO_CTL_ELEM_IFACE_RAWMIDI 0x04
1965#define GB_AUDIO_CTL_ELEM_IFACE_TIMER 0x05
1966#define GB_AUDIO_CTL_ELEM_IFACE_SEQUENCER 0x06
1967
/* SNDRV_CTL_ELEM_ACCESS_* in Linux source */
#define GB_AUDIO_ACCESS_READ		BIT(0)
#define GB_AUDIO_ACCESS_WRITE		BIT(1)
#define GB_AUDIO_ACCESS_VOLATILE	BIT(2)
#define GB_AUDIO_ACCESS_TIMESTAMP	BIT(3)
#define GB_AUDIO_ACCESS_TLV_READ	BIT(4)
#define GB_AUDIO_ACCESS_TLV_WRITE	BIT(5)
#define GB_AUDIO_ACCESS_TLV_COMMAND	BIT(6)
#define GB_AUDIO_ACCESS_INACTIVE	BIT(7)
#define GB_AUDIO_ACCESS_LOCK		BIT(8)
#define GB_AUDIO_ACCESS_OWNER		BIT(9)

/*
 * enum snd_soc_dapm_type
 *
 * Widget types carried in the gb_audio_widget.type wire field; these
 * values are part of the protocol, so do not renumber them.
 */
#define GB_AUDIO_WIDGET_TYPE_INPUT		0x0
#define GB_AUDIO_WIDGET_TYPE_OUTPUT		0x1
#define GB_AUDIO_WIDGET_TYPE_MUX		0x2
#define GB_AUDIO_WIDGET_TYPE_VIRT_MUX		0x3
#define GB_AUDIO_WIDGET_TYPE_VALUE_MUX		0x4
#define GB_AUDIO_WIDGET_TYPE_MIXER		0x5
#define GB_AUDIO_WIDGET_TYPE_MIXER_NAMED_CTL	0x6
#define GB_AUDIO_WIDGET_TYPE_PGA		0x7
#define GB_AUDIO_WIDGET_TYPE_OUT_DRV		0x8
#define GB_AUDIO_WIDGET_TYPE_ADC		0x9
#define GB_AUDIO_WIDGET_TYPE_DAC		0xa
#define GB_AUDIO_WIDGET_TYPE_MICBIAS		0xb
#define GB_AUDIO_WIDGET_TYPE_MIC		0xc
#define GB_AUDIO_WIDGET_TYPE_HP			0xd
#define GB_AUDIO_WIDGET_TYPE_SPK		0xe
#define GB_AUDIO_WIDGET_TYPE_LINE		0xf
#define GB_AUDIO_WIDGET_TYPE_SWITCH		0x10
#define GB_AUDIO_WIDGET_TYPE_VMID		0x11
#define GB_AUDIO_WIDGET_TYPE_PRE		0x12
#define GB_AUDIO_WIDGET_TYPE_POST		0x13
#define GB_AUDIO_WIDGET_TYPE_SUPPLY		0x14
#define GB_AUDIO_WIDGET_TYPE_REGULATOR_SUPPLY	0x15
#define GB_AUDIO_WIDGET_TYPE_CLOCK_SUPPLY	0x16
#define GB_AUDIO_WIDGET_TYPE_AIF_IN		0x17
#define GB_AUDIO_WIDGET_TYPE_AIF_OUT		0x18
#define GB_AUDIO_WIDGET_TYPE_SIGGEN		0x19
#define GB_AUDIO_WIDGET_TYPE_DAI_IN		0x1a
#define GB_AUDIO_WIDGET_TYPE_DAI_OUT		0x1b
#define GB_AUDIO_WIDGET_TYPE_DAI_LINK		0x1c
2010
/* Widget power state reported in gb_audio_widget.state */
#define GB_AUDIO_WIDGET_STATE_DISABLED		0x01
#define GB_AUDIO_WIDGET_STATE_ENABLED		0x02
/*
 * Misspelled alias kept so existing users of the original name keep
 * compiling; prefer GB_AUDIO_WIDGET_STATE_ENABLED in new code.
 */
#define GB_AUDIO_WIDGET_STATE_ENAABLED		GB_AUDIO_WIDGET_STATE_ENABLED
2013
/* Jack insertion/removal events carried in gb_audio_jack_event_request */
#define GB_AUDIO_JACK_EVENT_INSERTION		0x1
#define GB_AUDIO_JACK_EVENT_REMOVAL		0x2

/* Button events carried in gb_audio_button_event_request */
#define GB_AUDIO_BUTTON_EVENT_PRESS		0x1
#define GB_AUDIO_BUTTON_EVENT_RELEASE		0x2

/* Streaming status events carried in gb_audio_streaming_event_request */
#define GB_AUDIO_STREAMING_EVENT_UNSPECIFIED	0x1
#define GB_AUDIO_STREAMING_EVENT_HALT		0x2
#define GB_AUDIO_STREAMING_EVENT_INTERNAL_ERROR	0x3
#define GB_AUDIO_STREAMING_EVENT_PROTOCOL_ERROR	0x4
#define GB_AUDIO_STREAMING_EVENT_FAILURE	0x5
#define GB_AUDIO_STREAMING_EVENT_UNDERRUN	0x6
#define GB_AUDIO_STREAMING_EVENT_OVERRUN	0x7
#define GB_AUDIO_STREAMING_EVENT_CLOCKING	0x8
#define GB_AUDIO_STREAMING_EVENT_DATA_LEN	0x9

/* Sentinel for "no index" in index-typed fields */
#define GB_AUDIO_INVALID_INDEX			0xff

/* enum snd_jack_types */
#define GB_AUDIO_JACK_HEADPHONE		0x0000001
#define GB_AUDIO_JACK_MICROPHONE	0x0000002
#define GB_AUDIO_JACK_HEADSET		(GB_AUDIO_JACK_HEADPHONE | \
					 GB_AUDIO_JACK_MICROPHONE)
#define GB_AUDIO_JACK_LINEOUT		0x0000004
#define GB_AUDIO_JACK_MECHANICAL	0x0000008
#define GB_AUDIO_JACK_VIDEOOUT		0x0000010
#define GB_AUDIO_JACK_AVOUT		(GB_AUDIO_JACK_LINEOUT | \
					 GB_AUDIO_JACK_VIDEOOUT)
#define GB_AUDIO_JACK_LINEIN		0x0000020
#define GB_AUDIO_JACK_OC_HPHL		0x0000040
#define GB_AUDIO_JACK_OC_HPHR		0x0000080
#define GB_AUDIO_JACK_MICROPHONE2	0x0000200
#define GB_AUDIO_JACK_ANC_HEADPHONE	(GB_AUDIO_JACK_HEADPHONE | \
					 GB_AUDIO_JACK_MICROPHONE | \
					 GB_AUDIO_JACK_MICROPHONE2)
/* Kept separate from switches to facilitate implementation */
#define GB_AUDIO_JACK_BTN_0		0x4000000
#define GB_AUDIO_JACK_BTN_1		0x2000000
#define GB_AUDIO_JACK_BTN_2		0x1000000
#define GB_AUDIO_JACK_BTN_3		0x0800000
2054
/* Per-direction PCM capabilities of a DAI (formats, rates, channels) */
struct gb_audio_pcm {
	__u8	stream_name[GB_AUDIO_PCM_NAME_MAX];
	__le32	formats;	/* GB_AUDIO_PCM_FMT_* */
	__le32	rates;		/* GB_AUDIO_PCM_RATE_* */
	__u8	chan_min;
	__u8	chan_max;
	__u8	sig_bits;	/* number of bits of content */
} __packed;

/* A digital audio interface and the CPort its audio data flows over */
struct gb_audio_dai {
	__u8			name[AUDIO_DAI_NAME_MAX];
	__le16			data_cport;
	struct gb_audio_pcm	capture;
	struct gb_audio_pcm	playback;
} __packed;

/* Value range of a 32-bit integer control */
struct gb_audio_integer {
	__le32	min;
	__le32	max;
	__le32	step;
} __packed;

/* Value range of a 64-bit integer control */
struct gb_audio_integer64 {
	__le64	min;
	__le64	max;
	__le64	step;
} __packed;

/* Enumerated control: 'names_length' bytes of item-name data follow */
struct gb_audio_enumerated {
	__le32	items;
	__le16	names_length;
	__u8	names[0];
} __packed;

struct gb_audio_ctl_elem_info { /* See snd_ctl_elem_info in Linux source */
	__u8	type;		/* GB_AUDIO_CTL_ELEM_TYPE_* */
	__le16	dimen[4];
	union {
		struct gb_audio_integer		integer;
		struct gb_audio_integer64	integer64;
		struct gb_audio_enumerated	enumerated;
	} value;		/* which member applies is selected by 'type' */
} __packed;

struct gb_audio_ctl_elem_value { /* See snd_ctl_elem_value in Linux source */
	__le64	timestamp;	/* XXX needed? */
	union {
		__le32	integer_value[2];	/* consider CTL_DOUBLE_xxx */
		__le64	integer64_value[2];
		__le32	enumerated_item[2];
	} value;
} __packed;
2107
/* One audio control exported by the module */
struct gb_audio_control {
	__u8	name[AUDIO_CONTROL_NAME_MAX];
	__u8	id;		/* 0-63 */
	__u8	iface;		/* GB_AUDIO_IFACE_* */
	__le16	data_cport;
	__le32	access;		/* GB_AUDIO_ACCESS_* */
	__u8	count;		/* count of same elements */
	__u8	count_values;	/* count of values, max=2 for CTL_DOUBLE_xxx */
	struct gb_audio_ctl_elem_info	info;
} __packed;

/* A DAPM-style widget, immediately followed by its controls */
struct gb_audio_widget {
	__u8	name[AUDIO_WIDGET_NAME_MAX];
	__u8	sname[AUDIO_WIDGET_NAME_MAX];
	__u8	id;
	__u8	type;		/* GB_AUDIO_WIDGET_TYPE_* */
	__u8	state;		/* GB_AUDIO_WIDGET_STATE_* */
	__u8	ncontrols;
	struct gb_audio_control	ctl[0];	/* 'ncontrols' entries */
} __packed;

/* One routing edge between two widgets */
struct gb_audio_route {
	__u8	source_id;	/* widget id */
	__u8	destination_id;	/* widget id */
	__u8	control_id;	/* 0-63 */
	__u8	index;		/* Selection within the control */
} __packed;

/*
 * Complete audio topology: element counts and byte sizes of the four
 * variable-length sections laid out back to back in 'data'.
 */
struct gb_audio_topology {
	__u8	num_dais;
	__u8	num_controls;
	__u8	num_widgets;
	__u8	num_routes;
	__le32	size_dais;
	__le32	size_controls;
	__le32	size_widgets;
	__le32	size_routes;
	__le32	jack_type;
	/*
	 * struct gb_audio_dai		dai[num_dais];
	 * struct gb_audio_control	controls[num_controls];
	 * struct gb_audio_widget	widgets[num_widgets];
	 * struct gb_audio_route	routes[num_routes];
	 */
	__u8	data[0];
} __packed;

/* Response: total byte size of the topology blob that will follow */
struct gb_audio_get_topology_size_response {
	__le16	size;
} __packed;

struct gb_audio_get_topology_response {
	struct gb_audio_topology	topology;
} __packed;
2162
/* Read the current value of a control (by id and selection index) */
struct gb_audio_get_control_request {
	__u8	control_id;
	__u8	index;
} __packed;

struct gb_audio_get_control_response {
	struct gb_audio_ctl_elem_value	value;
} __packed;

/* Write a new value to a control */
struct gb_audio_set_control_request {
	__u8	control_id;
	__u8	index;
	struct gb_audio_ctl_elem_value	value;
} __packed;

struct gb_audio_enable_widget_request {
	__u8	widget_id;
} __packed;

struct gb_audio_disable_widget_request {
	__u8	widget_id;
} __packed;

/* Query the PCM configuration currently set on a data CPort */
struct gb_audio_get_pcm_request {
	__le16	data_cport;
} __packed;

struct gb_audio_get_pcm_response {
	__le32	format;
	__le32	rate;
	__u8	channels;
	__u8	sig_bits;
} __packed;

/* Configure the PCM parameters of a data CPort */
struct gb_audio_set_pcm_request {
	__le16	data_cport;
	__le32	format;
	__le32	rate;
	__u8	channels;
	__u8	sig_bits;
} __packed;

/* Per-CPort transmit message size */
struct gb_audio_set_tx_data_size_request {
	__le16	data_cport;
	__le16	size;
} __packed;

struct gb_audio_activate_tx_request {
	__le16	data_cport;
} __packed;

struct gb_audio_deactivate_tx_request {
	__le16	data_cport;
} __packed;

/* Per-CPort receive message size */
struct gb_audio_set_rx_data_size_request {
	__le16	data_cport;
	__le16	size;
} __packed;

struct gb_audio_activate_rx_request {
	__le16	data_cport;
} __packed;

struct gb_audio_deactivate_rx_request {
	__le16	data_cport;
} __packed;

/* Module-to-AP notification: jack state changed on a widget */
struct gb_audio_jack_event_request {
	__u8	widget_id;
	__u8	jack_attribute;
	__u8	event;		/* GB_AUDIO_JACK_EVENT_* */
} __packed;

/* Module-to-AP notification: button pressed/released */
struct gb_audio_button_event_request {
	__u8	widget_id;
	__u8	button_id;
	__u8	event;		/* GB_AUDIO_BUTTON_EVENT_* */
} __packed;

/* Module-to-AP notification: streaming status change on a data CPort */
struct gb_audio_streaming_event_request {
	__le16	data_cport;
	__u8	event;		/* GB_AUDIO_STREAMING_EVENT_* */
} __packed;

/* Audio payload sent over a data CPort */
struct gb_audio_send_data_request {
	__le64	timestamp;
	__u8	data[0];
} __packed;
2252
2253
/* Log */

/* operations */
#define GB_LOG_TYPE_SEND_LOG	0x02

/* length: maximum number of message bytes in one send-log request */
#define GB_LOG_MAX_LEN		1024

/* Module-to-AP log message: 'len' bytes of text follow in 'msg' */
struct gb_log_send_log_request {
	__le16	len;
	__u8	msg[0];
} __packed;
2266
2267#endif /* __GREYBUS_PROTOCOLS_H */
2268
diff --git a/drivers/staging/greybus/greybus_trace.h b/drivers/staging/greybus/greybus_trace.h
new file mode 100644
index 000000000000..6f8692da9ec8
--- /dev/null
+++ b/drivers/staging/greybus/greybus_trace.h
@@ -0,0 +1,531 @@
1/*
2 * Greybus driver and device API
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9#undef TRACE_SYSTEM
10#define TRACE_SYSTEM greybus
11
12#if !defined(_TRACE_GREYBUS_H) || defined(TRACE_HEADER_MULTI_READ)
13#define _TRACE_GREYBUS_H
14
15#include <linux/tracepoint.h>
16
17struct gb_message;
18struct gb_operation;
19struct gb_connection;
20struct gb_bundle;
21struct gb_host_device;
22
/*
 * Common event class for message tracepoints: captures the wire-header
 * fields (size, operation id, type, result) of a gb_message.
 */
DECLARE_EVENT_CLASS(gb_message,

	TP_PROTO(struct gb_message *message),

	TP_ARGS(message),

	TP_STRUCT__entry(
		__field(u16, size)
		__field(u16, operation_id)
		__field(u8, type)
		__field(u8, result)
	),

	TP_fast_assign(
		/* header fields are little-endian on the wire */
		__entry->size = le16_to_cpu(message->header->size);
		__entry->operation_id =
			le16_to_cpu(message->header->operation_id);
		__entry->type = message->header->type;
		__entry->result = message->header->result;
	),

	TP_printk("size=%hu operation_id=0x%04x type=0x%02x result=0x%02x",
		  __entry->size, __entry->operation_id,
		  __entry->type, __entry->result)
);

/* Stamp out one named tracepoint of the gb_message class. */
#define DEFINE_MESSAGE_EVENT(name) \
		DEFINE_EVENT(gb_message, name, \
				TP_PROTO(struct gb_message *message), \
				TP_ARGS(message))

/*
 * Occurs immediately before calling a host device's message_send()
 * method.
 */
DEFINE_MESSAGE_EVENT(gb_message_send);

/*
 * Occurs after an incoming request message has been received
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_request);

/*
 * Occurs after an incoming response message has been received,
 * after its matching request has been found.
 */
DEFINE_MESSAGE_EVENT(gb_message_recv_response);

/*
 * Occurs after an operation has been canceled, possibly before the
 * cancellation is complete.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_outgoing);

/*
 * Occurs when an incoming request is cancelled; if the response has
 * been queued for sending, this occurs after it is sent.
 */
DEFINE_MESSAGE_EVENT(gb_message_cancel_incoming);

/*
 * Occurs in the host driver message_send() function just prior to
 * handing off the data to be processed by hardware.
 */
DEFINE_MESSAGE_EVENT(gb_message_submit);

#undef DEFINE_MESSAGE_EVENT
90
91DECLARE_EVENT_CLASS(gb_operation,
92
93 TP_PROTO(struct gb_operation *operation),
94
95 TP_ARGS(operation),
96
97 TP_STRUCT__entry(
98 __field(u16, cport_id) /* CPort of HD side of connection */
99 __field(u16, id) /* Operation ID */
100 __field(u8, type)
101 __field(unsigned long, flags)
102 __field(int, active)
103 __field(int, waiters)
104 __field(int, errno)
105 ),
106
107 TP_fast_assign(
108 __entry->cport_id = operation->connection->hd_cport_id;
109 __entry->id = operation->id;
110 __entry->type = operation->type;
111 __entry->flags = operation->flags;
112 __entry->active = operation->active;
113 __entry->waiters = atomic_read(&operation->waiters);
114 __entry->errno = operation->errno;
115 ),
116
117 TP_printk("id=%04x type=0x%02x cport_id=%04x flags=0x%lx active=%d waiters=%d errno=%d",
118 __entry->id, __entry->cport_id, __entry->type, __entry->flags,
119 __entry->active, __entry->waiters, __entry->errno)
120);
121
122#define DEFINE_OPERATION_EVENT(name) \
123 DEFINE_EVENT(gb_operation, name, \
124 TP_PROTO(struct gb_operation *operation), \
125 TP_ARGS(operation))
126
127/*
128 * Occurs after a new operation is created for an outgoing request
129 * has been successfully created.
130 */
131DEFINE_OPERATION_EVENT(gb_operation_create);
132
133/*
134 * Occurs after a new core operation has been created.
135 */
136DEFINE_OPERATION_EVENT(gb_operation_create_core);
137
138/*
139 * Occurs after a new operation has been created for an incoming
140 * request has been successfully created and initialized.
141 */
142DEFINE_OPERATION_EVENT(gb_operation_create_incoming);
143
144/*
145 * Occurs when the last reference to an operation has been dropped,
146 * prior to freeing resources.
147 */
148DEFINE_OPERATION_EVENT(gb_operation_destroy);
149
150/*
151 * Occurs when an operation has been marked active, after updating
152 * its active count.
153 */
154DEFINE_OPERATION_EVENT(gb_operation_get_active);
155
156/*
157 * Occurs when an operation has been marked active, before updating
158 * its active count.
159 */
160DEFINE_OPERATION_EVENT(gb_operation_put_active);
161
162#undef DEFINE_OPERATION_EVENT
163
/*
 * Common event class for connection tracepoints: captures the bus id,
 * owning bundle, human-readable connection name, state and flags.
 */
DECLARE_EVENT_CLASS(gb_connection,

	TP_PROTO(struct gb_connection *connection),

	TP_ARGS(connection),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, bundle_id)
		/* name contains "hd_cport_id/intf_id:cport_id" */
		__dynamic_array(char, name, sizeof(connection->name))
		__field(enum gb_connection_state, state)
		__field(unsigned long, flags)
	),

	TP_fast_assign(
		__entry->hd_bus_id = connection->hd->bus_id;
		/* static (bundle-less) connections record BUNDLE_ID_NONE */
		__entry->bundle_id = connection->bundle ?
				connection->bundle->id : BUNDLE_ID_NONE;
		/* copy the whole fixed-size name buffer, NUL included */
		memcpy(__get_str(name), connection->name,
		       sizeof(connection->name));
		__entry->state = connection->state;
		__entry->flags = connection->flags;
	),

	TP_printk("hd_bus_id=%d bundle_id=0x%02x name=\"%s\" state=%u flags=0x%lx",
		  __entry->hd_bus_id, __entry->bundle_id, __get_str(name),
		  (unsigned int)__entry->state, __entry->flags)
);

/* Stamp out one named tracepoint of the gb_connection class. */
#define DEFINE_CONNECTION_EVENT(name) \
		DEFINE_EVENT(gb_connection, name, \
				TP_PROTO(struct gb_connection *connection), \
				TP_ARGS(connection))

/*
 * Occurs after a new connection is successfully created.
 */
DEFINE_CONNECTION_EVENT(gb_connection_create);

/*
 * Occurs when the last reference to a connection has been dropped,
 * before its resources are freed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_release);

/*
 * Occurs when a new reference to connection is added, currently
 * only when a message over the connection is received.
 */
DEFINE_CONNECTION_EVENT(gb_connection_get);

/*
 * Occurs when a new reference to connection is dropped, after a
 * a received message is handled, or when the connection is
 * destroyed.
 */
DEFINE_CONNECTION_EVENT(gb_connection_put);

/*
 * Occurs when a request to enable a connection is made, either for
 * transmit only, or for both transmit and receive.
 */
DEFINE_CONNECTION_EVENT(gb_connection_enable);

/*
 * Occurs when a request to disable a connection is made, either for
 * receive only, or for both transmit and receive. Also occurs when
 * a request to forcefully disable a connection is made.
 */
DEFINE_CONNECTION_EVENT(gb_connection_disable);

#undef DEFINE_CONNECTION_EVENT
237
/*
 * Common event class for bundle tracepoints: captures the owning
 * interface, bundle id/class and its CPort count.
 */
DECLARE_EVENT_CLASS(gb_bundle,

	TP_PROTO(struct gb_bundle *bundle),

	TP_ARGS(bundle),

	TP_STRUCT__entry(
		__field(u8, intf_id)
		__field(u8, id)
		__field(u8, class)
		__field(size_t, num_cports)
	),

	TP_fast_assign(
		__entry->intf_id = bundle->intf->interface_id;
		__entry->id = bundle->id;
		__entry->class = bundle->class;
		__entry->num_cports = bundle->num_cports;
	),

	TP_printk("intf_id=0x%02x id=%02x class=0x%02x num_cports=%zu",
		  __entry->intf_id, __entry->id, __entry->class,
		  __entry->num_cports)
);

/* Stamp out one named tracepoint of the gb_bundle class. */
#define DEFINE_BUNDLE_EVENT(name) \
		DEFINE_EVENT(gb_bundle, name, \
				TP_PROTO(struct gb_bundle *bundle), \
				TP_ARGS(bundle))

/*
 * Occurs after a new bundle is successfully created.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_create);

/*
 * Occurs when the last reference to a bundle has been dropped,
 * before its resources are freed.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_release);

/*
 * Occurs when a bundle is added to an interface when the interface
 * is enabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_add);

/*
 * Occurs when a registered bundle gets destroyed, normally at the
 * time an interface is disabled.
 */
DEFINE_BUNDLE_EVENT(gb_bundle_destroy);

#undef DEFINE_BUNDLE_EVENT
292
/*
 * Common event class for interface tracepoints: captures ids plus the
 * interface's boolean lifecycle state (printed as single-letter flags).
 */
DECLARE_EVENT_CLASS(gb_interface,

	TP_PROTO(struct gb_interface *intf),

	TP_ARGS(intf),

	TP_STRUCT__entry(
		__field(u8, module_id)
		__field(u8, id)		/* Interface id */
		__field(u8, device_id)
		__field(int, disconnected)	/* bool */
		__field(int, ejected)		/* bool */
		__field(int, active)		/* bool */
		__field(int, enabled)		/* bool */
		__field(int, mode_switch)	/* bool */
	),

	TP_fast_assign(
		__entry->module_id = intf->module->module_id;
		__entry->id = intf->interface_id;
		__entry->device_id = intf->device_id;
		__entry->disconnected = intf->disconnected;
		__entry->ejected = intf->ejected;
		__entry->active = intf->active;
		__entry->enabled = intf->enabled;
		__entry->mode_switch = intf->mode_switch;
	),

	/* D/J/A/E/M = disconnected/ejected/active/enabled/mode_switch */
	TP_printk("intf_id=%hhu device_id=%hhu module_id=%hhu D=%d J=%d A=%d E=%d M=%d",
		  __entry->id, __entry->device_id, __entry->module_id,
		  __entry->disconnected, __entry->ejected, __entry->active,
		  __entry->enabled, __entry->mode_switch)
);

/* Stamp out one named tracepoint of the gb_interface class. */
#define DEFINE_INTERFACE_EVENT(name) \
		DEFINE_EVENT(gb_interface, name, \
				TP_PROTO(struct gb_interface *intf), \
				TP_ARGS(intf))

/*
 * Occurs after a new interface is successfully created.
 */
DEFINE_INTERFACE_EVENT(gb_interface_create);

/*
 * Occurs after the last reference to an interface has been dropped.
 */
DEFINE_INTERFACE_EVENT(gb_interface_release);

/*
 * Occurs after an interface has been registered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_add);

/*
 * Occurs when a registered interface gets deregistered.
 */
DEFINE_INTERFACE_EVENT(gb_interface_del);

/*
 * Occurs when a registered interface has been successfully
 * activated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_activate);

/*
 * Occurs when an activated interface is being deactivated.
 */
DEFINE_INTERFACE_EVENT(gb_interface_deactivate);

/*
 * Occurs when an interface has been successfully enabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_enable);

/*
 * Occurs when an enabled interface is being disabled.
 */
DEFINE_INTERFACE_EVENT(gb_interface_disable);

#undef DEFINE_INTERFACE_EVENT
374
/*
 * Common event class for module tracepoints: captures the bus id,
 * module id, interface count and disconnect state.
 */
DECLARE_EVENT_CLASS(gb_module,

	TP_PROTO(struct gb_module *module),

	TP_ARGS(module),

	TP_STRUCT__entry(
		__field(int, hd_bus_id)
		__field(u8, module_id)
		__field(size_t, num_interfaces)
		__field(int, disconnected)	/* bool */
	),

	TP_fast_assign(
		__entry->hd_bus_id = module->hd->bus_id;
		__entry->module_id = module->module_id;
		__entry->num_interfaces = module->num_interfaces;
		__entry->disconnected = module->disconnected;
	),

	TP_printk("hd_bus_id=%d module_id=%hhu num_interfaces=%zu disconnected=%d",
		  __entry->hd_bus_id, __entry->module_id,
		  __entry->num_interfaces, __entry->disconnected)
);

/* Stamp out one named tracepoint of the gb_module class. */
#define DEFINE_MODULE_EVENT(name) \
		DEFINE_EVENT(gb_module, name, \
				TP_PROTO(struct gb_module *module), \
				TP_ARGS(module))

/*
 * Occurs after a new module is successfully created, before
 * creating any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_create);

/*
 * Occurs after the last reference to a module has been dropped.
 */
DEFINE_MODULE_EVENT(gb_module_release);

/*
 * Occurs after a module is successfully created, before registering
 * any of its interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_add);

/*
 * Occurs when a module is deleted, before deregistering its
 * interfaces.
 */
DEFINE_MODULE_EVENT(gb_module_del);

#undef DEFINE_MODULE_EVENT
429
/*
 * Common event class for host-device tracepoints: captures the bus id,
 * CPort count and maximum buffer (message) size.
 */
DECLARE_EVENT_CLASS(gb_host_device,

	TP_PROTO(struct gb_host_device *hd),

	TP_ARGS(hd),

	TP_STRUCT__entry(
		__field(int, bus_id)
		__field(size_t, num_cports)
		__field(size_t, buffer_size_max)
	),

	TP_fast_assign(
		__entry->bus_id = hd->bus_id;
		__entry->num_cports = hd->num_cports;
		__entry->buffer_size_max = hd->buffer_size_max;
	),

	TP_printk("bus_id=%d num_cports=%zu mtu=%zu",
		  __entry->bus_id, __entry->num_cports,
		  __entry->buffer_size_max)
);

/* Stamp out one named tracepoint of the gb_host_device class. */
#define DEFINE_HD_EVENT(name) \
		DEFINE_EVENT(gb_host_device, name, \
				TP_PROTO(struct gb_host_device *hd), \
				TP_ARGS(hd))

/*
 * Occurs after a new host device is successfully created, before
 * its SVC has been set up.
 */
DEFINE_HD_EVENT(gb_hd_create);

/*
 * Occurs after the last reference to a host device has been
 * dropped.
 */
DEFINE_HD_EVENT(gb_hd_release);

/*
 * Occurs after a new host device has been added, after the
 * connection to its SVC has been enabled.
 */
DEFINE_HD_EVENT(gb_hd_add);

/*
 * Occurs when a host device is being disconnected from the AP USB
 * host controller.
 */
DEFINE_HD_EVENT(gb_hd_del);

/*
 * Occurs when a host device has passed received data to the Greybus
 * core, after it has been determined it is destined for a valid
 * CPort.
 */
DEFINE_HD_EVENT(gb_hd_in);

#undef DEFINE_HD_EVENT
490
491/*
492 * Occurs on a TimeSync synchronization event or a TimeSync ping event.
493 */
494TRACE_EVENT(gb_timesync_irq,
495
496 TP_PROTO(u8 ping, u8 strobe, u8 count, u64 frame_time),
497
498 TP_ARGS(ping, strobe, count, frame_time),
499
500 TP_STRUCT__entry(
501 __field(u8, ping)
502 __field(u8, strobe)
503 __field(u8, count)
504 __field(u64, frame_time)
505 ),
506
507 TP_fast_assign(
508 __entry->ping = ping;
509 __entry->strobe = strobe;
510 __entry->count = count;
511 __entry->frame_time = frame_time;
512 ),
513
514 TP_printk("%s %d/%d frame-time %llu\n",
515 __entry->ping ? "ping" : "strobe", __entry->strobe,
516 __entry->count, __entry->frame_time)
517);
518
519#endif /* _TRACE_GREYBUS_H */
520
521/* This part must be outside protection */
522#undef TRACE_INCLUDE_PATH
523#define TRACE_INCLUDE_PATH .
524
525/*
526 * TRACE_INCLUDE_FILE is not needed if the filename and TRACE_SYSTEM are equal
527 */
528#undef TRACE_INCLUDE_FILE
529#define TRACE_INCLUDE_FILE greybus_trace
530#include <trace/define_trace.h>
531
diff --git a/drivers/staging/greybus/hd.c b/drivers/staging/greybus/hd.c
new file mode 100644
index 000000000000..185ae3fa10fd
--- /dev/null
+++ b/drivers/staging/greybus/hd.c
@@ -0,0 +1,257 @@
1/*
2 * Greybus Host Device
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12
13#include "greybus.h"
14#include "greybus_trace.h"
15
16EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_create);
17EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_release);
18EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_add);
19EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_del);
20EXPORT_TRACEPOINT_SYMBOL_GPL(gb_hd_in);
21EXPORT_TRACEPOINT_SYMBOL_GPL(gb_message_submit);
22
23static struct ida gb_hd_bus_id_map;
24
25int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
26 bool async)
27{
28 if (!hd || !hd->driver || !hd->driver->output)
29 return -EINVAL;
30 return hd->driver->output(hd, req, size, cmd, async);
31}
32EXPORT_SYMBOL_GPL(gb_hd_output);
33
/* sysfs: expose the host device's bus id (the N in "greybusN"). */
static ssize_t bus_id_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	return sprintf(buf, "%d\n", hd->bus_id);
}
static DEVICE_ATTR_RO(bus_id);

/* Attribute group installed on every host device via hd->dev.groups. */
static struct attribute *bus_attrs[] = {
	&dev_attr_bus_id.attr,
	NULL
};
ATTRIBUTE_GROUPS(bus);
48
49int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
50{
51 struct ida *id_map = &hd->cport_id_map;
52 int ret;
53
54 ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
55 if (ret < 0) {
56 dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
57 return ret;
58 }
59
60 return 0;
61}
62EXPORT_SYMBOL_GPL(gb_hd_cport_reserve);
63
64void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
65{
66 struct ida *id_map = &hd->cport_id_map;
67
68 ida_simple_remove(id_map, cport_id);
69}
70EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
71
/* Locking: Caller guarantees serialisation */
int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
				unsigned long flags)
{
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	/* Host drivers may take over CPort allocation entirely. */
	if (hd->driver->cport_allocate)
		return hd->driver->cport_allocate(hd, cport_id, flags);

	/* A negative cport_id means "allocate any free CPort". */
	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	/* Returns the allocated CPort id (>= 0) or a negative errno. */
	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}
95
96/* Locking: Caller guarantees serialisation */
97void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
98{
99 if (hd->driver->cport_release) {
100 hd->driver->cport_release(hd, cport_id);
101 return;
102 }
103
104 ida_simple_remove(&hd->cport_id_map, cport_id);
105}
106
/*
 * Device release callback: runs when the last reference to hd->dev is
 * dropped.  Puts the SVC created in gb_hd_create(), returns the bus id
 * and tears down the CPort allocator before freeing the structure.
 */
static void gb_hd_release(struct device *dev)
{
	struct gb_host_device *hd = to_gb_host_device(dev);

	trace_gb_hd_release(hd);

	/* hd->svc may be NULL if gb_svc_create() failed in gb_hd_create() */
	if (hd->svc)
		gb_svc_put(hd->svc);
	ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
	ida_destroy(&hd->cport_id_map);
	kfree(hd);
}

struct device_type greybus_hd_type = {
	.name		= "greybus_host_device",
	.release	= gb_hd_release,
};
124
/*
 * Allocate and initialise a new host device.
 *
 * Validates the driver callbacks and buffer/CPort limits, allocates
 * the gb_host_device (including the driver-private area), assigns a
 * bus id and creates the SVC.  Returns the new device with a single
 * reference held (released via gb_hd_put()), or an ERR_PTR() value
 * on failure.
 */
struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
					struct device *parent,
					size_t buffer_size_max,
					size_t num_cports)
{
	struct gb_host_device *hd;
	int ret;

	/*
	 * Validate that the driver implements all of the callbacks
	 * so that we don't have to every time we make them.
	 */
	if ((!driver->message_send) || (!driver->message_cancel)) {
		dev_err(parent, "mandatory hd-callbacks missing\n");
		return ERR_PTR(-EINVAL);
	}

	if (buffer_size_max < GB_OPERATION_MESSAGE_SIZE_MIN) {
		dev_err(parent, "greybus host-device buffers too small\n");
		return ERR_PTR(-EINVAL);
	}

	if (num_cports == 0 || num_cports > CPORT_ID_MAX + 1) {
		dev_err(parent, "Invalid number of CPorts: %zu\n", num_cports);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Make sure to never allocate messages larger than what the Greybus
	 * protocol supports.
	 */
	if (buffer_size_max > GB_OPERATION_MESSAGE_SIZE_MAX) {
		dev_warn(parent, "limiting buffer size to %u\n",
			 GB_OPERATION_MESSAGE_SIZE_MAX);
		buffer_size_max = GB_OPERATION_MESSAGE_SIZE_MAX;
	}

	hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL);
	if (!hd)
		return ERR_PTR(-ENOMEM);

	/* Bus ids start at 1; an upper bound of 0 means "no limit". */
	ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(hd);
		return ERR_PTR(ret);
	}
	hd->bus_id = ret;

	hd->driver = driver;
	INIT_LIST_HEAD(&hd->modules);
	INIT_LIST_HEAD(&hd->connections);
	ida_init(&hd->cport_id_map);
	hd->buffer_size_max = buffer_size_max;
	hd->num_cports = num_cports;

	hd->dev.parent = parent;
	hd->dev.bus = &greybus_bus_type;
	hd->dev.type = &greybus_hd_type;
	hd->dev.groups = bus_groups;
	hd->dev.dma_mask = hd->dev.parent->dma_mask;
	device_initialize(&hd->dev);
	dev_set_name(&hd->dev, "greybus%d", hd->bus_id);

	trace_gb_hd_create(hd);

	hd->svc = gb_svc_create(hd);
	if (!hd->svc) {
		dev_err(&hd->dev, "failed to create svc\n");
		/* gb_hd_release() frees the bus id, the ida and hd itself */
		put_device(&hd->dev);
		return ERR_PTR(-ENOMEM);
	}

	return hd;
}
EXPORT_SYMBOL_GPL(gb_hd_create);
200
/*
 * Register a host device created by gb_hd_create() with the driver
 * core and bring up its SVC.  On SVC failure the device registration
 * is rolled back.  Returns 0 on success or a negative errno.
 */
int gb_hd_add(struct gb_host_device *hd)
{
	int ret;

	ret = device_add(&hd->dev);
	if (ret)
		return ret;

	ret = gb_svc_add(hd->svc);
	if (ret) {
		/* undo device_add(); the creation reference stays held */
		device_del(&hd->dev);
		return ret;
	}

	trace_gb_hd_add(hd);

	return 0;
}
EXPORT_SYMBOL_GPL(gb_hd_add);
220
/*
 * Unregister a host device: the SVC is torn down first so in-flight
 * hotplug work is flushed before the device is removed from the bus.
 */
void gb_hd_del(struct gb_host_device *hd)
{
	trace_gb_hd_del(hd);

	/*
	 * Tear down the svc and flush any on-going hotplug processing before
	 * removing the remaining interfaces.
	 */
	gb_svc_del(hd->svc);

	device_del(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_del);

/* Tear down only the SVC connection (the device stays registered). */
void gb_hd_shutdown(struct gb_host_device *hd)
{
	gb_svc_del(hd->svc);
}
EXPORT_SYMBOL_GPL(gb_hd_shutdown);

/* Drop the reference obtained from gb_hd_create(). */
void gb_hd_put(struct gb_host_device *hd)
{
	put_device(&hd->dev);
}
EXPORT_SYMBOL_GPL(gb_hd_put);
246
/* One-time init: set up the global bus-id allocator.  Always succeeds. */
int __init gb_hd_init(void)
{
	ida_init(&gb_hd_bus_id_map);

	return 0;
}

/* Teardown counterpart of gb_hd_init(). */
void gb_hd_exit(void)
{
	ida_destroy(&gb_hd_bus_id_map);
}
diff --git a/drivers/staging/greybus/hd.h b/drivers/staging/greybus/hd.h
new file mode 100644
index 000000000000..c4250cfe595f
--- /dev/null
+++ b/drivers/staging/greybus/hd.h
@@ -0,0 +1,90 @@
1/*
2 * Greybus Host Device
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __HD_H
11#define __HD_H
12
13struct gb_host_device;
14struct gb_message;
15
/*
 * Operations a Greybus host driver provides to the core.  Only
 * message_send and message_cancel are mandatory (enforced by
 * gb_hd_create()); all other callbacks are optional.
 */
struct gb_hd_driver {
	/* extra bytes allocated after struct gb_host_device (hd_priv) */
	size_t	hd_priv_size;

	/* CPort lifecycle; allocate/release override the core's ida */
	int (*cport_allocate)(struct gb_host_device *hd, int cport_id,
				unsigned long flags);
	void (*cport_release)(struct gb_host_device *hd, u16 cport_id);
	int (*cport_enable)(struct gb_host_device *hd, u16 cport_id,
				unsigned long flags);
	int (*cport_disable)(struct gb_host_device *hd, u16 cport_id);
	int (*cport_connected)(struct gb_host_device *hd, u16 cport_id);
	int (*cport_flush)(struct gb_host_device *hd, u16 cport_id);
	int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id,
				u8 phase, unsigned int timeout);
	int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id,
				size_t peer_space, unsigned int timeout);
	int (*cport_clear)(struct gb_host_device *hd, u16 cport_id);

	/* message transmission (mandatory) */
	int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id,
			struct gb_message *message, gfp_t gfp_mask);
	void (*message_cancel)(struct gb_message *message);

	int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id);
	int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id);
	/* out-of-band requests, used by gb_hd_output() */
	int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
			bool async);
	/* TimeSync support */
	int (*timesync_enable)(struct gb_host_device *hd, u8 count,
			u64 frame_time, u32 strobe_delay, u32 refclk);
	int (*timesync_disable)(struct gb_host_device *hd);
	int (*timesync_authoritative)(struct gb_host_device *hd,
			u64 *frame_time);
	int (*timesync_get_last_event)(struct gb_host_device *hd,
			u64 *frame_time);
};
48
/* One Greybus host controller ("greybusN" device on the greybus bus). */
struct gb_host_device {
	struct device dev;
	int bus_id;		/* assigned by gb_hd_create(), starts at 1 */
	const struct gb_hd_driver *driver;

	struct list_head modules;
	struct list_head connections;
	struct ida cport_id_map;	/* allocator for HD-side CPort ids */

	/* Number of CPorts supported by the UniPro IP */
	size_t num_cports;

	/* Host device buffer constraints */
	size_t buffer_size_max;

	struct gb_svc *svc;
	/* Private data for the host driver */
	unsigned long hd_priv[0] __aligned(sizeof(s64));
};
#define to_gb_host_device(d) container_of(d, struct gb_host_device, dev)
69
70int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id);
71void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id);
72int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
73 unsigned long flags);
74void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id);
75
76struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
77 struct device *parent,
78 size_t buffer_size_max,
79 size_t num_cports);
80int gb_hd_add(struct gb_host_device *hd);
81void gb_hd_del(struct gb_host_device *hd);
82void gb_hd_shutdown(struct gb_host_device *hd);
83void gb_hd_put(struct gb_host_device *hd);
84int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
85 bool in_irq);
86
87int gb_hd_init(void);
88void gb_hd_exit(void);
89
90#endif /* __HD_H */
diff --git a/drivers/staging/greybus/hid.c b/drivers/staging/greybus/hid.c
new file mode 100644
index 000000000000..730d746fc4c2
--- /dev/null
+++ b/drivers/staging/greybus/hid.c
@@ -0,0 +1,536 @@
1/*
2 * HID class driver for the Greybus.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/bitops.h>
11#include <linux/hid.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/slab.h>
16
17#include "greybus.h"
18
19/* Greybus HID device's structure */
20struct gb_hid {
21 struct gb_bundle *bundle;
22 struct gb_connection *connection;
23
24 struct hid_device *hid;
25 struct gb_hid_desc_response hdesc;
26
27 unsigned long flags;
28#define GB_HID_STARTED 0x01
29#define GB_HID_READ_PENDING 0x04
30
31 unsigned int bufsize;
32 char *inbuf;
33};
34
35static DEFINE_MUTEX(gb_hid_open_mutex);
36
37/* Routines to get controller's information over greybus */
38
39/* Operations performed on greybus */
40static int gb_hid_get_desc(struct gb_hid *ghid)
41{
42 return gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_DESC, NULL,
43 0, &ghid->hdesc, sizeof(ghid->hdesc));
44}
45
46static int gb_hid_get_report_desc(struct gb_hid *ghid, char *rdesc)
47{
48 int ret;
49
50 ret = gb_pm_runtime_get_sync(ghid->bundle);
51 if (ret)
52 return ret;
53
54 ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT_DESC,
55 NULL, 0, rdesc,
56 le16_to_cpu(ghid->hdesc.wReportDescLength));
57
58 gb_pm_runtime_put_autosuspend(ghid->bundle);
59
60 return ret;
61}
62
63static int gb_hid_set_power(struct gb_hid *ghid, int type)
64{
65 int ret;
66
67 ret = gb_pm_runtime_get_sync(ghid->bundle);
68 if (ret)
69 return ret;
70
71 ret = gb_operation_sync(ghid->connection, type, NULL, 0, NULL, 0);
72
73 gb_pm_runtime_put_autosuspend(ghid->bundle);
74
75 return ret;
76}
77
78static int gb_hid_get_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
79 unsigned char *buf, int len)
80{
81 struct gb_hid_get_report_request request;
82 int ret;
83
84 ret = gb_pm_runtime_get_sync(ghid->bundle);
85 if (ret)
86 return ret;
87
88 request.report_type = report_type;
89 request.report_id = report_id;
90
91 ret = gb_operation_sync(ghid->connection, GB_HID_TYPE_GET_REPORT,
92 &request, sizeof(request), buf, len);
93
94 gb_pm_runtime_put_autosuspend(ghid->bundle);
95
96 return ret;
97}
98
/*
 * Write a report to the device.  Returns @len on success or a negative
 * errno.
 */
static int gb_hid_set_report(struct gb_hid *ghid, u8 report_type, u8 report_id,
			     unsigned char *buf, int len)
{
	struct gb_hid_set_report_request *request;
	struct gb_operation *operation;
	/*
	 * The "- 1" presumably accounts for a one-byte report[] already
	 * declared inside the request struct — TODO confirm against
	 * gb_hid_set_report_request's definition.
	 */
	int ret, size = sizeof(*request) + len - 1;

	ret = gb_pm_runtime_get_sync(ghid->bundle);
	if (ret)
		return ret;

	/* No response payload is expected for a set-report */
	operation = gb_operation_create(ghid->connection,
					GB_HID_TYPE_SET_REPORT, size, 0,
					GFP_KERNEL);
	if (!operation) {
		gb_pm_runtime_put_autosuspend(ghid->bundle);
		return -ENOMEM;
	}

	request = operation->request->payload;
	request->report_type = report_type;
	request->report_id = report_id;
	memcpy(request->report, buf, len);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&operation->connection->bundle->dev,
			"failed to set report: %d\n", ret);
	} else {
		ret = len;
	}

	/* Drop our operation reference before releasing the PM reference */
	gb_operation_put(operation);
	gb_pm_runtime_put_autosuspend(ghid->bundle);

	return ret;
}
136
/*
 * Handler for unsolicited requests from the module: GB_HID_TYPE_IRQ_EVENT
 * requests carry input reports, which are forwarded to the HID core only
 * once the device has been opened (GB_HID_STARTED set).
 */
static int gb_hid_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_hid *ghid = gb_connection_get_data(connection);
	struct gb_hid_input_report_request *request = op->request->payload;

	if (op->type != GB_HID_TYPE_IRQ_EVENT) {
		dev_err(&connection->bundle->dev,
			"unsupported unsolicited request\n");
		return -EINVAL;
	}

	/* NOTE(review): payload_size is passed through unvalidated here */
	if (test_bit(GB_HID_STARTED, &ghid->flags))
		hid_input_report(ghid->hid, HID_INPUT_REPORT,
				 request->report, op->request->payload_size, 1);

	return 0;
}
155
156static int gb_hid_report_len(struct hid_report *report)
157{
158 return ((report->size - 1) >> 3) + 1 +
159 report->device->report_enum[report->type].numbered;
160}
161
162static void gb_hid_find_max_report(struct hid_device *hid, unsigned int type,
163 unsigned int *max)
164{
165 struct hid_report *report;
166 unsigned int size;
167
168 list_for_each_entry(report, &hid->report_enum[type].report_list, list) {
169 size = gb_hid_report_len(report);
170 if (*max < size)
171 *max = size;
172 }
173}
174
175static void gb_hid_free_buffers(struct gb_hid *ghid)
176{
177 kfree(ghid->inbuf);
178 ghid->inbuf = NULL;
179 ghid->bufsize = 0;
180}
181
182static int gb_hid_alloc_buffers(struct gb_hid *ghid, size_t bufsize)
183{
184 ghid->inbuf = kzalloc(bufsize, GFP_KERNEL);
185 if (!ghid->inbuf)
186 return -ENOMEM;
187
188 ghid->bufsize = bufsize;
189
190 return 0;
191}
192
193/* Routines dealing with reports */
/*
 * Fetch the current value of @report from the device and push it through
 * the HID core so input fields start with up-to-date state.  A failed
 * read is silently skipped (best effort during probe).
 */
static void gb_hid_init_report(struct gb_hid *ghid, struct hid_report *report)
{
	unsigned int size;

	size = gb_hid_report_len(report);
	if (gb_hid_get_report(ghid, report->type, report->id, ghid->inbuf,
			      size))
		return;

	/*
	 * hid->driver_lock is held as we are in probe function,
	 * we just need to setup the input fields, so using
	 * hid_report_raw_event is safe.
	 */
	hid_report_raw_event(ghid->hid, report->type, ghid->inbuf, size, 1);
}
210
/* Seed all input and feature reports from current device state. */
static void gb_hid_init_reports(struct gb_hid *ghid)
{
	struct hid_device *hid = ghid->hid;
	struct hid_report *report;

	list_for_each_entry(report,
		&hid->report_enum[HID_INPUT_REPORT].report_list, list)
		gb_hid_init_report(ghid, report);

	list_for_each_entry(report,
		&hid->report_enum[HID_FEATURE_REPORT].report_list, list)
		gb_hid_init_report(ghid, report);
}
224
225static int __gb_hid_get_raw_report(struct hid_device *hid,
226 unsigned char report_number, __u8 *buf, size_t count,
227 unsigned char report_type)
228{
229 struct gb_hid *ghid = hid->driver_data;
230 int ret;
231
232 if (report_type == HID_OUTPUT_REPORT)
233 return -EINVAL;
234
235 ret = gb_hid_get_report(ghid, report_type, report_number, buf, count);
236 if (!ret)
237 ret = count;
238
239 return ret;
240}
241
242static int __gb_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
243 size_t len, unsigned char report_type)
244{
245 struct gb_hid *ghid = hid->driver_data;
246 int report_id = buf[0];
247 int ret;
248
249 if (report_type == HID_INPUT_REPORT)
250 return -EINVAL;
251
252 if (report_id) {
253 buf++;
254 len--;
255 }
256
257 ret = gb_hid_set_report(ghid, report_type, report_id, buf, len);
258 if (report_id && ret >= 0)
259 ret++; /* add report_id to the number of transfered bytes */
260
261 return 0;
262}
263
264static int gb_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
265 __u8 *buf, size_t len, unsigned char rtype,
266 int reqtype)
267{
268 switch (reqtype) {
269 case HID_REQ_GET_REPORT:
270 return __gb_hid_get_raw_report(hid, reportnum, buf, len, rtype);
271 case HID_REQ_SET_REPORT:
272 if (buf[0] != reportnum)
273 return -EINVAL;
274 return __gb_hid_output_raw_report(hid, buf, len, rtype);
275 default:
276 return -EIO;
277 }
278}
279
280/* HID Callbacks */
/*
 * HID ->parse() callback: fetch the report descriptor from the module
 * and hand it to the HID core parser.
 */
static int gb_hid_parse(struct hid_device *hid)
{
	struct gb_hid *ghid = hid->driver_data;
	unsigned int rsize;
	char *rdesc;
	int ret;

	rsize = le16_to_cpu(ghid->hdesc.wReportDescLength);
	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
		dbg_hid("weird size of report descriptor (%u)\n", rsize);
		return -EINVAL;
	}

	rdesc = kzalloc(rsize, GFP_KERNEL);
	if (!rdesc) {
		dbg_hid("couldn't allocate rdesc memory\n");
		return -ENOMEM;
	}

	ret = gb_hid_get_report_desc(ghid, rdesc);
	if (ret) {
		hid_err(hid, "reading report descriptor failed\n");
		goto free_rdesc;
	}

	ret = hid_parse_report(hid, rdesc, rsize);
	if (ret)
		dbg_hid("parsing report descriptor failed\n");

free_rdesc:
	kfree(rdesc);

	return ret;
}
315
/*
 * HID ->start() callback: size the report buffer to the largest report
 * of any type (clamped to HID_MAX_BUFFER_SIZE) and, unless quirked off,
 * fetch the initial report values.
 */
static int gb_hid_start(struct hid_device *hid)
{
	struct gb_hid *ghid = hid->driver_data;
	unsigned int bufsize = HID_MIN_BUFFER_SIZE;
	int ret;

	gb_hid_find_max_report(hid, HID_INPUT_REPORT, &bufsize);
	gb_hid_find_max_report(hid, HID_OUTPUT_REPORT, &bufsize);
	gb_hid_find_max_report(hid, HID_FEATURE_REPORT, &bufsize);

	if (bufsize > HID_MAX_BUFFER_SIZE)
		bufsize = HID_MAX_BUFFER_SIZE;

	ret = gb_hid_alloc_buffers(ghid, bufsize);
	if (ret)
		return ret;

	if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
		gb_hid_init_reports(ghid);

	return 0;
}
338
339static void gb_hid_stop(struct hid_device *hid)
340{
341 struct gb_hid *ghid = hid->driver_data;
342
343 gb_hid_free_buffers(ghid);
344}
345
/*
 * HID ->open() callback.  The first opener powers the device on and
 * sets GB_HID_STARTED so the request handler begins forwarding input
 * reports; hid->open is rolled back if powering on fails.
 */
static int gb_hid_open(struct hid_device *hid)
{
	struct gb_hid *ghid = hid->driver_data;
	int ret = 0;

	mutex_lock(&gb_hid_open_mutex);
	if (!hid->open++) {
		ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
		if (ret < 0)
			hid->open--;
		else
			set_bit(GB_HID_STARTED, &ghid->flags);
	}
	mutex_unlock(&gb_hid_open_mutex);

	return ret;
}
363
/*
 * HID ->close() callback.  The last closer stops report forwarding and
 * powers the device off; a power-off failure is only logged.
 */
static void gb_hid_close(struct hid_device *hid)
{
	struct gb_hid *ghid = hid->driver_data;
	int ret;

	/*
	 * Protecting hid->open to make sure we don't restart data acquisition
	 * due to a resumption we no longer care about.
	 */
	mutex_lock(&gb_hid_open_mutex);
	if (!--hid->open) {
		clear_bit(GB_HID_STARTED, &ghid->flags);

		/* Save some power */
		ret = gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
		if (ret)
			dev_err(&ghid->connection->bundle->dev,
				"failed to power off (%d)\n", ret);
	}
	mutex_unlock(&gb_hid_open_mutex);
}
385
386static int gb_hid_power(struct hid_device *hid, int lvl)
387{
388 struct gb_hid *ghid = hid->driver_data;
389
390 switch (lvl) {
391 case PM_HINT_FULLON:
392 return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_ON);
393 case PM_HINT_NORMAL:
394 return gb_hid_set_power(ghid, GB_HID_TYPE_PWR_OFF);
395 }
396
397 return 0;
398}
399
/* Low-level transport callbacks handed to the HID core */
static struct hid_ll_driver gb_hid_ll_driver = {
	.parse = gb_hid_parse,
	.start = gb_hid_start,
	.stop = gb_hid_stop,
	.open = gb_hid_open,
	.close = gb_hid_close,
	.power = gb_hid_power,
	.raw_request = gb_hid_raw_request,
};
410
/*
 * Populate the hid_device from the module's HID descriptor and wire up
 * our low-level driver callbacks.  Must run before hid_add_device().
 */
static int gb_hid_init(struct gb_hid *ghid)
{
	struct hid_device *hid = ghid->hid;
	int ret;

	ret = gb_hid_get_desc(ghid);
	if (ret)
		return ret;

	hid->version = le16_to_cpu(ghid->hdesc.bcdHID);
	hid->vendor = le16_to_cpu(ghid->hdesc.wVendorID);
	hid->product = le16_to_cpu(ghid->hdesc.wProductID);
	hid->country = ghid->hdesc.bCountryCode;

	hid->driver_data = ghid;
	hid->ll_driver = &gb_hid_ll_driver;
	hid->dev.parent = &ghid->connection->bundle->dev;
// hid->bus = BUS_GREYBUS; /* Need a bustype for GREYBUS in <linux/input.h> */

	/* Set HID device's name: "<bundle dev name> VVVV:PPPP" */
	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
		 dev_name(&ghid->connection->bundle->dev),
		 hid->vendor, hid->product);

	return 0;
}
437
/*
 * Bundle probe: set up the single HID cport connection, allocate and
 * register a hid_device, then let the bundle runtime-suspend.  On
 * failure everything is unwound in reverse order of creation.
 */
static int gb_hid_probe(struct gb_bundle *bundle,
			const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct hid_device *hid;
	struct gb_hid *ghid;
	int ret;

	/* A HID bundle carries exactly one cport, speaking HID */
	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_HID)
		return -ENODEV;

	ghid = kzalloc(sizeof(*ghid), GFP_KERNEL);
	if (!ghid)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_hid_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto err_free_ghid;
	}

	gb_connection_set_data(connection, ghid);
	ghid->connection = connection;

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_connection_destroy;
	}

	ghid->hid = hid;
	ghid->bundle = bundle;

	greybus_set_drvdata(bundle, ghid);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_destroy_hid;

	/* Fetch the HID descriptor and fill in the hid_device fields */
	ret = gb_hid_init(ghid);
	if (ret)
		goto err_connection_disable;

	ret = hid_add_device(hid);
	if (ret) {
		hid_err(hid, "can't add hid device: %d\n", ret);
		goto err_connection_disable;
	}

	/* Allow the bundle to idle until there is HID traffic */
	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

err_connection_disable:
	gb_connection_disable(connection);
err_destroy_hid:
	hid_destroy_device(hid);
err_connection_destroy:
	gb_connection_destroy(connection);
err_free_ghid:
	kfree(ghid);

	return ret;
}
508
/*
 * Bundle disconnect: resume the bundle first (balancing the PM usage
 * count even if resume fails), then tear down in reverse order of probe.
 */
static void gb_hid_disconnect(struct gb_bundle *bundle)
{
	struct gb_hid *ghid = greybus_get_drvdata(bundle);

	if (gb_pm_runtime_get_sync(bundle))
		gb_pm_runtime_get_noresume(bundle);

	hid_destroy_device(ghid->hid);
	gb_connection_disable(ghid->connection);
	gb_connection_destroy(ghid->connection);
	kfree(ghid);
}
521
/* Bind to any bundle advertising the HID device class */
static const struct greybus_bundle_id gb_hid_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_HID) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_hid_id_table);

static struct greybus_driver gb_hid_driver = {
	.name = "hid",
	.probe = gb_hid_probe,
	.disconnect = gb_hid_disconnect,
	.id_table = gb_hid_id_table,
};
module_greybus_driver(gb_hid_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/i2c.c b/drivers/staging/greybus/i2c.c
new file mode 100644
index 000000000000..c2a50087000c
--- /dev/null
+++ b/drivers/staging/greybus/i2c.c
@@ -0,0 +1,343 @@
1/*
2 * I2C bridge driver for the Greybus "generic" I2C module.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/i2c.h>
14
15#include "greybus.h"
16#include "gbphy.h"
17
/* Per-cport state for one Greybus I2C adapter */
struct gb_i2c_device {
	struct gb_connection *connection;
	struct gbphy_device *gbphy_dev;

	/* Cached I2C functionality bits, fetched once at probe time */
	u32 functionality;

	struct i2c_adapter adapter;
};
26
27/*
28 * Map Greybus i2c functionality bits into Linux ones
29 */
30static u32 gb_i2c_functionality_map(u32 gb_i2c_functionality)
31{
32 return gb_i2c_functionality; /* All bits the same for now */
33}
34
35static int gb_i2c_functionality_operation(struct gb_i2c_device *gb_i2c_dev)
36{
37 struct gb_i2c_functionality_response response;
38 u32 functionality;
39 int ret;
40
41 ret = gb_operation_sync(gb_i2c_dev->connection,
42 GB_I2C_TYPE_FUNCTIONALITY,
43 NULL, 0, &response, sizeof(response));
44 if (ret)
45 return ret;
46
47 functionality = le32_to_cpu(response.functionality);
48 gb_i2c_dev->functionality = gb_i2c_functionality_map(functionality);
49
50 return 0;
51}
52
53/*
54 * Map Linux i2c_msg flags into Greybus i2c transfer op flags.
55 */
56static u16 gb_i2c_transfer_op_flags_map(u16 flags)
57{
58 return flags; /* All flags the same for now */
59}
60
61static void
62gb_i2c_fill_transfer_op(struct gb_i2c_transfer_op *op, struct i2c_msg *msg)
63{
64 u16 flags = gb_i2c_transfer_op_flags_map(msg->flags);
65
66 op->addr = cpu_to_le16(msg->addr);
67 op->flags = cpu_to_le16(flags);
68 op->size = cpu_to_le16(msg->len);
69}
70
/*
 * Build a GB_I2C_TYPE_TRANSFER operation for @msg_count messages.  The
 * request holds one transfer-op descriptor per message followed by the
 * concatenated outbound (write) data; the response buffer is sized to
 * hold all inbound (read) data.  Returns NULL on allocation failure or
 * if @msg_count does not fit the 16-bit op_count wire field.
 */
static struct gb_operation *
gb_i2c_operation_create(struct gb_connection *connection,
			struct i2c_msg *msgs, u32 msg_count)
{
	struct gb_i2c_device *gb_i2c_dev = gb_connection_get_data(connection);
	struct gb_i2c_transfer_request *request;
	struct gb_operation *operation;
	struct gb_i2c_transfer_op *op;
	struct i2c_msg *msg;
	u32 data_out_size = 0;
	u32 data_in_size = 0;
	size_t request_size;
	void *data;
	u16 op_count;
	u32 i;

	if (msg_count > (u32)U16_MAX) {
		dev_err(&gb_i2c_dev->gbphy_dev->dev, "msg_count (%u) too big\n",
			msg_count);
		return NULL;
	}
	op_count = (u16)msg_count;

	/*
	 * In addition to space for all message descriptors we need
	 * to have enough to hold all outbound message data.
	 */
	msg = msgs;
	for (i = 0; i < msg_count; i++, msg++)
		if (msg->flags & I2C_M_RD)
			data_in_size += (u32)msg->len;
		else
			data_out_size += (u32)msg->len;

	request_size = sizeof(*request);
	request_size += msg_count * sizeof(*op);
	request_size += data_out_size;

	/* Response consists only of incoming data */
	operation = gb_operation_create(connection, GB_I2C_TYPE_TRANSFER,
				request_size, data_in_size, GFP_KERNEL);
	if (!operation)
		return NULL;

	request = operation->request->payload;
	request->op_count = cpu_to_le16(op_count);
	/* Fill in the ops array */
	op = &request->ops[0];
	msg = msgs;
	for (i = 0; i < msg_count; i++)
		gb_i2c_fill_transfer_op(op++, msg++);

	if (!data_out_size)
		return operation;

	/*
	 * Copy over the outgoing data; it starts after the last op.
	 * Note: arithmetic on the void pointer relies on the GNU
	 * byte-sized void* extension (standard practice in the kernel).
	 */
	data = op;
	msg = msgs;
	for (i = 0; i < msg_count; i++) {
		if (!(msg->flags & I2C_M_RD)) {
			memcpy(data, msg->buf, msg->len);
			data += msg->len;
		}
		msg++;
	}

	return operation;
}
139
140static void gb_i2c_decode_response(struct i2c_msg *msgs, u32 msg_count,
141 struct gb_i2c_transfer_response *response)
142{
143 struct i2c_msg *msg = msgs;
144 u8 *data;
145 u32 i;
146
147 if (!response)
148 return;
149 data = response->data;
150 for (i = 0; i < msg_count; i++) {
151 if (msg->flags & I2C_M_RD) {
152 memcpy(msg->buf, data, msg->len);
153 data += msg->len;
154 }
155 msg++;
156 }
157}
158
159/*
160 * Some i2c transfer operations return results that are expected.
161 */
162static bool gb_i2c_expected_transfer_error(int errno)
163{
164 return errno == -EAGAIN || errno == -ENODEV;
165}
166
/*
 * Send a transfer operation for @msgs and decode the response.  Returns
 * the number of messages transferred on success or a negative errno;
 * only unexpected errors are logged.
 */
static int gb_i2c_transfer_operation(struct gb_i2c_device *gb_i2c_dev,
				     struct i2c_msg *msgs, u32 msg_count)
{
	struct gb_connection *connection = gb_i2c_dev->connection;
	struct device *dev = &gb_i2c_dev->gbphy_dev->dev;
	struct gb_operation *operation;
	int ret;

	operation = gb_i2c_operation_create(connection, msgs, msg_count);
	if (!operation)
		return -ENOMEM;

	ret = gbphy_runtime_get_sync(gb_i2c_dev->gbphy_dev);
	if (ret)
		goto exit_operation_put;

	ret = gb_operation_request_send_sync(operation);
	if (!ret) {
		struct gb_i2c_transfer_response *response;

		response = operation->response->payload;
		gb_i2c_decode_response(msgs, msg_count, response);
		ret = msg_count;
	} else if (!gb_i2c_expected_transfer_error(ret)) {
		dev_err(dev, "transfer operation failed (%d)\n", ret);
	}

	gbphy_runtime_put_autosuspend(gb_i2c_dev->gbphy_dev);

	/* The PM reference is only dropped if get_sync succeeded above */
exit_operation_put:
	gb_operation_put(operation);

	return ret;
}
201
/* i2c_algorithm ->master_xfer() hook */
static int gb_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
			      int msg_count)
{
	return gb_i2c_transfer_operation(i2c_get_adapdata(adap), msgs,
					 msg_count);
}
211
212#if 0
213/* Later */
214static int gb_i2c_smbus_xfer(struct i2c_adapter *adap,
215 u16 addr, unsigned short flags, char read_write,
216 u8 command, int size, union i2c_smbus_data *data)
217{
218 struct gb_i2c_device *gb_i2c_dev;
219
220 gb_i2c_dev = i2c_get_adapdata(adap);
221
222 return 0;
223}
224#endif
225
226static u32 gb_i2c_functionality(struct i2c_adapter *adap)
227{
228 struct gb_i2c_device *gb_i2c_dev = i2c_get_adapdata(adap);
229
230 return gb_i2c_dev->functionality;
231}
232
/* Adapter algorithm; smbus_xfer is left to the core's emulation */
static const struct i2c_algorithm gb_i2c_algorithm = {
	.master_xfer = gb_i2c_master_xfer,
	/* .smbus_xfer = gb_i2c_smbus_xfer, */
	.functionality = gb_i2c_functionality,
};
238
239/*
240 * Do initial setup of the i2c device. This includes verifying we
241 * can support it (based on the protocol version it advertises).
242 * If that's OK, we get and cached its functionality bits.
243 *
244 * Note: gb_i2c_dev->connection is assumed to have been valid.
245 */
246static int gb_i2c_device_setup(struct gb_i2c_device *gb_i2c_dev)
247{
248 /* Assume the functionality never changes, just get it once */
249 return gb_i2c_functionality_operation(gb_i2c_dev);
250}
251
/*
 * gbphy probe: create and enable the i2c cport connection, query the
 * adapter's functionality, then register the Linux i2c adapter and let
 * the device runtime-suspend.  Failures unwind in reverse order.
 */
static int gb_i2c_probe(struct gbphy_device *gbphy_dev,
			const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_i2c_device *gb_i2c_dev;
	struct i2c_adapter *adapter;
	int ret;

	gb_i2c_dev = kzalloc(sizeof(*gb_i2c_dev), GFP_KERNEL);
	if (!gb_i2c_dev)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  NULL);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_i2cdev_free;
	}

	gb_i2c_dev->connection = connection;
	gb_connection_set_data(connection, gb_i2c_dev);
	gb_i2c_dev->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, gb_i2c_dev);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_i2c_device_setup(gb_i2c_dev);
	if (ret)
		goto exit_connection_disable;

	/* Looks good; up our i2c adapter */
	adapter = &gb_i2c_dev->adapter;
	adapter->owner = THIS_MODULE;
	adapter->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
	adapter->algo = &gb_i2c_algorithm;
	/* adapter->algo_data = what? */

	adapter->dev.parent = &gbphy_dev->dev;
	snprintf(adapter->name, sizeof(adapter->name), "Greybus i2c adapter");
	i2c_set_adapdata(adapter, gb_i2c_dev);

	ret = i2c_add_adapter(adapter);
	if (ret)
		goto exit_connection_disable;

	/* Allow the device to idle until there is i2c traffic */
	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_i2cdev_free:
	kfree(gb_i2c_dev);

	return ret;
}
312
/*
 * gbphy remove: resume the device first (balancing the PM usage count
 * even if resume fails), then tear down in reverse order of probe.
 */
static void gb_i2c_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_i2c_device *gb_i2c_dev = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = gb_i2c_dev->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	i2c_del_adapter(&gb_i2c_dev->adapter);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(gb_i2c_dev);
}
328
/* Bind to any gbphy device advertising the I2C protocol */
static const struct gbphy_device_id gb_i2c_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_I2C) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_i2c_id_table);

static struct gbphy_driver i2c_driver = {
	.name = "i2c",
	.probe = gb_i2c_probe,
	.remove = gb_i2c_remove,
	.id_table = gb_i2c_id_table,
};

module_gbphy_driver(i2c_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/interface.c b/drivers/staging/greybus/interface.c
new file mode 100644
index 000000000000..546b090e2d51
--- /dev/null
+++ b/drivers/staging/greybus/interface.c
@@ -0,0 +1,1316 @@
1/*
2 * Greybus interface code
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/delay.h>
11
12#include "greybus.h"
13#include "greybus_trace.h"
14
15#define GB_INTERFACE_MODE_SWITCH_TIMEOUT 2000
16
17#define GB_INTERFACE_DEVICE_ID_BAD 0xff
18
19#define GB_INTERFACE_AUTOSUSPEND_MS 3000
20
21/* Time required for interface to enter standby before disabling REFCLK */
22#define GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS 20
23
24/* Don't-care selector index */
25#define DME_SELECTOR_INDEX_NULL 0
26
27/* DME attributes */
28/* FIXME: remove ES2 support and DME_T_TST_SRC_INCREMENT */
29#define DME_T_TST_SRC_INCREMENT 0x4083
30
31#define DME_DDBL1_MANUFACTURERID 0x5003
32#define DME_DDBL1_PRODUCTID 0x5004
33
34#define DME_TOSHIBA_GMP_VID 0x6000
35#define DME_TOSHIBA_GMP_PID 0x6001
36#define DME_TOSHIBA_GMP_SN0 0x6002
37#define DME_TOSHIBA_GMP_SN1 0x6003
38#define DME_TOSHIBA_GMP_INIT_STATUS 0x6101
39
40/* DDBL1 Manufacturer and Product ids */
41#define TOSHIBA_DMID 0x0126
42#define TOSHIBA_ES2_BRIDGE_DPID 0x1000
43#define TOSHIBA_ES3_APBRIDGE_DPID 0x1001
44#define TOSHIBA_ES3_GBPHY_DPID 0x1002
45
46static int gb_interface_hibernate_link(struct gb_interface *intf);
47static int gb_interface_refclk_set(struct gb_interface *intf, bool enable);
48
49static int gb_interface_dme_attr_get(struct gb_interface *intf,
50 u16 attr, u32 *val)
51{
52 return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id,
53 attr, DME_SELECTOR_INDEX_NULL, val);
54}
55
/*
 * Read the vendor id, product id and 64-bit serial number from the
 * Toshiba-specific GMP DME attributes.  Any other bridge manufacturer
 * is rejected until standard GMP attributes exist.
 */
static int gb_interface_read_ara_dme(struct gb_interface *intf)
{
	u32 sn0, sn1;
	int ret;

	/*
	 * Unless this is a Toshiba bridge, bail out until we have defined
	 * standard GMP attributes.
	 */
	if (intf->ddbl1_manufacturer_id != TOSHIBA_DMID) {
		dev_err(&intf->dev, "unknown manufacturer %08x\n",
			intf->ddbl1_manufacturer_id);
		return -ENODEV;
	}

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_VID,
					&intf->vendor_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_PID,
					&intf->product_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN0, &sn0);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_TOSHIBA_GMP_SN1, &sn1);
	if (ret)
		return ret;

	/* The serial number is split across two 32-bit attributes */
	intf->serial_number = (u64)sn1 << 32 | sn0;

	return 0;
}
93
/*
 * Read the interface's DDBL1 ids and GMP attributes, applying ES2
 * quirks when a Toshiba ES2 bridge is detected.  Idempotent: results
 * are cached via intf->dme_read.
 */
static int gb_interface_read_dme(struct gb_interface *intf)
{
	int ret;

	/* DME attributes have already been read */
	if (intf->dme_read)
		return 0;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_MANUFACTURERID,
					&intf->ddbl1_manufacturer_id);
	if (ret)
		return ret;

	ret = gb_interface_dme_attr_get(intf, DME_DDBL1_PRODUCTID,
					&intf->ddbl1_product_id);
	if (ret)
		return ret;

	/* ES2 bridges lack GMP ids and a proper init-status attribute */
	if (intf->ddbl1_manufacturer_id == TOSHIBA_DMID &&
	    intf->ddbl1_product_id == TOSHIBA_ES2_BRIDGE_DPID) {
		intf->quirks |= GB_INTERFACE_QUIRK_NO_GMP_IDS;
		intf->quirks |= GB_INTERFACE_QUIRK_NO_INIT_STATUS;
	}

	ret = gb_interface_read_ara_dme(intf);
	if (ret)
		return ret;

	intf->dme_read = true;

	return 0;
}
126
/*
 * Allocate a device id for the interface, assign it via the SVC and
 * create the AP<->interface route.  On success intf->device_id records
 * the id for later teardown by gb_interface_route_destroy().
 */
static int gb_interface_route_create(struct gb_interface *intf)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 intf_id = intf->interface_id;
	u8 device_id;
	int ret;

	/* Allocate an interface device id. */
	ret = ida_simple_get(&svc->device_id_map,
			     GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
			     GFP_KERNEL);
	if (ret < 0) {
		dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
		return ret;
	}
	device_id = ret;

	ret = gb_svc_intf_device_id(svc, intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to set device id %u: %d\n",
			device_id, ret);
		goto err_ida_remove;
	}

	/* FIXME: Hard-coded AP device id. */
	ret = gb_svc_route_create(svc, svc->ap_intf_id, GB_SVC_DEVICE_ID_AP,
				  intf_id, device_id);
	if (ret) {
		dev_err(&intf->dev, "failed to create route: %d\n", ret);
		goto err_svc_id_free;
	}

	intf->device_id = device_id;

	return 0;

err_svc_id_free:
	/*
	 * XXX Should we tell SVC that this id doesn't belong to interface
	 * XXX anymore.
	 */
err_ida_remove:
	ida_simple_remove(&svc->device_id_map, device_id);

	return ret;
}
173
174static void gb_interface_route_destroy(struct gb_interface *intf)
175{
176 struct gb_svc *svc = intf->hd->svc;
177
178 if (intf->device_id == GB_INTERFACE_DEVICE_ID_BAD)
179 return;
180
181 gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
182 ida_simple_remove(&svc->device_id_map, intf->device_id);
183 intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
184}
185
/*
 * Disable and re-enable (re-enumerate) the interface in response to a
 * legacy mode-switch mailbox event; the interface is deactivated if the
 * re-enable fails.
 *
 * Locking: Caller holds the interface mutex.
 */
static int gb_interface_legacy_mode_switch(struct gb_interface *intf)
{
	int ret;

	dev_info(&intf->dev, "legacy mode switch detected\n");

	/* Mark as disconnected to prevent I/O during disable. */
	intf->disconnected = true;
	gb_interface_disable(intf);
	intf->disconnected = false;

	ret = gb_interface_enable(intf);
	if (ret) {
		dev_err(&intf->dev, "failed to re-enable interface: %d\n", ret);
		gb_interface_deactivate(intf);
	}

	return ret;
}
206
/*
 * Handle an interface-mailbox event reported by the SVC.  A UniPro
 * error or an unexpected mailbox value disables and deactivates the
 * interface; otherwise the event either performs a legacy mode switch
 * directly or completes the pending mode-switch handshake.
 */
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox)
{
	mutex_lock(&intf->mutex);

	if (result) {
		dev_warn(&intf->dev,
			 "mailbox event with UniPro error: 0x%04x\n",
			 result);
		goto err_disable;
	}

	if (mailbox != GB_SVC_INTF_MAILBOX_GREYBUS) {
		dev_warn(&intf->dev,
			 "mailbox event with unexpected value: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	if (intf->quirks & GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH) {
		gb_interface_legacy_mode_switch(intf);
		goto out_unlock;
	}

	if (!intf->mode_switch) {
		dev_warn(&intf->dev, "unexpected mailbox event: 0x%08x\n",
			 mailbox);
		goto err_disable;
	}

	dev_info(&intf->dev, "mode switch detected\n");

	/* Wake the worker queued by gb_interface_request_mode_switch() */
	complete(&intf->mode_switch_completion);

out_unlock:
	mutex_unlock(&intf->mutex);

	return;

err_disable:
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
251
/*
 * Worker performing a requested mode switch: disable the interface,
 * wait (bounded) for the module's mailbox event, then re-enable it.
 * On interruption or timeout the interface is deactivated instead.
 * Drops the interface reference taken when the work was queued.
 */
static void gb_interface_mode_switch_work(struct work_struct *work)
{
	struct gb_interface *intf;
	struct gb_control *control;
	unsigned long timeout;
	int ret;

	intf = container_of(work, struct gb_interface, mode_switch_work);

	mutex_lock(&intf->mutex);
	/* Make sure interface is still enabled. */
	if (!intf->enabled) {
		dev_dbg(&intf->dev, "mode switch aborted\n");
		intf->mode_switch = false;
		mutex_unlock(&intf->mutex);
		goto out_interface_put;
	}

	/*
	 * Prepare the control device for mode switch and make sure to get an
	 * extra reference before it goes away during interface disable.
	 */
	control = gb_control_get(intf->control);
	gb_control_mode_switch_prepare(control);
	gb_interface_disable(intf);
	mutex_unlock(&intf->mutex);

	/* Wait, without holding the mutex, for the mailbox event */
	timeout = msecs_to_jiffies(GB_INTERFACE_MODE_SWITCH_TIMEOUT);
	ret = wait_for_completion_interruptible_timeout(
			&intf->mode_switch_completion, timeout);

	/* Finalise control-connection mode switch. */
	gb_control_mode_switch_complete(control);
	gb_control_put(control);

	if (ret < 0) {
		dev_err(&intf->dev, "mode switch interrupted\n");
		goto err_deactivate;
	} else if (ret == 0) {
		dev_err(&intf->dev, "mode switch timed out\n");
		goto err_deactivate;
	}

	/* Re-enable (re-enumerate) interface if still active. */
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	if (intf->active) {
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev, "failed to re-enable interface: %d\n",
				ret);
			gb_interface_deactivate(intf);
		}
	}
	mutex_unlock(&intf->mutex);

out_interface_put:
	gb_interface_put(intf);

	return;

err_deactivate:
	mutex_lock(&intf->mutex);
	intf->mode_switch = false;
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);

	gb_interface_put(intf);
}
321
/*
 * Queue a mode switch for the interface.  Returns -EBUSY if one is
 * already pending (or the work could not be queued), 0 otherwise.  The
 * device reference taken here is dropped by the worker.
 */
int gb_interface_request_mode_switch(struct gb_interface *intf)
{
	int ret = 0;

	mutex_lock(&intf->mutex);
	if (intf->mode_switch) {
		ret = -EBUSY;
		goto out_unlock;
	}

	intf->mode_switch = true;
	reinit_completion(&intf->mode_switch_completion);

	/*
	 * Get a reference to the interface device, which will be put once the
	 * mode switch is complete.
	 */
	get_device(&intf->dev);

	if (!queue_work(system_long_wq, &intf->mode_switch_work)) {
		put_device(&intf->dev);
		ret = -EBUSY;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&intf->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_interface_request_mode_switch);
353
354/*
355 * T_TstSrcIncrement is written by the module on ES2 as a stand-in for the
356 * init-status attribute DME_TOSHIBA_INIT_STATUS. The AP needs to read and
357 * clear it after reading a non-zero value from it.
358 *
359 * FIXME: This is module-hardware dependent and needs to be extended for every
360 * type of module we want to support.
361 */
static int gb_interface_read_and_clear_init_status(struct gb_interface *intf)
{
	struct gb_host_device *hd = intf->hd;
	unsigned long bootrom_quirks;
	unsigned long s2l_quirks;
	int ret;
	u32 value;
	u16 attr;
	u8 init_status;

	/*
	 * ES2 bridges use T_TstSrcIncrement for the init status.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		attr = DME_T_TST_SRC_INCREMENT;
	else
		attr = DME_TOSHIBA_GMP_INIT_STATUS;

	/* Read the init-status attribute from the peer over the SVC. */
	ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr,
				  DME_SELECTOR_INDEX_NULL, &value);
	if (ret)
		return ret;

	/*
	 * A nonzero init status indicates the module has finished
	 * initializing.
	 */
	if (!value) {
		dev_err(&intf->dev, "invalid init status\n");
		return -ENODEV;
	}

	/*
	 * Extract the init status.
	 *
	 * For ES2: We need to check lowest 8 bits of 'value'.
	 * For ES3: We need to check highest 8 bits out of 32 of 'value'.
	 *
	 * FIXME: Remove ES2 support
	 */
	if (intf->quirks & GB_INTERFACE_QUIRK_NO_INIT_STATUS)
		init_status = value & 0xff;
	else
		init_status = value >> 24;

	/*
	 * Check if the interface is executing the quirky ES3 bootrom that,
	 * for example, requires E2EFC, CSD and CSV to be disabled.
	 */
	bootrom_quirks = GB_INTERFACE_QUIRK_NO_CPORT_FEATURES |
				GB_INTERFACE_QUIRK_FORCED_DISABLE |
				GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH |
				GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE;

	s2l_quirks = GB_INTERFACE_QUIRK_NO_PM;

	/* Adjust the quirk set for the boot stage the module reported. */
	switch (init_status) {
	case GB_INIT_BOOTROM_UNIPRO_BOOT_STARTED:
	case GB_INIT_BOOTROM_FALLBACK_UNIPRO_BOOT_STARTED:
		intf->quirks |= bootrom_quirks;
		break;
	case GB_INIT_S2_LOADER_BOOT_STARTED:
		/* S2 Loader doesn't support runtime PM */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks |= s2l_quirks;
		break;
	default:
		/* Normal firmware: drop any quirks set by earlier stages. */
		intf->quirks &= ~bootrom_quirks;
		intf->quirks &= ~s2l_quirks;
	}

	/* Clear the init status. */
	return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr,
				   DME_SELECTOR_INDEX_NULL, 0);
}
439
/* interface sysfs attributes */
/*
 * Generate a read-only sysfs attribute named "field" whose show method
 * prints the matching struct gb_interface member using format "type".
 */
#define gb_interface_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_interface *intf = to_gb_interface(dev);		\
	return scnprintf(buf, PAGE_SIZE, type"\n", intf->field);	\
}									\
static DEVICE_ATTR_RO(field)

gb_interface_attr(ddbl1_manufacturer_id, "0x%08x");
gb_interface_attr(ddbl1_product_id, "0x%08x");
gb_interface_attr(interface_id, "%u");
gb_interface_attr(vendor_id, "0x%08x");
gb_interface_attr(product_id, "0x%08x");
gb_interface_attr(serial_number, "0x%016llx");
457
458static ssize_t voltage_now_show(struct device *dev,
459 struct device_attribute *attr, char *buf)
460{
461 struct gb_interface *intf = to_gb_interface(dev);
462 int ret;
463 u32 measurement;
464
465 ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
466 GB_SVC_PWRMON_TYPE_VOL,
467 &measurement);
468 if (ret) {
469 dev_err(&intf->dev, "failed to get voltage sample (%d)\n", ret);
470 return ret;
471 }
472
473 return sprintf(buf, "%u\n", measurement);
474}
475static DEVICE_ATTR_RO(voltage_now);
476
477static ssize_t current_now_show(struct device *dev,
478 struct device_attribute *attr, char *buf)
479{
480 struct gb_interface *intf = to_gb_interface(dev);
481 int ret;
482 u32 measurement;
483
484 ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
485 GB_SVC_PWRMON_TYPE_CURR,
486 &measurement);
487 if (ret) {
488 dev_err(&intf->dev, "failed to get current sample (%d)\n", ret);
489 return ret;
490 }
491
492 return sprintf(buf, "%u\n", measurement);
493}
494static DEVICE_ATTR_RO(current_now);
495
496static ssize_t power_now_show(struct device *dev,
497 struct device_attribute *attr, char *buf)
498{
499 struct gb_interface *intf = to_gb_interface(dev);
500 int ret;
501 u32 measurement;
502
503 ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id,
504 GB_SVC_PWRMON_TYPE_PWR,
505 &measurement);
506 if (ret) {
507 dev_err(&intf->dev, "failed to get power sample (%d)\n", ret);
508 return ret;
509 }
510
511 return sprintf(buf, "%u\n", measurement);
512}
513static DEVICE_ATTR_RO(power_now);
514
515static ssize_t power_state_show(struct device *dev,
516 struct device_attribute *attr, char *buf)
517{
518 struct gb_interface *intf = to_gb_interface(dev);
519
520 if (intf->active)
521 return scnprintf(buf, PAGE_SIZE, "on\n");
522 else
523 return scnprintf(buf, PAGE_SIZE, "off\n");
524}
525
/*
 * sysfs: activate or deactivate the interface.
 *
 * Accepts any kstrtobool-compatible value. Writing the current state is a
 * no-op that still reports success.
 */
static ssize_t power_state_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t len)
{
	struct gb_interface *intf = to_gb_interface(dev);
	bool activate;
	int ret = 0;

	if (kstrtobool(buf, &activate))
		return -EINVAL;

	mutex_lock(&intf->mutex);

	/* Requested state already in effect. */
	if (activate == intf->active)
		goto unlock;

	if (activate) {
		ret = gb_interface_activate(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to activate interface: %d\n", ret);
			goto unlock;
		}

		/* Enumerate; roll back activation if that fails. */
		ret = gb_interface_enable(intf);
		if (ret) {
			dev_err(&intf->dev,
				"failed to enable interface: %d\n", ret);
			gb_interface_deactivate(intf);
			goto unlock;
		}
	} else {
		gb_interface_disable(intf);
		gb_interface_deactivate(intf);
	}

unlock:
	mutex_unlock(&intf->mutex);

	if (ret)
		return ret;

	return len;
}
static DEVICE_ATTR_RW(power_state);
571
572static const char *gb_interface_type_string(struct gb_interface *intf)
573{
574 static const char * const types[] = {
575 [GB_INTERFACE_TYPE_INVALID] = "invalid",
576 [GB_INTERFACE_TYPE_UNKNOWN] = "unknown",
577 [GB_INTERFACE_TYPE_DUMMY] = "dummy",
578 [GB_INTERFACE_TYPE_UNIPRO] = "unipro",
579 [GB_INTERFACE_TYPE_GREYBUS] = "greybus",
580 };
581
582 return types[intf->type];
583}
584
585static ssize_t interface_type_show(struct device *dev,
586 struct device_attribute *attr, char *buf)
587{
588 struct gb_interface *intf = to_gb_interface(dev);
589
590 return sprintf(buf, "%s\n", gb_interface_type_string(intf));
591}
592static DEVICE_ATTR_RO(interface_type);
593
/* Attributes shown only for UniPro-capable interface types. */
static struct attribute *interface_unipro_attrs[] = {
	&dev_attr_ddbl1_manufacturer_id.attr,
	&dev_attr_ddbl1_product_id.attr,
	NULL
};

/* Attributes shown only for Greybus interfaces. */
static struct attribute *interface_greybus_attrs[] = {
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_serial_number.attr,
	NULL
};

/* Power-monitoring and power-control attributes. */
static struct attribute *interface_power_attrs[] = {
	&dev_attr_voltage_now.attr,
	&dev_attr_current_now.attr,
	&dev_attr_power_now.attr,
	&dev_attr_power_state.attr,
	NULL
};

/* Attributes present on every interface regardless of type. */
static struct attribute *interface_common_attrs[] = {
	&dev_attr_interface_id.attr,
	&dev_attr_interface_type.attr,
	NULL
};
620
621static umode_t interface_unipro_is_visible(struct kobject *kobj,
622 struct attribute *attr, int n)
623{
624 struct device *dev = container_of(kobj, struct device, kobj);
625 struct gb_interface *intf = to_gb_interface(dev);
626
627 switch (intf->type) {
628 case GB_INTERFACE_TYPE_UNIPRO:
629 case GB_INTERFACE_TYPE_GREYBUS:
630 return attr->mode;
631 default:
632 return 0;
633 }
634}
635
636static umode_t interface_greybus_is_visible(struct kobject *kobj,
637 struct attribute *attr, int n)
638{
639 struct device *dev = container_of(kobj, struct device, kobj);
640 struct gb_interface *intf = to_gb_interface(dev);
641
642 switch (intf->type) {
643 case GB_INTERFACE_TYPE_GREYBUS:
644 return attr->mode;
645 default:
646 return 0;
647 }
648}
649
650static umode_t interface_power_is_visible(struct kobject *kobj,
651 struct attribute *attr, int n)
652{
653 struct device *dev = container_of(kobj, struct device, kobj);
654 struct gb_interface *intf = to_gb_interface(dev);
655
656 switch (intf->type) {
657 case GB_INTERFACE_TYPE_UNIPRO:
658 case GB_INTERFACE_TYPE_GREYBUS:
659 return attr->mode;
660 default:
661 return 0;
662 }
663}
664
/*
 * Attribute groups for the interface device; visibility of the typed
 * groups is decided per-attribute by the is_visible callbacks above.
 */
static const struct attribute_group interface_unipro_group = {
	.is_visible	= interface_unipro_is_visible,
	.attrs		= interface_unipro_attrs,
};

static const struct attribute_group interface_greybus_group = {
	.is_visible	= interface_greybus_is_visible,
	.attrs		= interface_greybus_attrs,
};

static const struct attribute_group interface_power_group = {
	.is_visible	= interface_power_is_visible,
	.attrs		= interface_power_attrs,
};

static const struct attribute_group interface_common_group = {
	.attrs		= interface_common_attrs,
};

static const struct attribute_group *interface_groups[] = {
	&interface_unipro_group,
	&interface_greybus_group,
	&interface_power_group,
	&interface_common_group,
	NULL
};
691
/* Device release callback: frees the interface when the last ref drops. */
static void gb_interface_release(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);

	trace_gb_interface_release(intf);

	kfree(intf);
}
700
701#ifdef CONFIG_PM
/*
 * Runtime-suspend callback: prepare the remote end, drop timesync state,
 * suspend the control connection, hibernate the link and finally disable
 * the reference clock.
 */
static int gb_interface_suspend(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	int ret, timesync_ret;

	ret = gb_control_interface_suspend_prepare(intf->control);
	if (ret)
		return ret;

	gb_timesync_interface_remove(intf);

	ret = gb_control_suspend(intf->control);
	if (ret)
		goto err_hibernate_abort;

	/*
	 * NOTE(review): failures from here on return without restoring the
	 * control connection or timesync state -- confirm this is
	 * intentional (recovery may not be possible this late in suspend).
	 */
	ret = gb_interface_hibernate_link(intf);
	if (ret)
		return ret;

	/* Delay to allow interface to enter standby before disabling refclk */
	msleep(GB_INTERFACE_SUSPEND_HIBERNATE_DELAY_MS);

	ret = gb_interface_refclk_set(intf, false);
	if (ret)
		return ret;

	return 0;

err_hibernate_abort:
	gb_control_interface_hibernate_abort(intf->control);

	/* Re-add timesync that was removed above before failing. */
	timesync_ret = gb_timesync_interface_add(intf);
	if (timesync_ret) {
		dev_err(dev, "failed to add to timesync: %d\n", timesync_ret);
		return timesync_ret;
	}

	return ret;
}
741
/*
 * Runtime-resume callback: re-enable the refclk, resume the interface via
 * the SVC and the control connection, then rejoin timesync and schedule a
 * FrameTime resynchronization.
 */
static int gb_interface_resume(struct device *dev)
{
	struct gb_interface *intf = to_gb_interface(dev);
	struct gb_svc *svc = intf->hd->svc;
	int ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		return ret;

	ret = gb_svc_intf_resume(svc, intf->interface_id);
	if (ret)
		return ret;

	ret = gb_control_resume(intf->control);
	if (ret)
		return ret;

	ret = gb_timesync_interface_add(intf);
	if (ret) {
		dev_err(dev, "failed to add to timesync: %d\n", ret);
		return ret;
	}

	ret = gb_timesync_schedule_synchronous(intf);
	if (ret) {
		dev_err(dev, "failed to synchronize FrameTime: %d\n", ret);
		return ret;
	}

	return 0;
}
774
/* Runtime-idle callback: defer suspend by restarting the autosuspend timer. */
static int gb_interface_runtime_idle(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_request_autosuspend(dev);

	return 0;
}
782#endif
783
/* Only runtime PM callbacks are installed (SET_RUNTIME_PM_OPS). */
static const struct dev_pm_ops gb_interface_pm_ops = {
	SET_RUNTIME_PM_OPS(gb_interface_suspend, gb_interface_resume,
			   gb_interface_runtime_idle)
};

struct device_type greybus_interface_type = {
	.name =		"greybus_interface",
	.release =	gb_interface_release,
	.pm =		&gb_interface_pm_ops,
};
794
795/*
796 * A Greybus module represents a user-replaceable component on a GMP
797 * phone. An interface is the physical connection on that module. A
798 * module may have more than one interface.
799 *
800 * Create a gb_interface structure to represent a discovered interface.
801 * The position of interface within the Endo is encoded in "interface_id"
802 * argument.
803 *
804 * Returns a pointer to the new interfce or a null pointer if a
805 * failure occurs due to memory exhaustion.
806 */
807struct gb_interface *gb_interface_create(struct gb_module *module,
808 u8 interface_id)
809{
810 struct gb_host_device *hd = module->hd;
811 struct gb_interface *intf;
812
813 intf = kzalloc(sizeof(*intf), GFP_KERNEL);
814 if (!intf)
815 return NULL;
816
817 intf->hd = hd; /* XXX refcount? */
818 intf->module = module;
819 intf->interface_id = interface_id;
820 INIT_LIST_HEAD(&intf->bundles);
821 INIT_LIST_HEAD(&intf->manifest_descs);
822 mutex_init(&intf->mutex);
823 INIT_WORK(&intf->mode_switch_work, gb_interface_mode_switch_work);
824 init_completion(&intf->mode_switch_completion);
825
826 /* Invalid device id to start with */
827 intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
828
829 intf->dev.parent = &module->dev;
830 intf->dev.bus = &greybus_bus_type;
831 intf->dev.type = &greybus_interface_type;
832 intf->dev.groups = interface_groups;
833 intf->dev.dma_mask = module->dev.dma_mask;
834 device_initialize(&intf->dev);
835 dev_set_name(&intf->dev, "%s.%u", dev_name(&module->dev),
836 interface_id);
837
838 pm_runtime_set_autosuspend_delay(&intf->dev,
839 GB_INTERFACE_AUTOSUSPEND_MS);
840
841 trace_gb_interface_create(intf);
842
843 return intf;
844}
845
846static int gb_interface_vsys_set(struct gb_interface *intf, bool enable)
847{
848 struct gb_svc *svc = intf->hd->svc;
849 int ret;
850
851 dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
852
853 ret = gb_svc_intf_vsys_set(svc, intf->interface_id, enable);
854 if (ret) {
855 dev_err(&intf->dev, "failed to set v_sys: %d\n", ret);
856 return ret;
857 }
858
859 return 0;
860}
861
862static int gb_interface_refclk_set(struct gb_interface *intf, bool enable)
863{
864 struct gb_svc *svc = intf->hd->svc;
865 int ret;
866
867 dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
868
869 ret = gb_svc_intf_refclk_set(svc, intf->interface_id, enable);
870 if (ret) {
871 dev_err(&intf->dev, "failed to set refclk: %d\n", ret);
872 return ret;
873 }
874
875 return 0;
876}
877
878static int gb_interface_unipro_set(struct gb_interface *intf, bool enable)
879{
880 struct gb_svc *svc = intf->hd->svc;
881 int ret;
882
883 dev_dbg(&intf->dev, "%s - %d\n", __func__, enable);
884
885 ret = gb_svc_intf_unipro_set(svc, intf->interface_id, enable);
886 if (ret) {
887 dev_err(&intf->dev, "failed to set UniPro: %d\n", ret);
888 return ret;
889 }
890
891 return 0;
892}
893
/*
 * Issue the SVC activate operation and translate the reported interface
 * type into an enum gb_interface_type.
 *
 * Only the Greybus type currently succeeds; dummy, UniPro-only and
 * unknown types are reported via *intf_type but treated as -ENODEV.
 */
static int gb_interface_activate_operation(struct gb_interface *intf,
					   enum gb_interface_type *intf_type)
{
	struct gb_svc *svc = intf->hd->svc;
	u8 type;
	int ret;

	dev_dbg(&intf->dev, "%s\n", __func__);

	ret = gb_svc_intf_activate(svc, intf->interface_id, &type);
	if (ret) {
		dev_err(&intf->dev, "failed to activate: %d\n", ret);
		return ret;
	}

	switch (type) {
	case GB_SVC_INTF_TYPE_DUMMY:
		*intf_type = GB_INTERFACE_TYPE_DUMMY;
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_UNIPRO:
		*intf_type = GB_INTERFACE_TYPE_UNIPRO;
		dev_err(&intf->dev, "interface type UniPro not supported\n");
		/* FIXME: handle as an error for now */
		return -ENODEV;
	case GB_SVC_INTF_TYPE_GREYBUS:
		*intf_type = GB_INTERFACE_TYPE_GREYBUS;
		break;
	default:
		dev_err(&intf->dev, "unknown interface type: %u\n", type);
		*intf_type = GB_INTERFACE_TYPE_UNKNOWN;
		return -ENODEV;
	}

	return 0;
}
930
931static int gb_interface_hibernate_link(struct gb_interface *intf)
932{
933 struct gb_svc *svc = intf->hd->svc;
934
935 return gb_svc_intf_set_power_mode_hibernate(svc, intf->interface_id);
936}
937
/*
 * Power up and activate an interface: enable V_SYS, refclk and UniPro,
 * issue the SVC activate operation, read DME attributes and create the
 * route. On failure everything brought up so far is torn down in reverse
 * order.
 */
static int _gb_interface_activate(struct gb_interface *intf,
				  enum gb_interface_type *type)
{
	int ret;

	*type = GB_INTERFACE_TYPE_UNKNOWN;

	/* Never activate an interface that is being removed. */
	if (intf->ejected || intf->removed)
		return -ENODEV;

	ret = gb_interface_vsys_set(intf, true);
	if (ret)
		return ret;

	ret = gb_interface_refclk_set(intf, true);
	if (ret)
		goto err_vsys_disable;

	ret = gb_interface_unipro_set(intf, true);
	if (ret)
		goto err_refclk_disable;

	ret = gb_interface_activate_operation(intf, type);
	if (ret) {
		/*
		 * For UniPro/Greybus types the link came up, so it must be
		 * hibernated before the supplies are cut; otherwise skip
		 * straight to disabling UniPro.
		 */
		switch (*type) {
		case GB_INTERFACE_TYPE_UNIPRO:
		case GB_INTERFACE_TYPE_GREYBUS:
			goto err_hibernate_link;
		default:
			goto err_unipro_disable;
		}
	}

	ret = gb_interface_read_dme(intf);
	if (ret)
		goto err_hibernate_link;

	ret = gb_interface_route_create(intf);
	if (ret)
		goto err_hibernate_link;

	intf->active = true;

	trace_gb_interface_activate(intf);

	return 0;

err_hibernate_link:
	gb_interface_hibernate_link(intf);
err_unipro_disable:
	gb_interface_unipro_set(intf, false);
err_refclk_disable:
	gb_interface_refclk_set(intf, false);
err_vsys_disable:
	gb_interface_vsys_set(intf, false);

	return ret;
}
996
997/*
998 * At present, we assume a UniPro-only module to be a Greybus module that
999 * failed to send its mailbox poke. There is some reason to believe that this
1000 * is because of a bug in the ES3 bootrom.
1001 *
1002 * FIXME: Check if this is a Toshiba bridge before retrying?
1003 */
1004static int _gb_interface_activate_es3_hack(struct gb_interface *intf,
1005 enum gb_interface_type *type)
1006{
1007 int retries = 3;
1008 int ret;
1009
1010 while (retries--) {
1011 ret = _gb_interface_activate(intf, type);
1012 if (ret == -ENODEV && *type == GB_INTERFACE_TYPE_UNIPRO)
1013 continue;
1014
1015 break;
1016 }
1017
1018 return ret;
1019}
1020
1021/*
1022 * Activate an interface.
1023 *
1024 * Locking: Caller holds the interface mutex.
1025 */
1026int gb_interface_activate(struct gb_interface *intf)
1027{
1028 enum gb_interface_type type;
1029 int ret;
1030
1031 switch (intf->type) {
1032 case GB_INTERFACE_TYPE_INVALID:
1033 case GB_INTERFACE_TYPE_GREYBUS:
1034 ret = _gb_interface_activate_es3_hack(intf, &type);
1035 break;
1036 default:
1037 ret = _gb_interface_activate(intf, &type);
1038 }
1039
1040 /* Make sure type is detected correctly during reactivation. */
1041 if (intf->type != GB_INTERFACE_TYPE_INVALID) {
1042 if (type != intf->type) {
1043 dev_err(&intf->dev, "failed to detect interface type\n");
1044
1045 if (!ret)
1046 gb_interface_deactivate(intf);
1047
1048 return -EIO;
1049 }
1050 } else {
1051 intf->type = type;
1052 }
1053
1054 return ret;
1055}
1056
1057/*
1058 * Deactivate an interface.
1059 *
1060 * Locking: Caller holds the interface mutex.
1061 */
1062void gb_interface_deactivate(struct gb_interface *intf)
1063{
1064 if (!intf->active)
1065 return;
1066
1067 trace_gb_interface_deactivate(intf);
1068
1069 /* Abort any ongoing mode switch. */
1070 if (intf->mode_switch)
1071 complete(&intf->mode_switch_completion);
1072
1073 gb_interface_route_destroy(intf);
1074 gb_interface_hibernate_link(intf);
1075 gb_interface_unipro_set(intf, false);
1076 gb_interface_refclk_set(intf, false);
1077 gb_interface_vsys_set(intf, false);
1078
1079 intf->active = false;
1080}
1081
1082/*
1083 * Enable an interface by enabling its control connection, fetching the
1084 * manifest and other information over it, and finally registering its child
1085 * devices.
1086 *
1087 * Locking: Caller holds the interface mutex.
1088 */
1089int gb_interface_enable(struct gb_interface *intf)
1090{
1091 struct gb_control *control;
1092 struct gb_bundle *bundle, *tmp;
1093 int ret, size;
1094 void *manifest;
1095
1096 ret = gb_interface_read_and_clear_init_status(intf);
1097 if (ret) {
1098 dev_err(&intf->dev, "failed to clear init status: %d\n", ret);
1099 return ret;
1100 }
1101
1102 /* Establish control connection */
1103 control = gb_control_create(intf);
1104 if (IS_ERR(control)) {
1105 dev_err(&intf->dev, "failed to create control device: %ld\n",
1106 PTR_ERR(control));
1107 return PTR_ERR(control);
1108 }
1109 intf->control = control;
1110
1111 ret = gb_control_enable(intf->control);
1112 if (ret)
1113 goto err_put_control;
1114
1115 /* Get manifest size using control protocol on CPort */
1116 size = gb_control_get_manifest_size_operation(intf);
1117 if (size <= 0) {
1118 dev_err(&intf->dev, "failed to get manifest size: %d\n", size);
1119
1120 if (size)
1121 ret = size;
1122 else
1123 ret = -EINVAL;
1124
1125 goto err_disable_control;
1126 }
1127
1128 manifest = kmalloc(size, GFP_KERNEL);
1129 if (!manifest) {
1130 ret = -ENOMEM;
1131 goto err_disable_control;
1132 }
1133
1134 /* Get manifest using control protocol on CPort */
1135 ret = gb_control_get_manifest_operation(intf, manifest, size);
1136 if (ret) {
1137 dev_err(&intf->dev, "failed to get manifest: %d\n", ret);
1138 goto err_free_manifest;
1139 }
1140
1141 /*
1142 * Parse the manifest and build up our data structures representing
1143 * what's in it.
1144 */
1145 if (!gb_manifest_parse(intf, manifest, size)) {
1146 dev_err(&intf->dev, "failed to parse manifest\n");
1147 ret = -EINVAL;
1148 goto err_destroy_bundles;
1149 }
1150
1151 ret = gb_control_get_bundle_versions(intf->control);
1152 if (ret)
1153 goto err_destroy_bundles;
1154
1155 ret = gb_timesync_interface_add(intf);
1156 if (ret) {
1157 dev_err(&intf->dev, "failed to add to timesync: %d\n", ret);
1158 goto err_destroy_bundles;
1159 }
1160
1161 /* Register the control device and any bundles */
1162 ret = gb_control_add(intf->control);
1163 if (ret)
1164 goto err_remove_timesync;
1165
1166 pm_runtime_use_autosuspend(&intf->dev);
1167 pm_runtime_get_noresume(&intf->dev);
1168 pm_runtime_set_active(&intf->dev);
1169 pm_runtime_enable(&intf->dev);
1170
1171 list_for_each_entry_safe_reverse(bundle, tmp, &intf->bundles, links) {
1172 ret = gb_bundle_add(bundle);
1173 if (ret) {
1174 gb_bundle_destroy(bundle);
1175 continue;
1176 }
1177 }
1178
1179 kfree(manifest);
1180
1181 intf->enabled = true;
1182
1183 pm_runtime_put(&intf->dev);
1184
1185 trace_gb_interface_enable(intf);
1186
1187 return 0;
1188
1189err_remove_timesync:
1190 gb_timesync_interface_remove(intf);
1191err_destroy_bundles:
1192 list_for_each_entry_safe(bundle, tmp, &intf->bundles, links)
1193 gb_bundle_destroy(bundle);
1194err_free_manifest:
1195 kfree(manifest);
1196err_disable_control:
1197 gb_control_disable(intf->control);
1198err_put_control:
1199 gb_control_put(intf->control);
1200 intf->control = NULL;
1201
1202 return ret;
1203}
1204
1205/*
1206 * Disable an interface and destroy its bundles.
1207 *
1208 * Locking: Caller holds the interface mutex.
1209 */
1210void gb_interface_disable(struct gb_interface *intf)
1211{
1212 struct gb_bundle *bundle;
1213 struct gb_bundle *next;
1214
1215 if (!intf->enabled)
1216 return;
1217
1218 trace_gb_interface_disable(intf);
1219
1220 pm_runtime_get_sync(&intf->dev);
1221
1222 /* Set disconnected flag to avoid I/O during connection tear down. */
1223 if (intf->quirks & GB_INTERFACE_QUIRK_FORCED_DISABLE)
1224 intf->disconnected = true;
1225
1226 list_for_each_entry_safe(bundle, next, &intf->bundles, links)
1227 gb_bundle_destroy(bundle);
1228
1229 if (!intf->mode_switch && !intf->disconnected)
1230 gb_control_interface_deactivate_prepare(intf->control);
1231
1232 gb_control_del(intf->control);
1233 gb_timesync_interface_remove(intf);
1234 gb_control_disable(intf->control);
1235 gb_control_put(intf->control);
1236 intf->control = NULL;
1237
1238 intf->enabled = false;
1239
1240 pm_runtime_disable(&intf->dev);
1241 pm_runtime_set_suspended(&intf->dev);
1242 pm_runtime_dont_use_autosuspend(&intf->dev);
1243 pm_runtime_put_noidle(&intf->dev);
1244}
1245
1246/* Enable TimeSync on an Interface control connection. */
1247int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
1248 u64 frame_time, u32 strobe_delay, u32 refclk)
1249{
1250 return gb_control_timesync_enable(intf->control, count,
1251 frame_time, strobe_delay,
1252 refclk);
1253}
1254
/*
 * Disable TimeSync on an Interface control connection.
 *
 * Returns the result of the underlying control operation.
 */
int gb_interface_timesync_disable(struct gb_interface *intf)
{
	return gb_control_timesync_disable(intf->control);
}
1260
1261/* Transmit the Authoritative FrameTime via an Interface control connection. */
1262int gb_interface_timesync_authoritative(struct gb_interface *intf,
1263 u64 *frame_time)
1264{
1265 return gb_control_timesync_authoritative(intf->control,
1266 frame_time);
1267}
1268
/*
 * Register an interface with the driver core and log its identifying
 * information.
 */
int gb_interface_add(struct gb_interface *intf)
{
	int ret;

	ret = device_add(&intf->dev);
	if (ret) {
		dev_err(&intf->dev, "failed to register interface: %d\n", ret);
		return ret;
	}

	trace_gb_interface_add(intf);

	dev_info(&intf->dev, "Interface added (%s)\n",
		 gb_interface_type_string(intf));

	/* Greybus interfaces also print their DDBL1 ids (fall-through). */
	switch (intf->type) {
	case GB_INTERFACE_TYPE_GREYBUS:
		dev_info(&intf->dev, "GMP VID=0x%08x, PID=0x%08x\n",
			 intf->vendor_id, intf->product_id);
		/* fall-through */
	case GB_INTERFACE_TYPE_UNIPRO:
		dev_info(&intf->dev, "DDBL1 Manufacturer=0x%08x, Product=0x%08x\n",
			 intf->ddbl1_manufacturer_id,
			 intf->ddbl1_product_id);
		break;
	default:
		break;
	}

	return 0;
}
1301
1302/* Deregister an interface. */
1303void gb_interface_del(struct gb_interface *intf)
1304{
1305 if (device_is_registered(&intf->dev)) {
1306 trace_gb_interface_del(intf);
1307
1308 device_del(&intf->dev);
1309 dev_info(&intf->dev, "Interface removed\n");
1310 }
1311}
1312
/* Drop a reference on the interface device. */
void gb_interface_put(struct gb_interface *intf)
{
	put_device(&intf->dev);
}
diff --git a/drivers/staging/greybus/interface.h b/drivers/staging/greybus/interface.h
new file mode 100644
index 000000000000..03299d2a8be5
--- /dev/null
+++ b/drivers/staging/greybus/interface.h
@@ -0,0 +1,88 @@
1/*
2 * Greybus Interface Block code
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __INTERFACE_H
11#define __INTERFACE_H
12
/* Interface type as reported by the SVC activate operation. */
enum gb_interface_type {
	GB_INTERFACE_TYPE_INVALID = 0,
	GB_INTERFACE_TYPE_UNKNOWN,
	GB_INTERFACE_TYPE_DUMMY,
	GB_INTERFACE_TYPE_UNIPRO,
	GB_INTERFACE_TYPE_GREYBUS,
};

/*
 * Per-interface quirk flags. NO_INIT_STATUS selects the ES2
 * T_TstSrcIncrement attribute instead of the GMP init-status attribute;
 * FORCED_DISABLE marks the interface disconnected on disable; NO_PM is
 * set for the S2 loader. The remaining bits are applied as a group for
 * the ES3 bootrom (see gb_interface_read_and_clear_init_status()).
 */
#define GB_INTERFACE_QUIRK_NO_CPORT_FEATURES		BIT(0)
#define GB_INTERFACE_QUIRK_NO_INIT_STATUS		BIT(1)
#define GB_INTERFACE_QUIRK_NO_GMP_IDS			BIT(2)
#define GB_INTERFACE_QUIRK_FORCED_DISABLE		BIT(3)
#define GB_INTERFACE_QUIRK_LEGACY_MODE_SWITCH		BIT(4)
#define GB_INTERFACE_QUIRK_NO_BUNDLE_ACTIVATE		BIT(5)
#define GB_INTERFACE_QUIRK_NO_PM			BIT(6)
28
/*
 * A physical connection on a Greybus module. State flags are protected
 * by the interface mutex.
 */
struct gb_interface {
	struct device dev;
	struct gb_control *control;	/* control connection; NULL when disabled */

	struct list_head bundles;
	struct list_head module_node;
	struct list_head manifest_descs;
	u8 interface_id;	/* Physical location within the Endo */
	u8 device_id;
	u8 features;		/* Feature flags set in the manifest */

	enum gb_interface_type type;

	/* Identifiers read from the interface (DDBL1 and GMP). */
	u32 ddbl1_manufacturer_id;
	u32 ddbl1_product_id;
	u32 vendor_id;
	u32 product_id;
	u64 serial_number;

	struct gb_host_device *hd;
	struct gb_module *module;

	unsigned long quirks;	/* GB_INTERFACE_QUIRK_* bits */

	struct mutex mutex;

	bool disconnected;	/* suppress I/O during tear down */

	bool ejected;
	bool removed;
	bool active;		/* powered and activated */
	bool enabled;		/* enumerated; control connection up */
	bool mode_switch;	/* mode switch in progress */
	bool dme_read;		/* presumably set once DME attrs were read -- verify */

	struct work_struct mode_switch_work;
	struct completion mode_switch_completion;
};
#define to_gb_interface(d) container_of(d, struct gb_interface, dev)
68
/* Interface life-cycle and TimeSync entry points (see interface.c). */
struct gb_interface *gb_interface_create(struct gb_module *module,
					 u8 interface_id);
int gb_interface_activate(struct gb_interface *intf);
void gb_interface_deactivate(struct gb_interface *intf);
int gb_interface_enable(struct gb_interface *intf);
void gb_interface_disable(struct gb_interface *intf);
int gb_interface_timesync_enable(struct gb_interface *intf, u8 count,
				 u64 frame_time, u32 strobe_delay, u32 refclk);
int gb_interface_timesync_authoritative(struct gb_interface *intf,
					u64 *frame_time);
int gb_interface_timesync_disable(struct gb_interface *intf);
int gb_interface_add(struct gb_interface *intf);
void gb_interface_del(struct gb_interface *intf);
void gb_interface_put(struct gb_interface *intf);
void gb_interface_mailbox_event(struct gb_interface *intf, u16 result,
				u32 mailbox);

int gb_interface_request_mode_switch(struct gb_interface *intf);
88#endif /* __INTERFACE_H */
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
new file mode 100644
index 000000000000..b2847feb7e86
--- /dev/null
+++ b/drivers/staging/greybus/light.c
@@ -0,0 +1,1359 @@
1/*
2 * Greybus Lights protocol driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/leds.h>
12#include <linux/led-class-flash.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/version.h>
16#include <media/v4l2-flash-led-class.h>
17
18#include "greybus.h"
19#include "greybus_protocols.h"
20
21#define NAMES_MAX 32
22
/*
 * One channel of a light.  A channel maps to a LED class device; flash,
 * torch and indicator channels additionally map to a flash class device
 * when CONFIG_LEDS_CLASS_FLASH is reachable.
 */
struct gb_channel {
	u8 id;
	u32 flags;			/* GB_LIGHT_CHANNEL_* from the peer */
	u32 color;
	char *color_name;		/* kstrndup'ed, freed in channel_free */
	u8 fade_in;
	u8 fade_out;
	u32 mode;			/* GB_CHANNEL_MODE_* bitmask */
	char *mode_name;		/* kstrndup'ed, freed in channel_free */
	struct attribute **attrs;	/* optional sysfs attrs (color/fade) */
	struct attribute_group *attr_group;
	const struct attribute_group **attr_groups;
	struct led_classdev *led;	/* points at fled.led_cdev or cled */
#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
	struct led_classdev_flash fled;
	struct led_flash_setting intensity_uA;	/* constraints from peer */
	struct led_flash_setting timeout_us;	/* flash mode only */
#else
	struct led_classdev cled;
#endif
	struct gb_light *light;
	bool is_registered;
	bool releasing;		/* set on release; ops then bail -ESHUTDOWN */
	bool strobe_state;	/* cached last strobe state sent to peer */
	bool active;		/* true while an extra PM ref is held */
	struct mutex lock;	/* serializes brightness/blink updates */
};
50
/* One light as reported by the peer; owns an array of channels. */
struct gb_light {
	u8 id;
	char *name;			/* kstrndup'ed from the peer's config */
	struct gb_lights *glights;
	u32 flags;
	u8 channels_count;
	struct gb_channel *channels;
	bool has_flash;			/* any flash/torch/indicator channel */
	bool ready;			/* configured and registered; events accepted */
#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
	struct v4l2_flash *v4l2_flash;
#endif
};
64
/* Per-connection driver state: the set of lights behind one connection. */
struct gb_lights {
	struct gb_connection *connection;
	u8 lights_count;
	struct gb_light *lights;
	struct mutex lights_lock;	/* protects the lights array */
};
71
72static void gb_lights_channel_free(struct gb_channel *channel);
73
74static struct gb_connection *get_conn_from_channel(struct gb_channel *channel)
75{
76 return channel->light->glights->connection;
77}
78
79static struct gb_connection *get_conn_from_light(struct gb_light *light)
80{
81 return light->glights->connection;
82}
83
84static bool is_channel_flash(struct gb_channel *channel)
85{
86 return !!(channel->mode & (GB_CHANNEL_MODE_FLASH | GB_CHANNEL_MODE_TORCH
87 | GB_CHANNEL_MODE_INDICATOR));
88}
89
#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
/* Flash build: the LED classdev is embedded inside the flash classdev. */
static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
{
	struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(cdev);

	return container_of(fled_cdev, struct gb_channel, fled);
}

/* The classdev to register/operate on for this channel. */
static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
{
	return &channel->fled.led_cdev;
}
102
103static struct gb_channel *get_channel_from_mode(struct gb_light *light,
104 u32 mode)
105{
106 struct gb_channel *channel = NULL;
107 int i;
108
109 for (i = 0; i < light->channels_count; i++) {
110 channel = &light->channels[i];
111 if (channel && channel->mode == mode)
112 break;
113 }
114 return channel;
115}
116
/*
 * Send a SET_FLASH_INTENSITY request for this channel.
 * @intensity is in microamps.  Bails with -ESHUTDOWN once the channel is
 * being released; holds a runtime PM reference around the operation.
 */
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
					   u32 intensity)
{
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_set_flash_intensity_request req;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.intensity_uA = cpu_to_le32(intensity);

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_INTENSITY,
				&req, sizeof(req), NULL, 0);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
143
/*
 * Map a LED-core brightness value onto a flash intensity and send it.
 * Flash channels delegate to their attached torch channel, since torch
 * brightness is what the shared classdev exposes.
 */
static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
{
	u32 intensity;

	/* If the channel is flash we need to get the attached torch channel */
	if (channel->mode & GB_CHANNEL_MODE_FLASH)
		channel = get_channel_from_mode(channel->light,
						GB_CHANNEL_MODE_TORCH);

	/*
	 * For not flash we need to convert brightness to intensity.
	 * NOTE(review): assumes a torch channel exists whenever a flash
	 * channel does -- verify against the peer's channel layout.
	 */
	intensity = channel->intensity_uA.min +
			(channel->intensity_uA.step * channel->led->brightness);

	return __gb_lights_flash_intensity_set(channel, intensity);
}
#else
/* Non-flash build: the LED classdev is embedded directly. */
static struct gb_channel *get_channel_from_cdev(struct led_classdev *cdev)
{
	return container_of(cdev, struct gb_channel, cled);
}

static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
{
	return &channel->cled;
}

/* No flash support: brightness handling falls through to the LED path. */
static int __gb_lights_flash_brightness_set(struct gb_channel *channel)
{
	return 0;
}
#endif
175
176static int gb_lights_color_set(struct gb_channel *channel, u32 color);
177static int gb_lights_fade_set(struct gb_channel *channel);
178
/* Thin wrappers over the LED core's sysfs-access lock. */
static void led_lock(struct led_classdev *cdev)
{
	mutex_lock(&cdev->led_access);
}

static void led_unlock(struct led_classdev *cdev)
{
	mutex_unlock(&cdev->led_access);
}
188
/*
 * Generate the fade_in/fade_out sysfs attribute pair.  The store side
 * takes the LED sysfs lock, rejects writes while sysfs access is
 * disabled (e.g. during a trigger), and only talks to the peer when the
 * value actually changes.
 */
#define gb_lights_fade_attr(__dir)					\
static ssize_t fade_##__dir##_show(struct device *dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
{									\
	struct led_classdev *cdev = dev_get_drvdata(dev);		\
	struct gb_channel *channel = get_channel_from_cdev(cdev);	\
									\
	return sprintf(buf, "%u\n", channel->fade_##__dir);		\
}									\
									\
static ssize_t fade_##__dir##_store(struct device *dev,			\
				    struct device_attribute *attr,	\
				    const char *buf, size_t size)	\
{									\
	struct led_classdev *cdev = dev_get_drvdata(dev);		\
	struct gb_channel *channel = get_channel_from_cdev(cdev);	\
	u8 fade;							\
	int ret;							\
									\
	led_lock(cdev);							\
	if (led_sysfs_is_disabled(cdev)) {				\
		ret = -EBUSY;						\
		goto unlock;						\
	}								\
									\
	ret = kstrtou8(buf, 0, &fade);					\
	if (ret < 0) {							\
		dev_err(dev, "could not parse fade value %d\n", ret);	\
		goto unlock;						\
	}								\
	if (channel->fade_##__dir == fade)				\
		goto unlock;						\
	channel->fade_##__dir = fade;					\
									\
	ret = gb_lights_fade_set(channel);				\
	if (ret < 0)							\
		goto unlock;						\
									\
	ret = size;							\
unlock:									\
	led_unlock(cdev);						\
	return ret;							\
}									\
static DEVICE_ATTR_RW(fade_##__dir)

gb_lights_fade_attr(in);
gb_lights_fade_attr(out);
237
/* Show the channel color as a 32-bit hex value. */
static ssize_t color_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct led_classdev *cdev = dev_get_drvdata(dev);
	struct gb_channel *channel = get_channel_from_cdev(cdev);

	return sprintf(buf, "0x%08x\n", channel->color);
}

/*
 * Parse and set a new channel color.  The cached value is only updated
 * after the peer has accepted the SET_COLOR operation.
 */
static ssize_t color_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t size)
{
	struct led_classdev *cdev = dev_get_drvdata(dev);
	struct gb_channel *channel = get_channel_from_cdev(cdev);
	u32 color;
	int ret;

	led_lock(cdev);
	if (led_sysfs_is_disabled(cdev)) {
		ret = -EBUSY;
		goto unlock;
	}
	ret = kstrtou32(buf, 0, &color);
	if (ret < 0) {
		dev_err(dev, "could not parse color value %d\n", ret);
		goto unlock;
	}

	ret = gb_lights_color_set(channel, color);
	if (ret < 0)
		goto unlock;

	channel->color = color;
	ret = size;
unlock:
	led_unlock(cdev);
	return ret;
}
static DEVICE_ATTR_RW(color);
277
278static int channel_attr_groups_set(struct gb_channel *channel,
279 struct led_classdev *cdev)
280{
281 int attr = 0;
282 int size = 0;
283
284 if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
285 size++;
286 if (channel->flags & GB_LIGHT_CHANNEL_FADER)
287 size += 2;
288
289 if (!size)
290 return 0;
291
292 /* Set attributes based in the channel flags */
293 channel->attrs = kcalloc(size + 1, sizeof(**channel->attrs),
294 GFP_KERNEL);
295 if (!channel->attrs)
296 return -ENOMEM;
297 channel->attr_group = kcalloc(1, sizeof(*channel->attr_group),
298 GFP_KERNEL);
299 if (!channel->attr_group)
300 return -ENOMEM;
301 channel->attr_groups = kcalloc(2, sizeof(*channel->attr_groups),
302 GFP_KERNEL);
303 if (!channel->attr_groups)
304 return -ENOMEM;
305
306 if (channel->flags & GB_LIGHT_CHANNEL_MULTICOLOR)
307 channel->attrs[attr++] = &dev_attr_color.attr;
308 if (channel->flags & GB_LIGHT_CHANNEL_FADER) {
309 channel->attrs[attr++] = &dev_attr_fade_in.attr;
310 channel->attrs[attr++] = &dev_attr_fade_out.attr;
311 }
312
313 channel->attr_group->attrs = channel->attrs;
314
315 channel->attr_groups[0] = channel->attr_group;
316
317 cdev->groups = channel->attr_groups;
318
319 return 0;
320}
321
/*
 * Push the channel's current fade_in/fade_out values to the peer.
 * Bails with -ESHUTDOWN during release; holds a runtime PM reference
 * around the operation.
 */
static int gb_lights_fade_set(struct gb_channel *channel)
{
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_set_fade_request req;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.fade_in = channel->fade_in;
	req.fade_out = channel->fade_out;
	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FADE,
				&req, sizeof(req), NULL, 0);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
347
/*
 * Send a SET_COLOR request for this channel.  The caller updates the
 * cached channel->color only on success (see color_store()).
 */
static int gb_lights_color_set(struct gb_channel *channel, u32 color)
{
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_set_color_request req;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.color = cpu_to_le32(color);
	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_COLOR,
				&req, sizeof(req), NULL, 0);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
372
/*
 * Send the LED-core brightness to the peer and manage the runtime PM
 * reference that pins the module while the channel is lit.
 *
 * PM reference accounting: this function takes one reference on entry.
 * - inactive -> active: skip the put, so the module stays awake (+1 net).
 * - active -> inactive: put twice -- once for the reference retained by
 *   the earlier activation, once for this call's own (-1 net).
 * - no transition: single put (0 net).
 */
static int __gb_lights_led_brightness_set(struct gb_channel *channel)
{
	struct gb_lights_set_brightness_request req;
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	bool old_active;
	int ret;

	mutex_lock(&channel->lock);
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		goto out_unlock;

	old_active = channel->active;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.brightness = (u8)channel->led->brightness;

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BRIGHTNESS,
				&req, sizeof(req), NULL, 0);
	if (ret < 0)
		goto out_pm_put;

	if (channel->led->brightness)
		channel->active = true;
	else
		channel->active = false;

	/* we need to keep module alive when turning to active state */
	if (!old_active && channel->active)
		goto out_unlock;

	/*
	 * on the other hand if going to inactive we still hold a reference and
	 * need to put it, so we could go to suspend.
	 */
	if (old_active && !channel->active)
		gb_pm_runtime_put_autosuspend(bundle);

out_pm_put:
	gb_pm_runtime_put_autosuspend(bundle);
out_unlock:
	mutex_unlock(&channel->lock);

	return ret;
}
420
421static int __gb_lights_brightness_set(struct gb_channel *channel)
422{
423 int ret;
424
425 if (channel->releasing)
426 return 0;
427
428 if (is_channel_flash(channel))
429 ret = __gb_lights_flash_brightness_set(channel);
430 else
431 ret = __gb_lights_led_brightness_set(channel);
432
433 return ret;
434}
435
/* LED-core brightness_set_blocking callback. */
static int gb_brightness_set(struct led_classdev *cdev,
			     enum led_brightness value)
{
	struct gb_channel *channel = get_channel_from_cdev(cdev);

	channel->led->brightness = value;

	return __gb_lights_brightness_set(channel);
}
445
446static enum led_brightness gb_brightness_get(struct led_classdev *cdev)
447
448{
449 struct gb_channel *channel = get_channel_from_cdev(cdev);
450
451 return channel->led->brightness;
452}
453
454static int gb_blink_set(struct led_classdev *cdev, unsigned long *delay_on,
455 unsigned long *delay_off)
456{
457 struct gb_channel *channel = get_channel_from_cdev(cdev);
458 struct gb_connection *connection = get_conn_from_channel(channel);
459 struct gb_bundle *bundle = connection->bundle;
460 struct gb_lights_blink_request req;
461 bool old_active;
462 int ret;
463
464 if (channel->releasing)
465 return -ESHUTDOWN;
466
467 mutex_lock(&channel->lock);
468 ret = gb_pm_runtime_get_sync(bundle);
469 if (ret < 0)
470 goto out_unlock;
471
472 old_active = channel->active;
473
474 req.light_id = channel->light->id;
475 req.channel_id = channel->id;
476 req.time_on_ms = cpu_to_le16(*delay_on);
477 req.time_off_ms = cpu_to_le16(*delay_off);
478
479 ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_BLINK, &req,
480 sizeof(req), NULL, 0);
481 if (ret < 0)
482 goto out_pm_put;
483
484 if (delay_on)
485 channel->active = true;
486 else
487 channel->active = false;
488
489 /* we need to keep module alive when turning to active state */
490 if (!old_active && channel->active)
491 goto out_unlock;
492
493 /*
494 * on the other hand if going to inactive we still hold a reference and
495 * need to put it, so we could go to suspend.
496 */
497 if (old_active && !channel->active)
498 gb_pm_runtime_put_autosuspend(bundle);
499
500out_pm_put:
501 gb_pm_runtime_put_autosuspend(bundle);
502out_unlock:
503 mutex_unlock(&channel->lock);
504
505 return ret;
506}
507
/* Wire the LED-core callbacks; blink only if the channel supports it. */
static void gb_lights_led_operations_set(struct gb_channel *channel,
					 struct led_classdev *cdev)
{
	cdev->brightness_get = gb_brightness_get;
	cdev->brightness_set_blocking = gb_brightness_set;

	if (channel->flags & GB_LIGHT_CHANNEL_BLINK)
		cdev->blink_set = gb_blink_set;
}
517
#if IS_REACHABLE(CONFIG_V4L2_FLASH_LED_CLASS)
/* V4L2 specific helpers */
static const struct v4l2_flash_ops v4l2_flash_ops;

/* Copy a channel's intensity constraints into a v4l2 flash setting. */
static void __gb_lights_channel_v4l2_config(struct led_flash_setting *channel_s,
					    struct led_flash_setting *v4l2_s)
{
	v4l2_s->min = channel_s->min;
	v4l2_s->max = channel_s->max;
	v4l2_s->step = channel_s->step;
	/* For v4l2 val is the default value */
	v4l2_s->val = channel_s->max;
}
531
532static int gb_lights_light_v4l2_register(struct gb_light *light)
533{
534 struct gb_connection *connection = get_conn_from_light(light);
535 struct device *dev = &connection->bundle->dev;
536 struct v4l2_flash_config *sd_cfg;
537 struct led_classdev_flash *fled;
538 struct led_classdev_flash *iled = NULL;
539 struct gb_channel *channel_torch, *channel_ind, *channel_flash;
540 int ret = 0;
541
542 sd_cfg = kcalloc(1, sizeof(*sd_cfg), GFP_KERNEL);
543 if (!sd_cfg)
544 return -ENOMEM;
545
546 channel_torch = get_channel_from_mode(light, GB_CHANNEL_MODE_TORCH);
547 if (channel_torch)
548 __gb_lights_channel_v4l2_config(&channel_torch->intensity_uA,
549 &sd_cfg->torch_intensity);
550
551 channel_ind = get_channel_from_mode(light, GB_CHANNEL_MODE_INDICATOR);
552 if (channel_ind) {
553 __gb_lights_channel_v4l2_config(&channel_ind->intensity_uA,
554 &sd_cfg->indicator_intensity);
555 iled = &channel_ind->fled;
556 }
557
558 channel_flash = get_channel_from_mode(light, GB_CHANNEL_MODE_FLASH);
559 WARN_ON(!channel_flash);
560
561 fled = &channel_flash->fled;
562
563 snprintf(sd_cfg->dev_name, sizeof(sd_cfg->dev_name), "%s", light->name);
564
565 /* Set the possible values to faults, in our case all faults */
566 sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE | LED_FAULT_TIMEOUT |
567 LED_FAULT_OVER_TEMPERATURE | LED_FAULT_SHORT_CIRCUIT |
568 LED_FAULT_OVER_CURRENT | LED_FAULT_INDICATOR |
569 LED_FAULT_UNDER_VOLTAGE | LED_FAULT_INPUT_VOLTAGE |
570 LED_FAULT_LED_OVER_TEMPERATURE;
571
572 light->v4l2_flash = v4l2_flash_init(dev, NULL, fled, iled,
573 &v4l2_flash_ops, sd_cfg);
574 if (IS_ERR_OR_NULL(light->v4l2_flash)) {
575 ret = PTR_ERR(light->v4l2_flash);
576 goto out_free;
577 }
578
579 return ret;
580
581out_free:
582 kfree(sd_cfg);
583 return ret;
584}
585
static void gb_lights_light_v4l2_unregister(struct gb_light *light)
{
	v4l2_flash_release(light->v4l2_flash);
}
#else
/* Without CONFIG_V4L2_FLASH_LED_CLASS the v4l2 hooks are no-ops. */
static int gb_lights_light_v4l2_register(struct gb_light *light)
{
	struct gb_connection *connection = get_conn_from_light(light);

	dev_err(&connection->bundle->dev, "no support for v4l2 subdevices\n");
	return 0;
}

static void gb_lights_light_v4l2_unregister(struct gb_light *light)
{
}
#endif
603
#if IS_REACHABLE(CONFIG_LEDS_CLASS_FLASH)
/* Flash specific operations */

/* flash_brightness_set: push the intensity, then cache it on success. */
static int gb_lights_flash_intensity_set(struct led_classdev_flash *fcdev,
					 u32 brightness)
{
	struct gb_channel *channel = container_of(fcdev, struct gb_channel,
						  fled);
	int ret;

	ret = __gb_lights_flash_intensity_set(channel, brightness);
	if (ret < 0)
		return ret;

	fcdev->brightness.val = brightness;

	return 0;
}
621
622static int gb_lights_flash_intensity_get(struct led_classdev_flash *fcdev,
623 u32 *brightness)
624{
625 *brightness = fcdev->brightness.val;
626
627 return 0;
628}
629
/* strobe_set: send the strobe state; cache it only when the peer accepts. */
static int gb_lights_flash_strobe_set(struct led_classdev_flash *fcdev,
				      bool state)
{
	struct gb_channel *channel = container_of(fcdev, struct gb_channel,
						  fled);
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_set_flash_strobe_request req;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.state = state ? 1 : 0;

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_STROBE,
				&req, sizeof(req), NULL, 0);
	if (!ret)
		channel->strobe_state = state;

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
660
661static int gb_lights_flash_strobe_get(struct led_classdev_flash *fcdev,
662 bool *state)
663{
664 struct gb_channel *channel = container_of(fcdev, struct gb_channel,
665 fled);
666
667 *state = channel->strobe_state;
668 return 0;
669}
670
/* timeout_set: send the flash timeout (us); cache it on success. */
static int gb_lights_flash_timeout_set(struct led_classdev_flash *fcdev,
				       u32 timeout)
{
	struct gb_channel *channel = container_of(fcdev, struct gb_channel,
						  fled);
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_set_flash_timeout_request req;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;
	req.timeout_us = cpu_to_le32(timeout);

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_SET_FLASH_TIMEOUT,
				&req, sizeof(req), NULL, 0);
	if (!ret)
		fcdev->timeout.val = timeout;

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
701
/* fault_get: query the peer's fault bitmask for this channel. */
static int gb_lights_flash_fault_get(struct led_classdev_flash *fcdev,
				     u32 *fault)
{
	struct gb_channel *channel = container_of(fcdev, struct gb_channel,
						  fled);
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct gb_bundle *bundle = connection->bundle;
	struct gb_lights_get_flash_fault_request req;
	struct gb_lights_get_flash_fault_response resp;
	int ret;

	if (channel->releasing)
		return -ESHUTDOWN;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret < 0)
		return ret;

	req.light_id = channel->light->id;
	req.channel_id = channel->id;

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_FLASH_FAULT,
				&req, sizeof(req), &resp, sizeof(resp));
	if (!ret)
		*fault = le32_to_cpu(resp.fault);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
732
/* LED flash class operations backed by the Greybus lights protocol. */
static const struct led_flash_ops gb_lights_flash_ops = {
	.flash_brightness_set	= gb_lights_flash_intensity_set,
	.flash_brightness_get	= gb_lights_flash_intensity_get,
	.strobe_set		= gb_lights_flash_strobe_set,
	.strobe_get		= gb_lights_flash_strobe_get,
	.timeout_set		= gb_lights_flash_timeout_set,
	.fault_get		= gb_lights_flash_fault_get,
};
741
/*
 * Attach a torch channel to its flash channel: both share the flash
 * channel's classdev, the name gains a "_torch" style suffix, and the
 * torch's max_brightness wins (the shared brightness is torch brightness).
 */
static int __gb_lights_channel_torch_attach(struct gb_channel *channel,
					    struct gb_channel *channel_torch)
{
	char *name;

	/* we can only attach torch to a flash channel */
	if (!(channel->mode & GB_CHANNEL_MODE_FLASH))
		return 0;

	/* Move torch brightness to the destination */
	channel->led->max_brightness = channel_torch->led->max_brightness;

	/* append mode name to flash name */
	name = kasprintf(GFP_KERNEL, "%s_%s", channel->led->name,
			 channel_torch->mode_name);
	if (!name)
		return -ENOMEM;
	/* the old name was kasprintf'ed in gb_lights_channel_config() */
	kfree(channel->led->name);
	channel->led->name = name;

	channel_torch->led = channel->led;

	return 0;
}
766
/*
 * Register a flash (or indicator) channel as a flash classdev, seeding
 * its brightness/timeout settings from the constraints fetched earlier
 * and attaching a torch channel if the light has one.
 */
static int __gb_lights_flash_led_register(struct gb_channel *channel)
{
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct led_classdev_flash *fled = &channel->fled;
	struct led_flash_setting *fset;
	struct gb_channel *channel_torch;
	int ret;

	fled->ops = &gb_lights_flash_ops;

	fled->led_cdev.flags |= LED_DEV_CAP_FLASH;

	fset = &fled->brightness;
	fset->min = channel->intensity_uA.min;
	fset->max = channel->intensity_uA.max;
	fset->step = channel->intensity_uA.step;
	fset->val = channel->intensity_uA.max;

	/* Only the flash mode have the timeout constraints settings */
	if (channel->mode & GB_CHANNEL_MODE_FLASH) {
		fset = &fled->timeout;
		fset->min = channel->timeout_us.min;
		fset->max = channel->timeout_us.max;
		fset->step = channel->timeout_us.step;
		fset->val = channel->timeout_us.max;
	}

	/*
	 * If light have torch mode channel, this channel will be the led
	 * classdev of the registered above flash classdev
	 */
	channel_torch = get_channel_from_mode(channel->light,
					      GB_CHANNEL_MODE_TORCH);
	if (channel_torch) {
		ret = __gb_lights_channel_torch_attach(channel, channel_torch);
		if (ret < 0)
			goto fail;
	}

	ret = led_classdev_flash_register(&connection->bundle->dev, fled);
	if (ret < 0)
		goto fail;

	channel->is_registered = true;
	return 0;
fail:
	channel->led = NULL;
	return ret;
}
816
817static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
818{
819 if (!channel->is_registered)
820 return;
821
822 led_classdev_flash_unregister(&channel->fled);
823}
824
825static int gb_lights_channel_flash_config(struct gb_channel *channel)
826{
827 struct gb_connection *connection = get_conn_from_channel(channel);
828 struct gb_lights_get_channel_flash_config_request req;
829 struct gb_lights_get_channel_flash_config_response conf;
830 struct led_flash_setting *fset;
831 int ret;
832
833 req.light_id = channel->light->id;
834 req.channel_id = channel->id;
835
836 ret = gb_operation_sync(connection,
837 GB_LIGHTS_TYPE_GET_CHANNEL_FLASH_CONFIG,
838 &req, sizeof(req), &conf, sizeof(conf));
839 if (ret < 0)
840 return ret;
841
842 /*
843 * Intensity constraints for flash related modes: flash, torch,
844 * indicator. They will be needed for v4l2 registration.
845 */
846 fset = &channel->intensity_uA;
847 fset->min = le32_to_cpu(conf.intensity_min_uA);
848 fset->max = le32_to_cpu(conf.intensity_max_uA);
849 fset->step = le32_to_cpu(conf.intensity_step_uA);
850
851 /*
852 * On flash type, max brightness is set as the number of intensity steps
853 * available.
854 */
855 channel->led->max_brightness = (fset->max - fset->min) / fset->step;
856
857 /* Only the flash mode have the timeout constraints settings */
858 if (channel->mode & GB_CHANNEL_MODE_FLASH) {
859 fset = &channel->timeout_us;
860 fset->min = le32_to_cpu(conf.timeout_min_us);
861 fset->max = le32_to_cpu(conf.timeout_max_us);
862 fset->step = le32_to_cpu(conf.timeout_step_us);
863 }
864
865 return 0;
866}
#else
/* Without CONFIG_LEDS_CLASS_FLASH the flash paths are no-ops. */
static int gb_lights_channel_flash_config(struct gb_channel *channel)
{
	struct gb_connection *connection = get_conn_from_channel(channel);

	dev_err(&connection->bundle->dev, "no support for flash devices\n");
	return 0;
}

static int __gb_lights_flash_led_register(struct gb_channel *channel)
{
	return 0;
}

static void __gb_lights_flash_led_unregister(struct gb_channel *channel)
{
}

#endif
886
/*
 * Register a plain LED classdev for the channel.  On failure the led
 * pointer is cleared so the release path skips unregistering it.
 */
static int __gb_lights_led_register(struct gb_channel *channel)
{
	struct gb_connection *connection = get_conn_from_channel(channel);
	struct led_classdev *cdev = get_channel_cdev(channel);
	int ret;

	ret = led_classdev_register(&connection->bundle->dev, cdev);
	if (ret < 0)
		channel->led = NULL;
	else
		channel->is_registered = true;
	return ret;
}
900
901static int gb_lights_channel_register(struct gb_channel *channel)
902{
903 /* Normal LED channel, just register in led classdev and we are done */
904 if (!is_channel_flash(channel))
905 return __gb_lights_led_register(channel);
906
907 /*
908 * Flash Type need more work, register flash classdev, indicator as
909 * flash classdev, torch will be led classdev of the flash classdev.
910 */
911 if (!(channel->mode & GB_CHANNEL_MODE_TORCH))
912 return __gb_lights_flash_led_register(channel);
913
914 return 0;
915}
916
/* Unregister the LED classdev (if registered) and drop the led pointer. */
static void __gb_lights_led_unregister(struct gb_channel *channel)
{
	struct led_classdev *cdev = get_channel_cdev(channel);

	if (!channel->is_registered)
		return;

	led_classdev_unregister(cdev);
	channel->led = NULL;
}
927
928static void gb_lights_channel_unregister(struct gb_channel *channel)
929{
930 /* The same as register, handle channels differently */
931 if (!is_channel_flash(channel)) {
932 __gb_lights_led_unregister(channel);
933 return;
934 }
935
936 if (channel->mode & GB_CHANNEL_MODE_TORCH)
937 __gb_lights_led_unregister(channel);
938 else
939 __gb_lights_flash_led_unregister(channel);
940}
941
/*
 * Fetch a channel's configuration from the peer and populate the channel
 * and its classdev (name, max brightness, sysfs attrs, ops).  Flash-type
 * channels additionally fetch their flash constraints.
 *
 * Allocated strings (color_name, mode_name) are freed by
 * gb_lights_channel_free().
 * NOTE(review): the kasprintf'ed cdev->name does not appear to be freed
 * in gb_lights_channel_free() -- verify for a leak on release.
 */
static int gb_lights_channel_config(struct gb_light *light,
				    struct gb_channel *channel)
{
	struct gb_lights_get_channel_config_response conf;
	struct gb_lights_get_channel_config_request req;
	struct gb_connection *connection = get_conn_from_light(light);
	struct led_classdev *cdev = get_channel_cdev(channel);
	char *name;
	int ret;

	req.light_id = light->id;
	req.channel_id = channel->id;

	ret = gb_operation_sync(connection, GB_LIGHTS_TYPE_GET_CHANNEL_CONFIG,
				&req, sizeof(req), &conf, sizeof(conf));
	if (ret < 0)
		return ret;

	channel->light = light;
	channel->mode = le32_to_cpu(conf.mode);
	channel->flags = le32_to_cpu(conf.flags);
	channel->color = le32_to_cpu(conf.color);
	channel->color_name = kstrndup(conf.color_name, NAMES_MAX, GFP_KERNEL);
	if (!channel->color_name)
		return -ENOMEM;
	channel->mode_name = kstrndup(conf.mode_name, NAMES_MAX, GFP_KERNEL);
	if (!channel->mode_name)
		return -ENOMEM;

	channel->led = cdev;

	/* classdev name: "<light>:<color>:<mode>" */
	name = kasprintf(GFP_KERNEL, "%s:%s:%s", light->name,
			 channel->color_name, channel->mode_name);
	if (!name)
		return -ENOMEM;

	cdev->name = name;

	cdev->max_brightness = conf.max_brightness;

	ret = channel_attr_groups_set(channel, cdev);
	if (ret < 0)
		return ret;

	gb_lights_led_operations_set(channel, cdev);

	/*
	 * If it is not a flash related channel (flash, torch or indicator) we
	 * are done here. If not, continue and fetch flash related
	 * configurations.
	 */
	if (!is_channel_flash(channel))
		return ret;

	light->has_flash = true;

	ret = gb_lights_channel_flash_config(channel);
	if (ret < 0)
		return ret;

	return ret;
}
1004
1005static int gb_lights_light_config(struct gb_lights *glights, u8 id)
1006{
1007 struct gb_light *light = &glights->lights[id];
1008 struct gb_lights_get_light_config_request req;
1009 struct gb_lights_get_light_config_response conf;
1010 int ret;
1011 int i;
1012
1013 light->glights = glights;
1014 light->id = id;
1015
1016 req.id = id;
1017
1018 ret = gb_operation_sync(glights->connection,
1019 GB_LIGHTS_TYPE_GET_LIGHT_CONFIG,
1020 &req, sizeof(req), &conf, sizeof(conf));
1021 if (ret < 0)
1022 return ret;
1023
1024 if (!conf.channel_count)
1025 return -EINVAL;
1026 if (!strlen(conf.name))
1027 return -EINVAL;
1028
1029 light->channels_count = conf.channel_count;
1030 light->name = kstrndup(conf.name, NAMES_MAX, GFP_KERNEL);
1031
1032 light->channels = kzalloc(light->channels_count *
1033 sizeof(struct gb_channel), GFP_KERNEL);
1034 if (!light->channels)
1035 return -ENOMEM;
1036
1037 /* First we collect all the configurations for all channels */
1038 for (i = 0; i < light->channels_count; i++) {
1039 light->channels[i].id = i;
1040 ret = gb_lights_channel_config(light, &light->channels[i]);
1041 if (ret < 0)
1042 return ret;
1043 }
1044
1045 return 0;
1046}
1047
/*
 * Register the classdevs for a fully-configured light, then the v4l2
 * flash sub-device if any flash channel was found.  Marks the light
 * ready so unsolicited events for it are accepted.
 */
static int gb_lights_light_register(struct gb_light *light)
{
	int ret;
	int i;

	/*
	 * Then, if everything went ok in getting configurations, we register
	 * the classdev, flash classdev and v4l2 subsystem, if a flash device is
	 * found.
	 */
	for (i = 0; i < light->channels_count; i++) {
		ret = gb_lights_channel_register(&light->channels[i]);
		if (ret < 0)
			return ret;

		mutex_init(&light->channels[i].lock);
	}

	light->ready = true;

	if (light->has_flash) {
		ret = gb_lights_light_v4l2_register(light);
		if (ret < 0) {
			/* clear so release won't v4l2-unregister a failed init */
			light->has_flash = false;
			return ret;
		}
	}

	return 0;
}
1078
/* Free a channel's allocated attrs and names; kfree(NULL) is a no-op. */
static void gb_lights_channel_free(struct gb_channel *channel)
{
	kfree(channel->attrs);
	kfree(channel->attr_group);
	kfree(channel->attr_groups);
	kfree(channel->color_name);
	kfree(channel->mode_name);
	mutex_destroy(&channel->lock);
}
1088
1089static void gb_lights_channel_release(struct gb_channel *channel)
1090{
1091 channel->releasing = true;
1092
1093 gb_lights_channel_unregister(channel);
1094
1095 gb_lights_channel_free(channel);
1096}
1097
/*
 * Tear down a light: stop accepting events, drop the v4l2 sub-device,
 * release every channel, then free the channel array and name.
 */
static void gb_lights_light_release(struct gb_light *light)
{
	int i;
	int count;

	light->ready = false;

	count = light->channels_count;

	if (light->has_flash)
		gb_lights_light_v4l2_unregister(light);

	for (i = 0; i < count; i++) {
		gb_lights_channel_release(&light->channels[i]);
		light->channels_count--;
	}
	kfree(light->channels);
	kfree(light->name);
}
1117
/* Release every light and the glights container itself; NULL-safe. */
static void gb_lights_release(struct gb_lights *glights)
{
	int i;

	if (!glights)
		return;

	mutex_lock(&glights->lights_lock);
	if (!glights->lights)
		goto free_glights;

	for (i = 0; i < glights->lights_count; i++)
		gb_lights_light_release(&glights->lights[i]);

	kfree(glights->lights);

free_glights:
	mutex_unlock(&glights->lights_lock);
	mutex_destroy(&glights->lights_lock);
	kfree(glights);
}
1139
/* Ask the peer how many lights it has; zero lights is treated as invalid. */
static int gb_lights_get_count(struct gb_lights *glights)
{
	struct gb_lights_get_lights_response resp;
	int ret;

	ret = gb_operation_sync(glights->connection, GB_LIGHTS_TYPE_GET_LIGHTS,
				NULL, 0, &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	if (!resp.lights_count)
		return -EINVAL;

	glights->lights_count = resp.lights_count;

	return 0;
}
1157
1158static int gb_lights_create_all(struct gb_lights *glights)
1159{
1160 struct gb_connection *connection = glights->connection;
1161 int ret;
1162 int i;
1163
1164 mutex_lock(&glights->lights_lock);
1165 ret = gb_lights_get_count(glights);
1166 if (ret < 0)
1167 goto out;
1168
1169 glights->lights = kzalloc(glights->lights_count *
1170 sizeof(struct gb_light), GFP_KERNEL);
1171 if (!glights->lights) {
1172 ret = -ENOMEM;
1173 goto out;
1174 }
1175
1176 for (i = 0; i < glights->lights_count; i++) {
1177 ret = gb_lights_light_config(glights, i);
1178 if (ret < 0) {
1179 dev_err(&connection->bundle->dev,
1180 "Fail to configure lights device\n");
1181 goto out;
1182 }
1183 }
1184
1185out:
1186 mutex_unlock(&glights->lights_lock);
1187 return ret;
1188}
1189
1190static int gb_lights_register_all(struct gb_lights *glights)
1191{
1192 struct gb_connection *connection = glights->connection;
1193 int ret = 0;
1194 int i;
1195
1196 mutex_lock(&glights->lights_lock);
1197 for (i = 0; i < glights->lights_count; i++) {
1198 ret = gb_lights_light_register(&glights->lights[i]);
1199 if (ret < 0) {
1200 dev_err(&connection->bundle->dev,
1201 "Fail to enable lights device\n");
1202 break;
1203 }
1204 }
1205
1206 mutex_unlock(&glights->lights_lock);
1207 return ret;
1208}
1209
/*
 * Handle unsolicited requests from the module. The only supported type
 * is GB_LIGHTS_TYPE_EVENT; on a CONFIG event the named light is torn
 * down and rebuilt from scratch.
 */
static int gb_lights_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct device *dev = &connection->bundle->dev;
	struct gb_lights *glights = gb_connection_get_data(connection);
	struct gb_light *light;
	struct gb_message *request;
	struct gb_lights_event_request *payload;
	int ret = 0;
	u8 light_id;
	u8 event;

	if (op->type != GB_LIGHTS_TYPE_EVENT) {
		dev_err(dev, "Unsupported unsolicited event: %u\n", op->type);
		return -EINVAL;
	}

	request = op->request;

	/* Reject truncated payloads before touching any field. */
	if (request->payload_size < sizeof(*payload)) {
		dev_err(dev, "Wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	light_id = payload->light_id;

	/* Only lights that completed configuration may emit events. */
	if (light_id >= glights->lights_count ||
	    !glights->lights[light_id].ready) {
		dev_err(dev, "Event received for unconfigured light id: %d\n",
			light_id);
		return -EINVAL;
	}

	event = payload->event;

	if (event & GB_LIGHTS_LIGHT_CONFIG) {
		light = &glights->lights[light_id];

		/*
		 * Rebuild the light under the lock: release the old state,
		 * reconfigure from the module, reregister; on failure release
		 * again so no half-built light remains.
		 */
		mutex_lock(&glights->lights_lock);
		gb_lights_light_release(light);
		ret = gb_lights_light_config(glights, light_id);
		if (!ret)
			ret = gb_lights_light_register(light);
		if (ret < 0)
			gb_lights_light_release(light);
		mutex_unlock(&glights->lights_lock);
	}

	return ret;
}
1262
/*
 * Bind to a lights bundle: require exactly one CPort speaking the
 * lights protocol, then bring the connection up in two stages (TX-only
 * first) so unsolicited events are only accepted once all lights exist.
 */
static int gb_lights_probe(struct gb_bundle *bundle,
			   const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_lights *glights;
	int ret;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LIGHTS)
		return -ENODEV;

	glights = kzalloc(sizeof(*glights), GFP_KERNEL);
	if (!glights)
		return -ENOMEM;

	mutex_init(&glights->lights_lock);

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_lights_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto out;
	}

	glights->connection = connection;
	gb_connection_set_data(connection, glights);

	greybus_set_drvdata(bundle, glights);

	/* We aren't ready to receive an incoming request yet */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto error_connection_destroy;

	/*
	 * Setup all the lights devices over this connection, if anything goes
	 * wrong tear down all lights
	 */
	ret = gb_lights_create_all(glights);
	if (ret < 0)
		goto error_connection_disable;

	/* We are ready to receive an incoming request now, enable RX as well */
	ret = gb_connection_enable(connection);
	if (ret)
		goto error_connection_disable;

	/* Enable & register lights */
	ret = gb_lights_register_all(glights);
	if (ret < 0)
		goto error_connection_disable;

	/* Probe done; allow the bundle to runtime-suspend when idle. */
	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

error_connection_disable:
	gb_connection_disable(connection);
error_connection_destroy:
	gb_connection_destroy(connection);
out:
	gb_lights_release(glights);
	return ret;
}
1331
static void gb_lights_disconnect(struct gb_bundle *bundle)
{
	struct gb_lights *glights = greybus_get_drvdata(bundle);

	/*
	 * Resume the bundle for the teardown operations; on resume failure
	 * take a no-resume reference instead so the PM count stays
	 * balanced. NOTE(review): standard greybus driver pattern — confirm
	 * against gb_pm_runtime_* semantics.
	 */
	if (gb_pm_runtime_get_sync(bundle))
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(glights->connection);
	gb_connection_destroy(glights->connection);

	gb_lights_release(glights);
}
1344
/* Match any bundle whose class is "lights". */
static const struct greybus_bundle_id gb_lights_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LIGHTS) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_lights_id_table);

static struct greybus_driver gb_lights_driver = {
	.name		= "lights",
	.probe		= gb_lights_probe,
	.disconnect	= gb_lights_disconnect,
	.id_table	= gb_lights_id_table,
};
module_greybus_driver(gb_lights_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/log.c b/drivers/staging/greybus/log.c
new file mode 100644
index 000000000000..70dd9e5a1cf2
--- /dev/null
+++ b/drivers/staging/greybus/log.c
@@ -0,0 +1,132 @@
1/*
2 * Greybus driver for the log protocol
3 *
4 * Copyright 2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/sizes.h>
12#include <linux/uaccess.h>
13
14#include "greybus.h"
15
/* Per-bundle state: just the CPort connection the log arrives on. */
struct gb_log {
	struct gb_connection *connection;
};
19
/*
 * Receive a log message sent by the module and emit it via dynamic
 * debug. The payload is a gb_log_send_log_request header followed by
 * `len` bytes of text; every length claim is validated against the
 * actual payload size before the buffer is touched.
 */
static int gb_log_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct device *dev = &connection->bundle->dev;
	struct gb_log_send_log_request *receive;
	u16 len;

	if (op->type != GB_LOG_TYPE_SEND_LOG) {
		dev_err(dev, "unknown request type 0x%02x\n", op->type);
		return -EINVAL;
	}

	/* Verify size of payload */
	if (op->request->payload_size < sizeof(*receive)) {
		dev_err(dev, "log request too small (%zu < %zu)\n",
			op->request->payload_size, sizeof(*receive));
		return -EINVAL;
	}
	receive = op->request->payload;
	len = le16_to_cpu(receive->len);
	/* The declared length must match exactly what was received. */
	if (len != (int)(op->request->payload_size - sizeof(*receive))) {
		dev_err(dev, "log request wrong size %d vs %d\n", len,
			(int)(op->request->payload_size - sizeof(*receive)));
		return -EINVAL;
	}
	if (len == 0) {
		dev_err(dev, "log request of 0 bytes?\n");
		return -EINVAL;
	}

	if (len > GB_LOG_MAX_LEN) {
		dev_err(dev, "log request too big: %d\n", len);
		return -EINVAL;
	}

	/*
	 * Ensure the buffer is 0 terminated. Note this overwrites the final
	 * byte, so a message that completely fills the buffer loses its last
	 * character.
	 */
	receive->msg[len - 1] = '\0';

	/*
	 * Print with dev_dbg() so that it can be easily turned off using
	 * dynamic debugging (and prevent any DoS)
	 */
	dev_dbg(dev, "%s", receive->msg);

	return 0;
}
64
65static int gb_log_probe(struct gb_bundle *bundle,
66 const struct greybus_bundle_id *id)
67{
68 struct greybus_descriptor_cport *cport_desc;
69 struct gb_connection *connection;
70 struct gb_log *log;
71 int retval;
72
73 if (bundle->num_cports != 1)
74 return -ENODEV;
75
76 cport_desc = &bundle->cport_desc[0];
77 if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOG)
78 return -ENODEV;
79
80 log = kzalloc(sizeof(*log), GFP_KERNEL);
81 if (!log)
82 return -ENOMEM;
83
84 connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
85 gb_log_request_handler);
86 if (IS_ERR(connection)) {
87 retval = PTR_ERR(connection);
88 goto error_free;
89 }
90
91 log->connection = connection;
92 greybus_set_drvdata(bundle, log);
93
94 retval = gb_connection_enable(connection);
95 if (retval)
96 goto error_connection_destroy;
97
98 return 0;
99
100error_connection_destroy:
101 gb_connection_destroy(connection);
102error_free:
103 kfree(log);
104 return retval;
105}
106
107static void gb_log_disconnect(struct gb_bundle *bundle)
108{
109 struct gb_log *log = greybus_get_drvdata(bundle);
110 struct gb_connection *connection = log->connection;
111
112 gb_connection_disable(connection);
113 gb_connection_destroy(connection);
114
115 kfree(log);
116}
117
/* Match any bundle whose class is "log". */
static const struct greybus_bundle_id gb_log_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOG) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_log_id_table);

static struct greybus_driver gb_log_driver = {
	.name		= "log",
	.probe		= gb_log_probe,
	.disconnect	= gb_log_disconnect,
	.id_table	= gb_log_id_table,
};
module_greybus_driver(gb_log_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
new file mode 100644
index 000000000000..8b0d0dc2ed8b
--- /dev/null
+++ b/drivers/staging/greybus/loopback.c
@@ -0,0 +1,1365 @@
1/*
2 * Loopback bridge driver for the Greybus loopback module.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/slab.h>
16#include <linux/kthread.h>
17#include <linux/delay.h>
18#include <linux/random.h>
19#include <linux/sizes.h>
20#include <linux/cdev.h>
21#include <linux/fs.h>
22#include <linux/kfifo.h>
23#include <linux/debugfs.h>
24#include <linux/list_sort.h>
25#include <linux/spinlock.h>
26#include <linux/workqueue.h>
27#include <linux/atomic.h>
28#include <linux/pm_runtime.h>
29
30#include <asm/div64.h>
31
32#include "greybus.h"
33#include "connection.h"
34
/* Wrap window used when computing timestamp deltas, in nanoseconds. */
#define NSEC_PER_DAY 86400000000000ULL

/* Running min/max/sum/count for one measured quantity. */
struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

/* Driver-global state shared by every loopback connection. */
struct gb_loopback_device {
	struct dentry *root;	/* debugfs root directory */
	u32 count;		/* NOTE(review): not referenced in this chunk */
	size_t size_max;	/* largest payload accepted (see size clamp) */

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;	/* in-flight async operations */
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

/* One asynchronous (non-blocking) loopback operation in flight. */
struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct timeval ts;		/* submission timestamp */
	struct timer_list timer;	/* per-operation timeout */
	struct list_head entry;		/* on gb_dev.list_op_async */
	struct work_struct work;	/* timeout handling, sleepable ctx */
	struct kref kref;
	bool pending;			/* still awaiting completion */
	int (*completion)(struct gb_loopback_async_operation *op_async);
};
69
/* Per-connection loopback state, exposed through sysfs and debugfs. */
struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;			/* debugfs entry */
	struct kfifo kfifo_lat;			/* logged latencies */
	struct kfifo kfifo_ts;			/* logged timestamp pairs */
	struct mutex mutex;			/* guards stats and settings */
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;	/* async-drain wait queue */
	atomic_t outstanding_operations;

	/* Per connection stats */
	struct timeval ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;		/* test type; see the sysfs "type" comment */
	int async;		/* non-zero selects async operations */
	int id;
	u32 size;		/* payload size for transfer/sink tests */
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;		/* delay between messages, microseconds */
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;		/* per-request timeout, microseconds */
	u32 jiffy_timeout;	/* `timeout` converted and clamped, jiffies */
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;	/* latency of the most recent operation */
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name = "gb_loopback",
	.owner = THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN 1
#define GB_LOOPBACK_TIMEOUT_MAX 10000

#define GB_LOOPBACK_FIFO_DEFAULT 8192

/* Capacity of the latency/timestamp kfifos; read-only module param. */
static unsigned kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

/* Upper bound for the inter-message wait (us_wait), in microseconds. */
#define GB_LOOPBACK_US_WAIT_MAX 1000000
134
/* interface sysfs attributes */

/* Read-only u32 attribute: <field> shows gb->field directly. */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

/*
 * Read-only stats attribute <name>_<field>; reports 0 until at least
 * one request has completed so stale min/max are never shown.
 */
#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	/* Report 0 for min and max if no transfer successed */	\
	if (!gb->requests_completed)				\
		return sprintf(buf, "0\n");			\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}								\
static DEVICE_ATTR_RO(name##_##field)

/*
 * Read-only average attribute: prints sum/count with six decimals.
 * NOTE(review): the rounding term added to the sum is count / 2000000,
 * which is ~0 for small counts — confirm this matches the intended
 * "round closest" behaviour.
 */
#define gb_loopback_ro_avg_attr(name)				\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback_stats *stats;			\
	struct gb_loopback *gb;					\
	u64 avg, rem;						\
	u32 count;						\
	gb = dev_get_drvdata(dev);			\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;		\
	avg = stats->sum + count / 2000000; /* round closest */	\
	rem = do_div(avg, count);				\
	rem *= 1000000;						\
	do_div(rem, count);					\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);	\
}								\
static DEVICE_ATTR_RO(name##_avg)

/* Emit the min, max and avg attributes for one stats member. */
#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)
183
/*
 * Read/write attribute backed by sscanf into gb->field; a successful
 * store validates/applies the new settings via gb_loopback_check_attr()
 * under gb->mutex.
 *
 * Fix: the store path passed a stray second argument ("bundle") to
 * gb_loopback_check_attr(), which takes only the gb_loopback pointer —
 * any instantiation of this macro would fail to compile.
 */
#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);				\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)
209
/* Read-only u32 attribute; the `conn` argument is accepted but unused. */
#define gb_dev_loopback_ro_attr(field, conn)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%u\n", gb->field);			\
}								\
static DEVICE_ATTR_RO(field)

/*
 * Read/write attribute: parse the new value with sscanf, then run
 * gb_loopback_check_attr() under gb->mutex to validate/apply it.
 */
#define gb_dev_loopback_rw_attr(field, type)			\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,	\
			    char *buf)				\
{								\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	return sprintf(buf, "%"#type"\n", gb->field);		\
}								\
static ssize_t field##_store(struct device *dev,		\
			    struct device_attribute *attr,	\
			    const char *buf,			\
			    size_t len)				\
{								\
	int ret;						\
	struct gb_loopback *gb = dev_get_drvdata(dev);		\
	mutex_lock(&gb->mutex);					\
	ret = sscanf(buf, "%"#type, &gb->field);		\
	if (ret != 1)						\
		len = -EINVAL;					\
	else							\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);				\
	return len;						\
}								\
static DEVICE_ATTR_RW(field)
245
static void gb_loopback_reset_stats(struct gb_loopback *gb);

/*
 * Validate and apply freshly-written sysfs settings; called from the
 * *_store handlers with gb->mutex held. Clamps us_wait and size, zeroes
 * the per-run counters, drains the logging kfifos and, for a valid test
 * type, recomputes the clamped jiffy timeout, resets the stats and
 * wakes any waiter on gb->wq. Any other type is forced back to 0 (off).
 */
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	/* Warn if one full run would log more samples than the fifo holds. */
	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);
	kfifo_reset_out(&gb->kfifo_ts);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		/* Convert the user timeout (us) to clamped jiffies. */
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}
284
/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 * payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
/* NOTE(review): the second macro argument is ignored by the macro. */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
330
/* All per-device attributes registered with the loopback class. */
static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);
363
364static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);
365
366static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
367{
368 u32 lat;
369
370 do_div(elapsed_nsecs, NSEC_PER_USEC);
371 lat = elapsed_nsecs;
372 return lat;
373}
374
375static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
376{
377 if (t2 > t1)
378 return t2 - t1;
379 else
380 return NSEC_PER_DAY - t2 + t1;
381}
382
383static u64 gb_loopback_calc_latency(struct timeval *ts, struct timeval *te)
384{
385 u64 t1, t2;
386
387 t1 = timeval_to_ns(ts);
388 t2 = timeval_to_ns(te);
389
390 return __gb_loopback_calc_latency(t1, t2);
391}
392
/* Log one operation's start/end timestamp pair into the ts fifo. */
static void gb_loopback_push_latency_ts(struct gb_loopback *gb,
					struct timeval *ts, struct timeval *te)
{
	kfifo_in(&gb->kfifo_ts, (unsigned char *)ts, sizeof(*ts));
	kfifo_in(&gb->kfifo_ts, (unsigned char *)te, sizeof(*te));
}
399
400static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
401 void *request, int request_size,
402 void *response, int response_size)
403{
404 struct gb_operation *operation;
405 struct timeval ts, te;
406 int ret;
407
408 do_gettimeofday(&ts);
409 operation = gb_operation_create(gb->connection, type, request_size,
410 response_size, GFP_KERNEL);
411 if (!operation)
412 return -ENOMEM;
413
414 if (request_size)
415 memcpy(operation->request->payload, request, request_size);
416
417 ret = gb_operation_request_send_sync(operation);
418 if (ret) {
419 dev_err(&gb->connection->bundle->dev,
420 "synchronous operation failed: %d\n", ret);
421 goto out_put_operation;
422 } else {
423 if (response_size == operation->response->payload_size) {
424 memcpy(response, operation->response->payload,
425 response_size);
426 } else {
427 dev_err(&gb->connection->bundle->dev,
428 "response size %zu expected %d\n",
429 operation->response->payload_size,
430 response_size);
431 ret = -EINVAL;
432 goto out_put_operation;
433 }
434 }
435
436 do_gettimeofday(&te);
437
438 /* Calculate the total time the message took */
439 gb_loopback_push_latency_ts(gb, &ts, &te);
440 gb->elapsed_nsecs = gb_loopback_calc_latency(&ts, &te);
441
442out_put_operation:
443 gb_operation_put(operation);
444
445 return ret;
446}
447
/*
 * Final kref release. Runs with gb_dev.lock held (the kref_put() in
 * gb_loopback_async_operation_put() takes it) so the list_del() is
 * safe; drops the gb_operation reference and wakes anyone waiting for
 * all outstanding operations to drain.
 */
static void __gb_loopback_async_operation_destroy(struct kref *kref)
{
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(kref, struct gb_loopback_async_operation, kref);

	list_del(&op_async->entry);
	if (op_async->operation)
		gb_operation_put(op_async->operation);
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&op_async->gb->wq_completion);
	kfree(op_async);
}
461
/* Take an additional reference on an async operation tracker. */
static void gb_loopback_async_operation_get(struct gb_loopback_async_operation
					    *op_async)
{
	kref_get(&op_async->kref);
}

/*
 * Drop a reference. gb_dev.lock is held across kref_put() because the
 * release callback unlinks the operation from gb_dev.list_op_async.
 */
static void gb_loopback_async_operation_put(struct gb_loopback_async_operation
					    *op_async)
{
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	kref_put(&op_async->kref, __gb_loopback_async_operation_destroy);
	spin_unlock_irqrestore(&gb_dev.lock, flags);
}
477
/*
 * Look up an in-flight async operation by greybus operation id.
 * On success a reference is taken; the caller must release it with
 * gb_loopback_async_operation_put().
 */
static struct gb_loopback_async_operation *
	gb_loopback_operation_find(u16 id)
{
	struct gb_loopback_async_operation *op_async;
	bool found = false;
	unsigned long flags;

	spin_lock_irqsave(&gb_dev.lock, flags);
	list_for_each_entry(op_async, &gb_dev.list_op_async, entry) {
		if (op_async->operation->id == id) {
			gb_loopback_async_operation_get(op_async);
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	return found ? op_async : NULL;
}

/* Block until every outstanding async operation has been destroyed. */
static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}
503
/*
 * Response callback for an asynchronous loopback operation.
 *
 * Reference handling: gb_loopback_operation_find() takes one reference,
 * dropped by the final put below. If the operation was still pending
 * (the timeout worker has not claimed it first) the initial reference
 * from kref_init() is dropped here too and the stats are updated
 * exactly once.
 */
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	struct timeval te;
	bool err = false;

	/* Timestamp completion before the lookup/locking delay. */
	do_gettimeofday(&te);
	op_async = gb_loopback_operation_find(operation->id);
	if (!op_async)
		return;

	gb = op_async->gb;
	mutex_lock(&gb->mutex);

	if (!op_async->pending || gb_operation_result(operation)) {
		err = true;
	} else {
		/* Optional payload validation (e.g. transfer echo check). */
		if (op_async->completion)
			if (op_async->completion(op_async))
				err = true;
	}

	if (!err) {
		gb_loopback_push_latency_ts(gb, &op_async->ts, &te);
		gb->elapsed_nsecs = gb_loopback_calc_latency(&op_async->ts,
							     &te);
	}

	if (op_async->pending) {
		if (err)
			gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		del_timer_sync(&op_async->timer);
		/* Drop the initial reference from kref_init(). */
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, err);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Drop the reference taken by gb_loopback_operation_find(). */
	gb_loopback_async_operation_put(op_async);
}
549
/*
 * Workqueue handler for a timed-out async operation: account the
 * timeout once (unless the completion callback already claimed the
 * operation), cancel the greybus operation, and drop the lookup
 * reference taken in the timer callback.
 */
static void gb_loopback_async_operation_work(struct work_struct *work)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_async_operation *op_async;

	op_async = container_of(work, struct gb_loopback_async_operation, work);
	gb = op_async->gb;
	operation = op_async->operation;

	mutex_lock(&gb->mutex);
	if (op_async->pending) {
		gb->requests_timedout++;
		gb->error++;
		gb->iteration_count++;
		op_async->pending = false;
		/* Drop the initial reference from kref_init(). */
		gb_loopback_async_operation_put(op_async);
		gb_loopback_calculate_stats(gb, true);
	}
	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "timeout operation %d\n",
		operation->id);

	gb_operation_cancel(operation, -ETIMEDOUT);
	/* Reference taken by gb_loopback_operation_find() in the timer. */
	gb_loopback_async_operation_put(op_async);
}
577
/*
 * Timer callback (atomic context): find the timed-out operation by id,
 * keep the reference the lookup took, and defer the real handling to
 * the workqueue, which may sleep. The work handler drops the reference.
 */
static void gb_loopback_async_operation_timeout(unsigned long data)
{
	struct gb_loopback_async_operation *op_async;
	u16 id = data;

	op_async = gb_loopback_operation_find(id);
	if (!op_async) {
		pr_err("operation %d not found - time out ?\n", id);
		return;
	}
	schedule_work(&op_async->work);
}
590
/*
 * Submit one asynchronous loopback operation.
 *
 * The tracker is published on gb_dev.list_op_async before sending so
 * the completion and timeout paths can find it by operation id. A
 * per-operation timer bounds the wait; its handler defers to a
 * workqueue. `completion` (may be NULL) is invoked from the response
 * callback to validate the payload.
 */
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;
	unsigned long flags;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	INIT_WORK(&op_async->work, gb_loopback_async_operation_work);
	init_timer(&op_async->timer);
	kref_init(&op_async->kref);

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	/* Make the operation findable by id before a response can arrive. */
	spin_lock_irqsave(&gb_dev.lock, flags);
	list_add_tail(&op_async->entry, &gb_dev.list_op_async);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	do_gettimeofday(&op_async->ts);
	op_async->pending = true;
	atomic_inc(&gb->outstanding_operations);
	mutex_lock(&gb->mutex);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					GFP_KERNEL);
	if (ret)
		goto error;

	/* Arm the timeout only after the send succeeded. */
	op_async->timer.function = gb_loopback_async_operation_timeout;
	op_async->timer.expires = jiffies + gb->jiffy_timeout;
	op_async->timer.data = (unsigned long)operation->id;
	add_timer(&op_async->timer);

	goto done;
error:
	/* Drops the initial reference and unlinks/frees the tracker. */
	gb_loopback_async_operation_put(op_async);
done:
	mutex_unlock(&gb->mutex);
	return ret;
}
649
650static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
651{
652 struct gb_loopback_transfer_request *request;
653 int retval;
654
655 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
656 if (!request)
657 return -ENOMEM;
658
659 request->len = cpu_to_le32(len);
660 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
661 request, len + sizeof(*request),
662 NULL, 0);
663 kfree(request);
664 return retval;
665}
666
667static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
668{
669 struct gb_loopback_transfer_request *request;
670 struct gb_loopback_transfer_response *response;
671 int retval;
672
673 gb->apbridge_latency_ts = 0;
674 gb->gbphy_latency_ts = 0;
675
676 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
677 if (!request)
678 return -ENOMEM;
679 response = kmalloc(len + sizeof(*response), GFP_KERNEL);
680 if (!response) {
681 kfree(request);
682 return -ENOMEM;
683 }
684
685 memset(request->data, 0x5A, len);
686
687 request->len = cpu_to_le32(len);
688 retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
689 request, len + sizeof(*request),
690 response, len + sizeof(*response));
691 if (retval)
692 goto gb_error;
693
694 if (memcmp(request->data, response->data, len)) {
695 dev_err(&gb->connection->bundle->dev,
696 "Loopback Data doesn't match\n");
697 retval = -EREMOTEIO;
698 }
699 gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
700 gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);
701
702gb_error:
703 kfree(request);
704 kfree(response);
705
706 return retval;
707}
708
709static int gb_loopback_sync_ping(struct gb_loopback *gb)
710{
711 return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
712 NULL, 0, NULL, 0);
713}
714
715static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
716{
717 struct gb_loopback_transfer_request *request;
718 int retval;
719
720 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
721 if (!request)
722 return -ENOMEM;
723
724 request->len = cpu_to_le32(len);
725 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
726 request, len + sizeof(*request),
727 0, NULL);
728 kfree(request);
729 return retval;
730}
731
732static int gb_loopback_async_transfer_complete(
733 struct gb_loopback_async_operation *op_async)
734{
735 struct gb_loopback *gb;
736 struct gb_operation *operation;
737 struct gb_loopback_transfer_request *request;
738 struct gb_loopback_transfer_response *response;
739 size_t len;
740 int retval = 0;
741
742 gb = op_async->gb;
743 operation = op_async->operation;
744 request = operation->request->payload;
745 response = operation->response->payload;
746 len = le32_to_cpu(request->len);
747
748 if (memcmp(request->data, response->data, len)) {
749 dev_err(&gb->connection->bundle->dev,
750 "Loopback Data doesn't match operation id %d\n",
751 operation->id);
752 retval = -EREMOTEIO;
753 } else {
754 gb->apbridge_latency_ts =
755 (u32)__le32_to_cpu(response->reserved0);
756 gb->gbphy_latency_ts =
757 (u32)__le32_to_cpu(response->reserved1);
758 }
759
760 return retval;
761}
762
763static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
764{
765 struct gb_loopback_transfer_request *request;
766 int retval, response_len;
767
768 request = kmalloc(len + sizeof(*request), GFP_KERNEL);
769 if (!request)
770 return -ENOMEM;
771
772 memset(request->data, 0x5A, len);
773
774 request->len = cpu_to_le32(len);
775 response_len = sizeof(struct gb_loopback_transfer_response);
776 retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
777 request, len + sizeof(*request),
778 len + response_len,
779 gb_loopback_async_transfer_complete);
780 if (retval)
781 goto gb_error;
782
783gb_error:
784 kfree(request);
785 return retval;
786}
787
788static int gb_loopback_async_ping(struct gb_loopback *gb)
789{
790 return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
791 NULL, 0, 0, NULL);
792}
793
/*
 * Handle loopback requests initiated by the remote end. Pings and
 * sinks need no response payload; transfers echo the request payload
 * back after validating the claimed length.
 */
static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		/* Header must be complete before its fields are read. */
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL; /* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		/* Bound the echo by the global maximum payload size. */
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}
838
839static void gb_loopback_reset_stats(struct gb_loopback *gb)
840{
841 struct gb_loopback_stats reset = {
842 .min = U32_MAX,
843 };
844
845 /* Reset per-connection stats */
846 memcpy(&gb->latency, &reset,
847 sizeof(struct gb_loopback_stats));
848 memcpy(&gb->throughput, &reset,
849 sizeof(struct gb_loopback_stats));
850 memcpy(&gb->requests_per_second, &reset,
851 sizeof(struct gb_loopback_stats));
852 memcpy(&gb->apbridge_unipro_latency, &reset,
853 sizeof(struct gb_loopback_stats));
854 memcpy(&gb->gbphy_firmware_latency, &reset,
855 sizeof(struct gb_loopback_stats));
856
857 /* Should be initialized at least once per transaction set */
858 gb->apbridge_latency_ts = 0;
859 gb->gbphy_latency_ts = 0;
860 memset(&gb->ts, 0, sizeof(struct timeval));
861}
862
863static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
864{
865 if (stats->min > val)
866 stats->min = val;
867 if (stats->max < val)
868 stats->max = val;
869 stats->sum += val;
870 stats->count++;
871}
872
873static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
874 u64 val, u32 count)
875{
876 stats->sum += val;
877 stats->count += count;
878
879 do_div(val, count);
880 if (stats->min > val)
881 stats->min = val;
882 if (stats->max < val)
883 stats->max = val;
884}
885
886static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
887{
888 u64 req = gb->requests_completed * USEC_PER_SEC;
889
890 gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
891}
892
893static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
894{
895 u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;
896
897 switch (gb->type) {
898 case GB_LOOPBACK_TYPE_PING:
899 break;
900 case GB_LOOPBACK_TYPE_SINK:
901 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
902 gb->size;
903 break;
904 case GB_LOOPBACK_TYPE_TRANSFER:
905 aggregate_size += sizeof(struct gb_loopback_transfer_request) +
906 sizeof(struct gb_loopback_transfer_response) +
907 gb->size * 2;
908 break;
909 default:
910 return;
911 }
912
913 aggregate_size *= gb->requests_completed;
914 aggregate_size *= USEC_PER_SEC;
915 gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
916 latency);
917}
918
/*
 * gb_loopback_calculate_latency_stats() - record latency for one iteration
 * @gb: loopback instance whose elapsed_nsecs was just updated
 *
 * Converts the measured round-trip time to microseconds, updates the
 * aggregate latency stats, pushes the raw sample into the per-thread
 * kfifo for debugfs, and records the firmware-reported bridge latencies.
 */
static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}
938
/*
 * gb_loopback_calculate_stats() - update stats after one iteration
 * @gb:    loopback instance
 * @error: true if the iteration failed (skip latency accounting)
 *
 * Windowed metrics (throughput, requests/sec) are flushed once roughly a
 * second of wall time has elapsed since gb->ts, or when the final
 * iteration completes; the window is then restarted unless the run is
 * finished.
 */
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	struct timeval te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	do_gettimeofday(&te);
	nlat = gb_loopback_calc_latency(&gb->ts, &te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		/* Restart the measurement window unless we are done. */
		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}
964
965static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
966{
967 if (!(gb->async && gb->outstanding_operations_max))
968 return;
969 wait_event_interruptible(gb->wq_completion,
970 (atomic_read(&gb->outstanding_operations) <
971 gb->outstanding_operations_max) ||
972 kthread_should_stop());
973}
974
/*
 * gb_loopback_fn() - worker thread driving loopback operations
 * @data: the struct gb_loopback this thread serves
 *
 * Sleeps while no test type is configured (dropping the runtime-PM
 * reference while idle), then issues sync or async operations of the
 * configured type/size until send_count reaches iteration_max or the
 * thread is stopped.  Sync iterations update stats inline; async
 * iterations are accounted in their completion handlers.
 *
 * Return: 0 on normal stop, or a runtime-PM error.
 */
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			/* Idle: release PM reference until a test starts. */
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			/*
			 * For async runs, all sends are out but completions
			 * may still be pending; only signal user space once
			 * iteration_count has caught up.
			 */
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj,  NULL,
						"iteration_count");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		/* Snapshot test parameters under the lock. */
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (gb->ts.tv_usec == 0 && gb->ts.tv_sec == 0)
			do_gettimeofday(&gb->ts);
		mutex_unlock(&gb->mutex);

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING) {
				error = gb_loopback_async_ping(gb);
			} else if (type == GB_LOOPBACK_TYPE_TRANSFER) {
				error = gb_loopback_async_transfer(gb, size);
			} else if (type == GB_LOOPBACK_TYPE_SINK) {
				error = gb_loopback_async_sink(gb, size);
			}

			if (error)
				gb->error++;
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		if (us_wait)
			udelay(us_wait);
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}
1063
1064static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
1065 struct kfifo *kfifo,
1066 struct mutex *mutex)
1067{
1068 u32 latency;
1069 int retval;
1070
1071 if (kfifo_len(kfifo) == 0) {
1072 retval = -EAGAIN;
1073 goto done;
1074 }
1075
1076 mutex_lock(mutex);
1077 retval = kfifo_out(kfifo, &latency, sizeof(latency));
1078 if (retval > 0) {
1079 seq_printf(s, "%u", latency);
1080 retval = 0;
1081 }
1082 mutex_unlock(mutex);
1083done:
1084 return retval;
1085}
1086
1087static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
1088{
1089 struct gb_loopback *gb = s->private;
1090
1091 return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
1092 &gb->mutex);
1093}
1094
1095static int gb_loopback_latency_open(struct inode *inode, struct file *file)
1096{
1097 return single_open(file, gb_loopback_dbgfs_latency_show,
1098 inode->i_private);
1099}
1100
/* File operations for the per-connection debugfs "raw_latency_*" file. */
static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open = gb_loopback_latency_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1107
1108static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
1109 struct list_head *lhb)
1110{
1111 struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
1112 struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
1113 struct gb_connection *ca = a->connection;
1114 struct gb_connection *cb = b->connection;
1115
1116 if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
1117 return -1;
1118 if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
1119 return 1;
1120 if (ca->bundle->id < cb->bundle->id)
1121 return -1;
1122 if (cb->bundle->id < ca->bundle->id)
1123 return 1;
1124 if (ca->intf_cport_id < cb->intf_cport_id)
1125 return -1;
1126 else if (cb->intf_cport_id < ca->intf_cport_id)
1127 return 1;
1128
1129 return 0;
1130}
1131
/*
 * gb_loopback_insert_id() - add a device to the global list and renumber
 * @gb: newly probed loopback instance
 *
 * Appends @gb, re-sorts the global list by bus position, then assigns each
 * entry a one-hot lbid bitmask based on its sorted position.
 *
 * NOTE(review): with 32 or more loopback devices the shift below exceeds
 * the width of the (apparently 32-bit) lbid — presumably the device count
 * is bounded well below that; confirm.
 */
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}
1145
1146#define DEBUGFS_NAMELEN 32
1147
/*
 * gb_loopback_probe() - set up a loopback device for a matching bundle
 * @bundle: greybus bundle being bound
 * @id:     matching bundle id entry
 *
 * Creates the connection, sysfs device, debugfs file, latency kfifos and
 * the worker thread, then registers the device in the global list.  Error
 * unwinding below releases resources strictly in reverse order of
 * acquisition.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	/* Loopback bundles carry exactly one CPort. */
	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	/* First device computes the global maximum transfer payload. */
	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | S_IRUGO, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	if (kfifo_alloc(&gb->kfifo_ts, kfifo_depth * sizeof(struct timeval) * 2,
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_kfifo0;
	}

	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo1;
	}

	/* Publish the device in the globally sorted list. */
	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo1:
	kfifo_free(&gb->kfifo_ts);
out_kfifo0:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}
1276
/*
 * gb_loopback_disconnect() - tear down a loopback device
 * @bundle: greybus bundle being unbound
 *
 * Disables the connection first so no new operations can arrive, stops
 * the worker thread, then releases resources in reverse order of probe.
 */
static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	/* If resume fails, keep a PM reference so teardown can proceed. */
	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	kfifo_free(&gb->kfifo_ts);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}
1315
/* Match any bundle advertising the loopback device class. */
static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);
1321
/* Greybus driver glue: probe/disconnect against the id table above. */
static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};
1328
/*
 * loopback_init() - module init: global state, class and driver registration
 *
 * The debugfs directory is created unconditionally and cleaned up on any
 * registration failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);
1355
/* Module exit: undo loopback_init() in reverse order, then drop the ida. */
static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);
1364
1365MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/manifest.c b/drivers/staging/greybus/manifest.c
new file mode 100644
index 000000000000..3d1592fc94ea
--- /dev/null
+++ b/drivers/staging/greybus/manifest.c
@@ -0,0 +1,535 @@
1/*
2 * Greybus manifest parsing
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include "greybus.h"
11
12static const char *get_descriptor_type_string(u8 type)
13{
14 switch(type) {
15 case GREYBUS_TYPE_INVALID:
16 return "invalid";
17 case GREYBUS_TYPE_STRING:
18 return "string";
19 case GREYBUS_TYPE_INTERFACE:
20 return "interface";
21 case GREYBUS_TYPE_CPORT:
22 return "cport";
23 case GREYBUS_TYPE_BUNDLE:
24 return "bundle";
25 default:
26 WARN_ON(1);
27 return "unknown";
28 }
29}
30
31/*
32 * We scan the manifest once to identify where all the descriptors
33 * are. The result is a list of these manifest_desc structures. We
34 * then pick through them for what we're looking for (starting with
35 * the interface descriptor). As each is processed we remove it from
36 * the list. When we're done the list should (probably) be empty.
37 */
struct manifest_desc {
	struct list_head links;		/* entry in intf->manifest_descs */

	size_t size;			/* descriptor size incl. padding */
	void *data;			/* payload, just past the header */
	enum greybus_descriptor_type type;
};
45
/* Unlink one tracked descriptor and free its tracking structure. */
static void release_manifest_descriptor(struct manifest_desc *descriptor)
{
	list_del(&descriptor->links);
	kfree(descriptor);
}
51
52static void release_manifest_descriptors(struct gb_interface *intf)
53{
54 struct manifest_desc *descriptor;
55 struct manifest_desc *next;
56
57 list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
58 release_manifest_descriptor(descriptor);
59}
60
61static void release_cport_descriptors(struct list_head *head, u8 bundle_id)
62{
63 struct manifest_desc *desc, *tmp;
64 struct greybus_descriptor_cport *desc_cport;
65
66 list_for_each_entry_safe(desc, tmp, head, links) {
67 desc_cport = desc->data;
68
69 if (desc->type != GREYBUS_TYPE_CPORT)
70 continue;
71
72 if (desc_cport->bundle == bundle_id)
73 release_manifest_descriptor(desc);
74 }
75}
76
77static struct manifest_desc *get_next_bundle_desc(struct gb_interface *intf)
78{
79 struct manifest_desc *descriptor;
80 struct manifest_desc *next;
81
82 list_for_each_entry_safe(descriptor, next, &intf->manifest_descs, links)
83 if (descriptor->type == GREYBUS_TYPE_BUNDLE)
84 return descriptor;
85
86 return NULL;
87}
88
89/*
90 * Validate the given descriptor. Its reported size must fit within
91 * the number of bytes remaining, and it must have a recognized
92 * type. Check that the reported size is at least as big as what
93 * we expect to see. (It could be bigger, perhaps for a new version
94 * of the format.)
95 *
96 * Returns the (non-zero) number of bytes consumed by the descriptor,
97 * or a negative errno.
98 */
static int identify_descriptor(struct gb_interface *intf,
			       struct greybus_descriptor *desc, size_t size)
{
	struct greybus_descriptor_header *desc_header = &desc->header;
	struct manifest_desc *descriptor;
	size_t desc_size;
	size_t expected_size;

	if (size < sizeof(*desc_header)) {
		dev_err(&intf->dev, "manifest too small (%zu < %zu)\n",
			size, sizeof(*desc_header));
		return -EINVAL;		/* Must at least have header */
	}

	/* The reported size must fit inside the remaining buffer. */
	desc_size = le16_to_cpu(desc_header->size);
	if (desc_size > size) {
		dev_err(&intf->dev, "descriptor too big (%zu > %zu)\n",
			desc_size, size);
		return -EINVAL;
	}

	/* Descriptor needs to at least have a header */
	expected_size = sizeof(*desc_header);

	switch (desc_header->type) {
	case GREYBUS_TYPE_STRING:
		/*
		 * NOTE(review): desc->string.length is read here before
		 * desc_size has been verified to cover the string header —
		 * presumably safe because desc_size <= size bounds the
		 * buffer, but confirm the last-descriptor case.
		 */
		expected_size += sizeof(struct greybus_descriptor_string);
		expected_size += desc->string.length;

		/* String descriptors are padded to 4 byte boundaries */
		expected_size = ALIGN(expected_size, 4);
		break;
	case GREYBUS_TYPE_INTERFACE:
		expected_size += sizeof(struct greybus_descriptor_interface);
		break;
	case GREYBUS_TYPE_BUNDLE:
		expected_size += sizeof(struct greybus_descriptor_bundle);
		break;
	case GREYBUS_TYPE_CPORT:
		expected_size += sizeof(struct greybus_descriptor_cport);
		break;
	case GREYBUS_TYPE_INVALID:
	default:
		dev_err(&intf->dev, "invalid descriptor type (%u)\n",
			desc_header->type);
		return -EINVAL;
	}

	if (desc_size < expected_size) {
		dev_err(&intf->dev, "%s descriptor too small (%zu < %zu)\n",
			get_descriptor_type_string(desc_header->type),
			desc_size, expected_size);
		return -EINVAL;
	}

	/* Descriptor bigger than what we expect */
	if (desc_size > expected_size) {
		dev_warn(&intf->dev, "%s descriptor size mismatch (want %zu got %zu)\n",
			 get_descriptor_type_string(desc_header->type),
			 expected_size, desc_size);
	}

	descriptor = kzalloc(sizeof(*descriptor), GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;

	/* Track the descriptor's payload (data just past the header). */
	descriptor->size = desc_size;
	descriptor->data = (char *)desc + sizeof(*desc_header);
	descriptor->type = desc_header->type;
	list_add_tail(&descriptor->links, &intf->manifest_descs);

	/* desc_size is positive and is known to fit in a signed int */

	return desc_size;
}
174
175/*
176 * Find the string descriptor having the given id, validate it, and
177 * allocate a duplicate copy of it. The duplicate has an extra byte
178 * which guarantees the returned string is NUL-terminated.
179 *
180 * String index 0 is valid (it represents "no string"), and for
181 * that a null pointer is returned.
182 *
183 * Otherwise returns a pointer to a newly-allocated copy of the
184 * descriptor string, or an error-coded pointer on failure.
185 */
static char *gb_string_get(struct gb_interface *intf, u8 string_id)
{
	struct greybus_descriptor_string *desc_string;
	struct manifest_desc *descriptor;
	bool found = false;
	char *string;

	/* A zero string id means no string (but no error) */
	if (!string_id)
		return NULL;

	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type != GREYBUS_TYPE_STRING)
			continue;

		desc_string = descriptor->data;
		if (desc_string->id == string_id) {
			found = true;
			break;
		}
	}
	if (!found)
		return ERR_PTR(-ENOENT);

	/*
	 * Allocate an extra byte so we can guarantee it's NUL-terminated.
	 * NOTE(review): this copies length + 1 bytes from the descriptor —
	 * presumably the 4-byte padding guarantees that byte is in-bounds;
	 * confirm for a descriptor whose string exactly fills the padding.
	 */
	string = kmemdup(&desc_string->string, desc_string->length + 1,
			 GFP_KERNEL);
	if (!string)
		return ERR_PTR(-ENOMEM);
	string[desc_string->length] = '\0';

	/* Ok we've used this string, so we're done with it */
	release_manifest_descriptor(descriptor);

	return string;
}
222
223/*
224 * Find cport descriptors in the manifest associated with the given
225 * bundle, and set up data structures for the functions that use
226 * them. Returns the number of cports set up for the bundle, or 0
227 * if there is an error.
228 */
static u32 gb_manifest_parse_cports(struct gb_bundle *bundle)
{
	struct gb_interface *intf = bundle->intf;
	struct greybus_descriptor_cport *desc_cport;
	struct manifest_desc *desc, *next, *tmp;
	LIST_HEAD(list);
	u8 bundle_id = bundle->id;
	u16 cport_id;
	u32 count = 0;
	int i;

	/* Set up all cport descriptors associated with this bundle */
	list_for_each_entry_safe(desc, next, &intf->manifest_descs, links) {
		if (desc->type != GREYBUS_TYPE_CPORT)
			continue;

		desc_cport = desc->data;
		if (desc_cport->bundle != bundle_id)
			continue;

		cport_id = le16_to_cpu(desc_cport->id);
		if (cport_id > CPORT_ID_MAX)
			goto exit;

		/* Nothing else should have its cport_id as control cport id */
		if (cport_id == GB_CONTROL_CPORT_ID) {
			dev_err(&bundle->dev, "invalid cport id found (%02u)\n",
				cport_id);
			goto exit;
		}

		/*
		 * Found one, move it to our temporary list after checking for
		 * duplicates.
		 */
		list_for_each_entry(tmp, &list, links) {
			desc_cport = tmp->data;
			if (cport_id == le16_to_cpu(desc_cport->id)) {
				dev_err(&bundle->dev,
					"duplicate CPort %u found\n",
					cport_id);
				goto exit;
			}
		}
		list_move_tail(&desc->links, &list);
		count++;
	}

	if (!count)
		return 0;

	/* Copy the validated descriptors into the bundle's own array. */
	bundle->cport_desc = kcalloc(count, sizeof(*bundle->cport_desc),
				     GFP_KERNEL);
	if (!bundle->cport_desc)
		goto exit;

	bundle->num_cports = count;

	i = 0;
	list_for_each_entry_safe(desc, next, &list, links) {
		desc_cport = desc->data;
		memcpy(&bundle->cport_desc[i++], desc_cport,
		       sizeof(*desc_cport));

		/* Release the cport descriptor */
		release_manifest_descriptor(desc);
	}

	return count;
exit:
	release_cport_descriptors(&list, bundle_id);
	/*
	 * Free all cports for this bundle to avoid 'excess descriptors'
	 * warnings.
	 */
	release_cport_descriptors(&intf->manifest_descs, bundle_id);

	return 0;	/* Error; count should also be 0 */
}
308
309/*
310 * Find bundle descriptors in the manifest and set up their data
311 * structures. Returns the number of bundles set up for the
312 * given interface.
313 */
static u32 gb_manifest_parse_bundles(struct gb_interface *intf)
{
	struct manifest_desc *desc;
	struct gb_bundle *bundle;
	struct gb_bundle *bundle_next;
	u32 count = 0;
	u8 bundle_id;
	u8 class;

	while ((desc = get_next_bundle_desc(intf))) {
		struct greybus_descriptor_bundle *desc_bundle;

		/* Found one.  Set up its bundle structure. */
		desc_bundle = desc->data;
		bundle_id = desc_bundle->id;
		class = desc_bundle->class;

		/* Done with this bundle descriptor */
		release_manifest_descriptor(desc);

		/* Ignore any legacy control bundles */
		if (bundle_id == GB_CONTROL_BUNDLE_ID) {
			dev_dbg(&intf->dev, "%s - ignoring control bundle\n",
				__func__);
			release_cport_descriptors(&intf->manifest_descs,
						  bundle_id);
			continue;
		}

		/* Nothing else should have its class set to control class */
		if (class == GREYBUS_CLASS_CONTROL) {
			dev_err(&intf->dev,
				"bundle %u cannot use control class\n",
				bundle_id);
			goto cleanup;
		}

		bundle = gb_bundle_create(intf, bundle_id, class);
		if (!bundle)
			goto cleanup;

		/*
		 * Now go set up this bundle's functions and cports.
		 *
		 * A 'bundle' represents a device in greybus. It may require
		 * multiple cports for its functioning. If we fail to setup any
		 * cport of a bundle, we better reject the complete bundle as
		 * the device may not be able to function properly then.
		 *
		 * But, failing to setup a cport of bundle X doesn't mean that
		 * the device corresponding to bundle Y will not work properly.
		 * Bundles should be treated as separate independent devices.
		 *
		 * While parsing manifest for an interface, treat bundles as
		 * separate entities and don't reject entire interface and its
		 * bundles on failing to initialize a cport. But make sure the
		 * bundle which needs the cport, gets destroyed properly.
		 */
		if (!gb_manifest_parse_cports(bundle)) {
			gb_bundle_destroy(bundle);
			continue;
		}

		count++;
	}

	return count;
cleanup:
	/* An error occurred; undo any changes we've made */
	list_for_each_entry_safe(bundle, bundle_next, &intf->bundles, links) {
		gb_bundle_destroy(bundle);
		count--;
	}
	return 0;	/* Error; count should also be 0 */
}
389
/*
 * gb_manifest_parse_interface() - process the interface descriptor
 * @intf:           interface being parsed
 * @interface_desc: the (single) interface descriptor from the manifest
 *
 * Resolves the vendor/product strings, records the feature flags, then
 * parses the interface's bundles.  On any failure the strings assigned to
 * the control structure are freed and cleared again.
 *
 * Return: true on success, false otherwise.
 */
static bool gb_manifest_parse_interface(struct gb_interface *intf,
					struct manifest_desc *interface_desc)
{
	struct greybus_descriptor_interface *desc_intf = interface_desc->data;
	struct gb_control *control = intf->control;
	char *str;

	/* Handle the strings first--they can fail */
	str = gb_string_get(intf, desc_intf->vendor_stringid);
	if (IS_ERR(str))
		return false;
	control->vendor_string = str;

	str = gb_string_get(intf, desc_intf->product_stringid);
	if (IS_ERR(str))
		goto out_free_vendor_string;
	control->product_string = str;

	/* Assign feature flags communicated via manifest */
	intf->features = desc_intf->features;

	/* Release the interface descriptor, now that we're done with it */
	release_manifest_descriptor(interface_desc);

	/* An interface must have at least one bundle descriptor */
	if (!gb_manifest_parse_bundles(intf)) {
		dev_err(&intf->dev, "manifest bundle descriptors not valid\n");
		goto out_err;
	}

	return true;
out_err:
	kfree(control->product_string);
	control->product_string = NULL;
out_free_vendor_string:
	kfree(control->vendor_string);
	control->vendor_string = NULL;

	return false;
}
430
431/*
432 * Parse a buffer containing an interface manifest.
433 *
434 * If we find anything wrong with the content/format of the buffer
435 * we reject it.
436 *
437 * The first requirement is that the manifest's version is
438 * one we can parse.
439 *
440 * We make an initial pass through the buffer and identify all of
441 * the descriptors it contains, keeping track for each its type
442 * and the location size of its data in the buffer.
443 *
444 * Next we scan the descriptors, looking for an interface descriptor;
445 * there must be exactly one of those. When found, we record the
446 * information it contains, and then remove that descriptor (and any
447 * string descriptors it refers to) from further consideration.
448 *
449 * After that we look for the interface's bundles--there must be at
450 * least one of those.
451 *
452 * Returns true if parsing was successful, false otherwise.
453 */
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size)
{
	struct greybus_manifest *manifest;
	struct greybus_manifest_header *header;
	struct greybus_descriptor *desc;
	struct manifest_desc *descriptor;
	struct manifest_desc *interface_desc = NULL;
	u16 manifest_size;
	u32 found = 0;
	bool result;

	/* Manifest descriptor list should be empty here */
	if (WARN_ON(!list_empty(&intf->manifest_descs)))
		return false;

	/* we have to have at _least_ the manifest header */
	if (size < sizeof(*header)) {
		dev_err(&intf->dev, "short manifest (%zu < %zu)\n",
			size, sizeof(*header));
		return false;
	}

	/* Make sure the size is right */
	manifest = data;
	header = &manifest->header;
	manifest_size = le16_to_cpu(header->size);
	if (manifest_size != size) {
		dev_err(&intf->dev, "manifest size mismatch (%zu != %u)\n",
			size, manifest_size);
		return false;
	}

	/* Validate major/minor number */
	if (header->version_major > GREYBUS_VERSION_MAJOR) {
		dev_err(&intf->dev, "manifest version too new (%u.%u > %u.%u)\n",
			header->version_major, header->version_minor,
			GREYBUS_VERSION_MAJOR, GREYBUS_VERSION_MINOR);
		return false;
	}

	/* OK, find all the descriptors */
	desc = manifest->descriptors;
	size -= sizeof(*header);
	while (size) {
		int desc_size;

		desc_size = identify_descriptor(intf, desc, size);
		if (desc_size < 0) {
			result = false;
			goto out;
		}
		/* Advance past this descriptor (size includes padding). */
		desc = (struct greybus_descriptor *)((char *)desc + desc_size);
		size -= desc_size;
	}

	/* There must be a single interface descriptor */
	list_for_each_entry(descriptor, &intf->manifest_descs, links) {
		if (descriptor->type == GREYBUS_TYPE_INTERFACE)
			if (!found++)
				interface_desc = descriptor;
	}
	if (found != 1) {
		dev_err(&intf->dev, "manifest must have 1 interface descriptor (%u found)\n",
			found);
		result = false;
		goto out;
	}

	/* Parse the manifest, starting with the interface descriptor */
	result = gb_manifest_parse_interface(intf, interface_desc);

	/*
	 * We really should have no remaining descriptors, but we
	 * don't know what newer format manifests might leave.
	 */
	if (result && !list_empty(&intf->manifest_descs))
		dev_info(&intf->dev, "excess descriptors in interface manifest\n");
out:
	/* Always drop whatever descriptor tracking remains. */
	release_manifest_descriptors(intf);

	return result;
}
diff --git a/drivers/staging/greybus/manifest.h b/drivers/staging/greybus/manifest.h
new file mode 100644
index 000000000000..d96428407cd7
--- /dev/null
+++ b/drivers/staging/greybus/manifest.h
@@ -0,0 +1,16 @@
1/*
2 * Greybus manifest parsing
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
#ifndef __MANIFEST_H
#define __MANIFEST_H

struct gb_interface;
/* Parse an interface manifest blob of @size bytes; true on success. */
bool gb_manifest_parse(struct gb_interface *intf, void *data, size_t size);

#endif /* __MANIFEST_H */
diff --git a/drivers/staging/greybus/module.c b/drivers/staging/greybus/module.c
new file mode 100644
index 000000000000..69f67ddbd4a3
--- /dev/null
+++ b/drivers/staging/greybus/module.c
@@ -0,0 +1,238 @@
1/*
2 * Greybus Module code
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include "greybus.h"
11#include "greybus_trace.h"
12
13
14static ssize_t eject_store(struct device *dev,
15 struct device_attribute *attr,
16 const char *buf, size_t len)
17{
18 struct gb_module *module = to_gb_module(dev);
19 struct gb_interface *intf;
20 size_t i;
21 long val;
22 int ret;
23
24 ret = kstrtol(buf, 0, &val);
25 if (ret)
26 return ret;
27
28 if (!val)
29 return len;
30
31 for (i = 0; i < module->num_interfaces; ++i) {
32 intf = module->interfaces[i];
33
34 mutex_lock(&intf->mutex);
35 /* Set flag to prevent concurrent activation. */
36 intf->ejected = true;
37 gb_interface_disable(intf);
38 gb_interface_deactivate(intf);
39 mutex_unlock(&intf->mutex);
40 }
41
42 /* Tell the SVC to eject the primary interface. */
43 ret = gb_svc_intf_eject(module->hd->svc, module->module_id);
44 if (ret)
45 return ret;
46
47 return len;
48}
49static DEVICE_ATTR_WO(eject);
50
51static ssize_t module_id_show(struct device *dev,
52 struct device_attribute *attr, char *buf)
53{
54 struct gb_module *module = to_gb_module(dev);
55
56 return sprintf(buf, "%u\n", module->module_id);
57}
58static DEVICE_ATTR_RO(module_id);
59
60static ssize_t num_interfaces_show(struct device *dev,
61 struct device_attribute *attr, char *buf)
62{
63 struct gb_module *module = to_gb_module(dev);
64
65 return sprintf(buf, "%zu\n", module->num_interfaces);
66}
67static DEVICE_ATTR_RO(num_interfaces);
68
/* sysfs attributes of every module device; ATTRIBUTE_GROUPS() generates
 * the module_groups array assigned to dev.groups in gb_module_create(). */
69static struct attribute *module_attrs[] = {
70	&dev_attr_eject.attr,
71	&dev_attr_module_id.attr,
72	&dev_attr_num_interfaces.attr,
73	NULL,
74};
75ATTRIBUTE_GROUPS(module);
76
/* Device release callback: frees the gb_module once the last reference
 * to its embedded struct device is dropped (see gb_module_put()). */
77static void gb_module_release(struct device *dev)
78{
79	struct gb_module *module = to_gb_module(dev);
80
81	trace_gb_module_release(module);
82
83	kfree(module);
84}
85
86struct device_type greybus_module_type = {
87	.name = "greybus_module",
88	.release = gb_module_release,
89};
90
91struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
92 size_t num_interfaces)
93{
94 struct gb_interface *intf;
95 struct gb_module *module;
96 int i;
97
98 module = kzalloc(sizeof(*module) + num_interfaces * sizeof(intf),
99 GFP_KERNEL);
100 if (!module)
101 return NULL;
102
103 module->hd = hd;
104 module->module_id = module_id;
105 module->num_interfaces = num_interfaces;
106
107 module->dev.parent = &hd->dev;
108 module->dev.bus = &greybus_bus_type;
109 module->dev.type = &greybus_module_type;
110 module->dev.groups = module_groups;
111 module->dev.dma_mask = hd->dev.dma_mask;
112 device_initialize(&module->dev);
113 dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id);
114
115 trace_gb_module_create(module);
116
117 for (i = 0; i < num_interfaces; ++i) {
118 intf = gb_interface_create(module, module_id + i);
119 if (!intf) {
120 dev_err(&module->dev, "failed to create interface %u\n",
121 module_id + i);
122 goto err_put_interfaces;
123 }
124 module->interfaces[i] = intf;
125 }
126
127 return module;
128
129err_put_interfaces:
130 for (--i; i > 0; --i)
131 gb_interface_put(module->interfaces[i]);
132
133 put_device(&module->dev);
134
135 return NULL;
136}
137
138/*
139 * Register and enable an interface after first attempting to activate it.
140 */
141static void gb_module_register_interface(struct gb_interface *intf)
142{
143 struct gb_module *module = intf->module;
144 u8 intf_id = intf->interface_id;
145 int ret;
146
147 mutex_lock(&intf->mutex);
148
149 ret = gb_interface_activate(intf);
150 if (ret) {
151 if (intf->type != GB_INTERFACE_TYPE_DUMMY) {
152 dev_err(&module->dev,
153 "failed to activate interface %u: %d\n",
154 intf_id, ret);
155 }
156
157 gb_interface_add(intf);
158 goto err_unlock;
159 }
160
161 ret = gb_interface_add(intf);
162 if (ret)
163 goto err_interface_deactivate;
164
165 ret = gb_interface_enable(intf);
166 if (ret) {
167 dev_err(&module->dev, "failed to enable interface %u: %d\n",
168 intf_id, ret);
169 goto err_interface_deactivate;
170 }
171
172 mutex_unlock(&intf->mutex);
173
174 return;
175
176err_interface_deactivate:
177 gb_interface_deactivate(intf);
178err_unlock:
179 mutex_unlock(&intf->mutex);
180}
181
182static void gb_module_deregister_interface(struct gb_interface *intf)
183{
184 /* Mark as disconnected to prevent I/O during disable. */
185 if (intf->module->disconnected)
186 intf->disconnected = true;
187
188 mutex_lock(&intf->mutex);
189 intf->removed = true;
190 gb_interface_disable(intf);
191 gb_interface_deactivate(intf);
192 mutex_unlock(&intf->mutex);
193
194 gb_interface_del(intf);
195}
196
197/* Register a module and its interfaces. */
198int gb_module_add(struct gb_module *module)
199{
200 size_t i;
201 int ret;
202
203 ret = device_add(&module->dev);
204 if (ret) {
205 dev_err(&module->dev, "failed to register module: %d\n", ret);
206 return ret;
207 }
208
209 trace_gb_module_add(module);
210
211 for (i = 0; i < module->num_interfaces; ++i)
212 gb_module_register_interface(module->interfaces[i]);
213
214 return 0;
215}
216
217/* Deregister a module and its interfaces. */
218void gb_module_del(struct gb_module *module)
219{
220 size_t i;
221
222 for (i = 0; i < module->num_interfaces; ++i)
223 gb_module_deregister_interface(module->interfaces[i]);
224
225 trace_gb_module_del(module);
226
227 device_del(&module->dev);
228}
229
230void gb_module_put(struct gb_module *module)
231{
232 size_t i;
233
234 for (i = 0; i < module->num_interfaces; ++i)
235 gb_interface_put(module->interfaces[i]);
236
237 put_device(&module->dev);
238}
diff --git a/drivers/staging/greybus/module.h b/drivers/staging/greybus/module.h
new file mode 100644
index 000000000000..88a97ce04243
--- /dev/null
+++ b/drivers/staging/greybus/module.h
@@ -0,0 +1,34 @@
1/*
2 * Greybus Module code
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __MODULE_H
11#define __MODULE_H
12
13struct gb_module {
14 struct device dev;
15 struct gb_host_device *hd;
16
17 struct list_head hd_node;
18
19 u8 module_id;
20 size_t num_interfaces;
21
22 bool disconnected;
23
24 struct gb_interface *interfaces[0];
25};
26#define to_gb_module(d) container_of(d, struct gb_module, dev)
27
28struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id,
29 size_t num_interfaces);
30int gb_module_add(struct gb_module *module);
31void gb_module_del(struct gb_module *module);
32void gb_module_put(struct gb_module *module);
33
34#endif /* __MODULE_H */
diff --git a/drivers/staging/greybus/operation.c b/drivers/staging/greybus/operation.c
new file mode 100644
index 000000000000..0123109a1070
--- /dev/null
+++ b/drivers/staging/greybus/operation.c
@@ -0,0 +1,1239 @@
1/*
2 * Greybus operations
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/sched.h>
14#include <linux/wait.h>
15#include <linux/workqueue.h>
16
17#include "greybus.h"
18#include "greybus_trace.h"
19
/* Slab caches backing operation and message allocations. */
20static struct kmem_cache *gb_operation_cache;
21static struct kmem_cache *gb_message_cache;
22
23/* Workqueue to handle Greybus operation completions. */
24static struct workqueue_struct *gb_operation_completion_wq;
25
26/* Wait queue for synchronous cancellations. */
27static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);
28
29/*
30 * Protects updates to operation->errno.
31 */
32static DEFINE_SPINLOCK(gb_operations_lock);
33
/* Forward declaration; defined later in this file. */
34static int gb_operation_response_send(struct gb_operation *operation,
35					int errno);
36
37/*
38 * Increment operation active count and add to connection list unless the
39 * connection is going away.
40 *
41 * Caller holds operation reference.
42 */
43static int gb_operation_get_active(struct gb_operation *operation)
44{
45 struct gb_connection *connection = operation->connection;
46 unsigned long flags;
47
48 spin_lock_irqsave(&connection->lock, flags);
49 switch (connection->state) {
50 case GB_CONNECTION_STATE_ENABLED:
51 break;
52 case GB_CONNECTION_STATE_ENABLED_TX:
53 if (gb_operation_is_incoming(operation))
54 goto err_unlock;
55 break;
56 case GB_CONNECTION_STATE_DISCONNECTING:
57 if (!gb_operation_is_core(operation))
58 goto err_unlock;
59 break;
60 default:
61 goto err_unlock;
62 }
63
64 if (operation->active++ == 0)
65 list_add_tail(&operation->links, &connection->operations);
66
67 trace_gb_operation_get_active(operation);
68
69 spin_unlock_irqrestore(&connection->lock, flags);
70
71 return 0;
72
73err_unlock:
74 spin_unlock_irqrestore(&connection->lock, flags);
75
76 return -ENOTCONN;
77}
78
79/* Caller holds operation reference. */
80static void gb_operation_put_active(struct gb_operation *operation)
81{
82 struct gb_connection *connection = operation->connection;
83 unsigned long flags;
84
85 spin_lock_irqsave(&connection->lock, flags);
86
87 trace_gb_operation_put_active(operation);
88
89 if (--operation->active == 0) {
90 list_del(&operation->links);
91 if (atomic_read(&operation->waiters))
92 wake_up(&gb_operation_cancellation_queue);
93 }
94 spin_unlock_irqrestore(&connection->lock, flags);
95}
96
97static bool gb_operation_is_active(struct gb_operation *operation)
98{
99 struct gb_connection *connection = operation->connection;
100 unsigned long flags;
101 bool ret;
102
103 spin_lock_irqsave(&connection->lock, flags);
104 ret = operation->active;
105 spin_unlock_irqrestore(&connection->lock, flags);
106
107 return ret;
108}
109
110/*
111 * Set an operation's result.
112 *
113 * Initially an outgoing operation's errno value is -EBADR.
114 * If no error occurs before sending the request message the only
115 * valid value operation->errno can be set to is -EINPROGRESS,
116 * indicating the request has been (or rather is about to be) sent.
117 * At that point nobody should be looking at the result until the
118 * response arrives.
119 *
120 * The first time the result gets set after the request has been
121 * sent, that result "sticks." That is, if two concurrent threads
122 * race to set the result, the first one wins. The return value
123 * tells the caller whether its result was recorded; if not the
124 * caller has nothing more to do.
125 *
126 * The result value -EILSEQ is reserved to signal an implementation
127 * error; if it's ever observed, the code performing the request has
128 * done something fundamentally wrong. It is an error to try to set
129 * the result to -EBADR, and attempts to do so result in a warning,
130 * and -EILSEQ is used instead. Similarly, the only valid result
131 * value to set for an operation in initial state is -EINPROGRESS.
132 * Attempts to do otherwise will also record a (successful) -EILSEQ
133 * operation result.
134 */
135static bool gb_operation_result_set(struct gb_operation *operation, int result)
136{
137 unsigned long flags;
138 int prev;
139
140 if (result == -EINPROGRESS) {
141 /*
142 * -EINPROGRESS is used to indicate the request is
143 * in flight. It should be the first result value
144 * set after the initial -EBADR. Issue a warning
145 * and record an implementation error if it's
146 * set at any other time.
147 */
148 spin_lock_irqsave(&gb_operations_lock, flags);
149 prev = operation->errno;
150 if (prev == -EBADR)
151 operation->errno = result;
152 else
153 operation->errno = -EILSEQ;
154 spin_unlock_irqrestore(&gb_operations_lock, flags);
155 WARN_ON(prev != -EBADR);
156
157 return true;
158 }
159
160 /*
161 * The first result value set after a request has been sent
162 * will be the final result of the operation. Subsequent
163 * attempts to set the result are ignored.
164 *
165 * Note that -EBADR is a reserved "initial state" result
166 * value. Attempts to set this value result in a warning,
167 * and the result code is set to -EILSEQ instead.
168 */
169 if (WARN_ON(result == -EBADR))
170 result = -EILSEQ; /* Nobody should be setting -EBADR */
171
172 spin_lock_irqsave(&gb_operations_lock, flags);
173 prev = operation->errno;
174 if (prev == -EINPROGRESS)
175 operation->errno = result; /* First and final result */
176 spin_unlock_irqrestore(&gb_operations_lock, flags);
177
178 return prev == -EINPROGRESS;
179}
180
181int gb_operation_result(struct gb_operation *operation)
182{
183 int result = operation->errno;
184
185 WARN_ON(result == -EBADR);
186 WARN_ON(result == -EINPROGRESS);
187
188 return result;
189}
190EXPORT_SYMBOL_GPL(gb_operation_result);
191
192/*
193 * Looks up an outgoing operation on a connection and returns a refcounted
194 * pointer if found, or NULL otherwise.
195 */
196static struct gb_operation *
197gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
198{
199 struct gb_operation *operation;
200 unsigned long flags;
201 bool found = false;
202
203 spin_lock_irqsave(&connection->lock, flags);
204 list_for_each_entry(operation, &connection->operations, links)
205 if (operation->id == operation_id &&
206 !gb_operation_is_incoming(operation)) {
207 gb_operation_get(operation);
208 found = true;
209 break;
210 }
211 spin_unlock_irqrestore(&connection->lock, flags);
212
213 return found ? operation : NULL;
214}
215
216static int gb_message_send(struct gb_message *message, gfp_t gfp)
217{
218 struct gb_connection *connection = message->operation->connection;
219
220 trace_gb_message_send(message);
221 return connection->hd->driver->message_send(connection->hd,
222 connection->hd_cport_id,
223 message,
224 gfp);
225}
226
227/*
228 * Cancel a message we have passed to the host device layer to be sent.
229 */
230static void gb_message_cancel(struct gb_message *message)
231{
232 struct gb_host_device *hd = message->operation->connection->hd;
233
234 hd->driver->message_cancel(message);
235}
236
237static void gb_operation_request_handle(struct gb_operation *operation)
238{
239 struct gb_connection *connection = operation->connection;
240 int status;
241 int ret;
242
243 if (connection->handler) {
244 status = connection->handler(operation);
245 } else {
246 dev_err(&connection->hd->dev,
247 "%s: unexpected incoming request of type 0x%02x\n",
248 connection->name, operation->type);
249
250 status = -EPROTONOSUPPORT;
251 }
252
253 ret = gb_operation_response_send(operation, status);
254 if (ret) {
255 dev_err(&connection->hd->dev,
256 "%s: failed to send response %d for type 0x%02x: %d\n",
257 connection->name, status, operation->type, ret);
258 return;
259 }
260}
261
262/*
263 * Process operation work.
264 *
265 * For incoming requests, call the protocol request handler. The operation
266 * result should be -EINPROGRESS at this point.
267 *
268 * For outgoing requests, the operation result value should have
269 * been set before queueing this. The operation callback function
270 * allows the original requester to know the request has completed
271 * and its result is available.
272 */
273static void gb_operation_work(struct work_struct *work)
274{
275 struct gb_operation *operation;
276
277 operation = container_of(work, struct gb_operation, work);
278
279 if (gb_operation_is_incoming(operation))
280 gb_operation_request_handle(operation);
281 else
282 operation->callback(operation);
283
284 gb_operation_put_active(operation);
285 gb_operation_put(operation);
286}
287
288static void gb_operation_message_init(struct gb_host_device *hd,
289 struct gb_message *message, u16 operation_id,
290 size_t payload_size, u8 type)
291{
292 struct gb_operation_msg_hdr *header;
293
294 header = message->buffer;
295
296 message->header = header;
297 message->payload = payload_size ? header + 1 : NULL;
298 message->payload_size = payload_size;
299
300 /*
301 * The type supplied for incoming message buffers will be
302 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
303 * arriving data so there's no need to initialize the message header.
304 */
305 if (type != GB_REQUEST_TYPE_INVALID) {
306 u16 message_size = (u16)(sizeof(*header) + payload_size);
307
308 /*
309 * For a request, the operation id gets filled in
310 * when the message is sent. For a response, it
311 * will be copied from the request by the caller.
312 *
313 * The result field in a request message must be
314 * zero. It will be set just prior to sending for
315 * a response.
316 */
317 header->size = cpu_to_le16(message_size);
318 header->operation_id = 0;
319 header->type = type;
320 header->result = 0;
321 }
322}
323
324/*
325 * Allocate a message to be used for an operation request or response.
326 * Both types of message contain a common header. The request message
327 * for an outgoing operation is outbound, as is the response message
328 * for an incoming operation. The message header for an outbound
329 * message is partially initialized here.
330 *
331 * The headers for inbound messages don't need to be initialized;
332 * they'll be filled in by arriving data.
333 *
334 * Our message buffers have the following layout:
335 * message header \_ these combined are
336 * message payload / the message size
337 */
338static struct gb_message *
339gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
340 size_t payload_size, gfp_t gfp_flags)
341{
342 struct gb_message *message;
343 struct gb_operation_msg_hdr *header;
344 size_t message_size = payload_size + sizeof(*header);
345
346 if (message_size > hd->buffer_size_max) {
347 dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
348 message_size, hd->buffer_size_max);
349 return NULL;
350 }
351
352 /* Allocate the message structure and buffer. */
353 message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
354 if (!message)
355 return NULL;
356
357 message->buffer = kzalloc(message_size, gfp_flags);
358 if (!message->buffer)
359 goto err_free_message;
360
361 /* Initialize the message. Operation id is filled in later. */
362 gb_operation_message_init(hd, message, 0, payload_size, type);
363
364 return message;
365
366err_free_message:
367 kmem_cache_free(gb_message_cache, message);
368
369 return NULL;
370}
371
372static void gb_operation_message_free(struct gb_message *message)
373{
374 kfree(message->buffer);
375 kmem_cache_free(gb_message_cache, message);
376}
377
378/*
379 * Map an enum gb_operation_status value (which is represented in a
380 * message as a single byte) to an appropriate Linux negative errno.
381 */
382static int gb_operation_status_map(u8 status)
383{
384 switch (status) {
385 case GB_OP_SUCCESS:
386 return 0;
387 case GB_OP_INTERRUPTED:
388 return -EINTR;
389 case GB_OP_TIMEOUT:
390 return -ETIMEDOUT;
391 case GB_OP_NO_MEMORY:
392 return -ENOMEM;
393 case GB_OP_PROTOCOL_BAD:
394 return -EPROTONOSUPPORT;
395 case GB_OP_OVERFLOW:
396 return -EMSGSIZE;
397 case GB_OP_INVALID:
398 return -EINVAL;
399 case GB_OP_RETRY:
400 return -EAGAIN;
401 case GB_OP_NONEXISTENT:
402 return -ENODEV;
403 case GB_OP_MALFUNCTION:
404 return -EILSEQ;
405 case GB_OP_UNKNOWN_ERROR:
406 default:
407 return -EIO;
408 }
409}
410
411/*
412 * Map a Linux errno value (from operation->errno) into the value
413 * that should represent it in a response message status sent
414 * over the wire. Returns an enum gb_operation_status value (which
415 * is represented in a message as a single byte).
416 */
417static u8 gb_operation_errno_map(int errno)
418{
419 switch (errno) {
420 case 0:
421 return GB_OP_SUCCESS;
422 case -EINTR:
423 return GB_OP_INTERRUPTED;
424 case -ETIMEDOUT:
425 return GB_OP_TIMEOUT;
426 case -ENOMEM:
427 return GB_OP_NO_MEMORY;
428 case -EPROTONOSUPPORT:
429 return GB_OP_PROTOCOL_BAD;
430 case -EMSGSIZE:
431 return GB_OP_OVERFLOW; /* Could be underflow too */
432 case -EINVAL:
433 return GB_OP_INVALID;
434 case -EAGAIN:
435 return GB_OP_RETRY;
436 case -EILSEQ:
437 return GB_OP_MALFUNCTION;
438 case -ENODEV:
439 return GB_OP_NONEXISTENT;
440 case -EIO:
441 default:
442 return GB_OP_UNKNOWN_ERROR;
443 }
444}
445
446bool gb_operation_response_alloc(struct gb_operation *operation,
447 size_t response_size, gfp_t gfp)
448{
449 struct gb_host_device *hd = operation->connection->hd;
450 struct gb_operation_msg_hdr *request_header;
451 struct gb_message *response;
452 u8 type;
453
454 type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
455 response = gb_operation_message_alloc(hd, type, response_size, gfp);
456 if (!response)
457 return false;
458 response->operation = operation;
459
460 /*
461 * Size and type get initialized when the message is
462 * allocated. The errno will be set before sending. All
463 * that's left is the operation id, which we copy from the
464 * request message header (as-is, in little-endian order).
465 */
466 request_header = operation->request->header;
467 response->header->operation_id = request_header->operation_id;
468 operation->response = response;
469
470 return true;
471}
472EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
473
474/*
475 * Create a Greybus operation to be sent over the given connection.
476 * The request buffer will be big enough for a payload of the given
477 * size.
478 *
479 * For outgoing requests, the request message's header will be
480 * initialized with the type of the request and the message size.
481 * Outgoing operations must also specify the response buffer size,
482 * which must be sufficient to hold all expected response data. The
483 * response message header will eventually be overwritten, so there's
484 * no need to initialize it here.
485 *
486 * Request messages for incoming operations can arrive in interrupt
487 * context, so they must be allocated with GFP_ATOMIC. In this case
488 * the request buffer will be immediately overwritten, so there is
489 * no need to initialize the message header. Responsibility for
490 * allocating a response buffer lies with the incoming request
491 * handler for a protocol. So we don't allocate that here.
492 *
493 * Returns a pointer to the new operation or a null pointer if an
494 * error occurs.
495 */
496static struct gb_operation *
497gb_operation_create_common(struct gb_connection *connection, u8 type,
498 size_t request_size, size_t response_size,
499 unsigned long op_flags, gfp_t gfp_flags)
500{
501 struct gb_host_device *hd = connection->hd;
502 struct gb_operation *operation;
503
504 operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
505 if (!operation)
506 return NULL;
507 operation->connection = connection;
508
509 operation->request = gb_operation_message_alloc(hd, type, request_size,
510 gfp_flags);
511 if (!operation->request)
512 goto err_cache;
513 operation->request->operation = operation;
514
515 /* Allocate the response buffer for outgoing operations */
516 if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
517 if (!gb_operation_response_alloc(operation, response_size,
518 gfp_flags)) {
519 goto err_request;
520 }
521 }
522
523 operation->flags = op_flags;
524 operation->type = type;
525 operation->errno = -EBADR; /* Initial value--means "never set" */
526
527 INIT_WORK(&operation->work, gb_operation_work);
528 init_completion(&operation->completion);
529 kref_init(&operation->kref);
530 atomic_set(&operation->waiters, 0);
531
532 return operation;
533
534err_request:
535 gb_operation_message_free(operation->request);
536err_cache:
537 kmem_cache_free(gb_operation_cache, operation);
538
539 return NULL;
540}
541
542/*
543 * Create a new operation associated with the given connection. The
544 * request and response sizes provided are the number of bytes
545 * required to hold the request/response payload only. Both of
546 * these are allowed to be 0. Note that 0x00 is reserved as an
547 * invalid operation type for all protocols, and this is enforced
548 * here.
549 */
550struct gb_operation *
551gb_operation_create_flags(struct gb_connection *connection,
552 u8 type, size_t request_size,
553 size_t response_size, unsigned long flags,
554 gfp_t gfp)
555{
556 struct gb_operation *operation;
557
558 if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
559 return NULL;
560 if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
561 type &= ~GB_MESSAGE_TYPE_RESPONSE;
562
563 if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
564 flags &= GB_OPERATION_FLAG_USER_MASK;
565
566 operation = gb_operation_create_common(connection, type,
567 request_size, response_size,
568 flags, gfp);
569 if (operation)
570 trace_gb_operation_create(operation);
571
572 return operation;
573}
574EXPORT_SYMBOL_GPL(gb_operation_create_flags);
575
576struct gb_operation *
577gb_operation_create_core(struct gb_connection *connection,
578 u8 type, size_t request_size,
579 size_t response_size, unsigned long flags,
580 gfp_t gfp)
581{
582 struct gb_operation *operation;
583
584 flags |= GB_OPERATION_FLAG_CORE;
585
586 operation = gb_operation_create_common(connection, type,
587 request_size, response_size,
588 flags, gfp);
589 if (operation)
590 trace_gb_operation_create_core(operation);
591
592 return operation;
593}
594/* Do not export this function. */
595
596size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
597{
598 struct gb_host_device *hd = connection->hd;
599
600 return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
601}
602EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);
603
604static struct gb_operation *
605gb_operation_create_incoming(struct gb_connection *connection, u16 id,
606 u8 type, void *data, size_t size)
607{
608 struct gb_operation *operation;
609 size_t request_size;
610 unsigned long flags = GB_OPERATION_FLAG_INCOMING;
611
612 /* Caller has made sure we at least have a message header. */
613 request_size = size - sizeof(struct gb_operation_msg_hdr);
614
615 if (!id)
616 flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;
617
618 operation = gb_operation_create_common(connection, type,
619 request_size,
620 GB_REQUEST_TYPE_INVALID,
621 flags, GFP_ATOMIC);
622 if (!operation)
623 return NULL;
624
625 operation->id = id;
626 memcpy(operation->request->header, data, size);
627 trace_gb_operation_create_incoming(operation);
628
629 return operation;
630}
631
632/*
633 * Get an additional reference on an operation.  Per kref semantics
634 * the caller must already hold a reference; release it again with
 * gb_operation_put().
 */
635void gb_operation_get(struct gb_operation *operation)
636{
637	kref_get(&operation->kref);
638}
639EXPORT_SYMBOL_GPL(gb_operation_get);
640
641/*
642 * Destroy a previously created operation.
643 */
644static void _gb_operation_destroy(struct kref *kref)
645{
646 struct gb_operation *operation;
647
648 operation = container_of(kref, struct gb_operation, kref);
649
650 trace_gb_operation_destroy(operation);
651
652 if (operation->response)
653 gb_operation_message_free(operation->response);
654 gb_operation_message_free(operation->request);
655
656 kmem_cache_free(gb_operation_cache, operation);
657}
658
659/*
660 * Drop a reference on an operation, and destroy it when the last
661 * one is gone.  A NULL operation is flagged with a WARN and ignored.
662 */
663void gb_operation_put(struct gb_operation *operation)
664{
665	if (WARN_ON(!operation))
666		return;
667
668	kref_put(&operation->kref, _gb_operation_destroy);
669}
670EXPORT_SYMBOL_GPL(gb_operation_put);
671
/* Completion callback used by synchronous sends: wake the waiter in
 * gb_operation_request_send_sync_timeout(). */
673static void gb_operation_sync_callback(struct gb_operation *operation)
674{
675	complete(&operation->completion);
676}
677
678/**
679 * gb_operation_request_send() - send an operation request message
680 * @operation: the operation to initiate
681 * @callback: the operation completion callback
682 * @gfp: the memory flags to use for any allocations
683 *
684 * The caller has filled in any payload so the request message is ready to go.
685 * The callback function supplied will be called when the response message has
686 * arrived, a unidirectional request has been sent, or the operation is
687 * cancelled, indicating that the operation is complete. The callback function
688 * can fetch the result of the operation using gb_operation_result() if
689 * desired.
690 *
691 * Return: 0 if the request was successfully queued in the host-driver queues,
692 * or a negative errno.
693 */
694int gb_operation_request_send(struct gb_operation *operation,
695 gb_operation_callback callback,
696 gfp_t gfp)
697{
698 struct gb_connection *connection = operation->connection;
699 struct gb_operation_msg_hdr *header;
700 unsigned int cycle;
701 int ret;
702
703 if (gb_connection_is_offloaded(connection))
704 return -EBUSY;
705
706 if (!callback)
707 return -EINVAL;
708
709 /*
710 * Record the callback function, which is executed in
711 * non-atomic (workqueue) context when the final result
712 * of an operation has been set.
713 */
714 operation->callback = callback;
715
716 /*
717 * Assign the operation's id, and store it in the request header.
718 * Zero is a reserved operation id for unidirectional operations.
719 */
720 if (gb_operation_is_unidirectional(operation)) {
721 operation->id = 0;
722 } else {
723 cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
724 operation->id = (u16)(cycle % U16_MAX + 1);
725 }
726
727 header = operation->request->header;
728 header->operation_id = cpu_to_le16(operation->id);
729
730 gb_operation_result_set(operation, -EINPROGRESS);
731
732 /*
733 * Get an extra reference on the operation. It'll be dropped when the
734 * operation completes.
735 */
736 gb_operation_get(operation);
737 ret = gb_operation_get_active(operation);
738 if (ret)
739 goto err_put;
740
741 ret = gb_message_send(operation->request, gfp);
742 if (ret)
743 goto err_put_active;
744
745 return 0;
746
747err_put_active:
748 gb_operation_put_active(operation);
749err_put:
750 gb_operation_put(operation);
751
752 return ret;
753}
754EXPORT_SYMBOL_GPL(gb_operation_request_send);
755
756/*
757 * Send a synchronous operation. This function is expected to
758 * block, returning only when the response has arrived, (or when an
759 * error is detected. The return value is the result of the
760 * operation.
761 */
762int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
763 unsigned int timeout)
764{
765 int ret;
766 unsigned long timeout_jiffies;
767
768 ret = gb_operation_request_send(operation, gb_operation_sync_callback,
769 GFP_KERNEL);
770 if (ret)
771 return ret;
772
773 if (timeout)
774 timeout_jiffies = msecs_to_jiffies(timeout);
775 else
776 timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
777
778 ret = wait_for_completion_interruptible_timeout(&operation->completion,
779 timeout_jiffies);
780 if (ret < 0) {
781 /* Cancel the operation if interrupted */
782 gb_operation_cancel(operation, -ECANCELED);
783 } else if (ret == 0) {
784 /* Cancel the operation if op timed out */
785 gb_operation_cancel(operation, -ETIMEDOUT);
786 }
787
788 return gb_operation_result(operation);
789}
790EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);
791
792/*
793 * Send a response for an incoming operation request. A non-zero
794 * errno indicates a failed operation.
795 *
796 * If there is any response payload, the incoming request handler is
797 * responsible for allocating the response message. Otherwise the
798 * it can simply supply the result errno; this function will
799 * allocate the response message if necessary.
800 */
801static int gb_operation_response_send(struct gb_operation *operation,
802 int errno)
803{
804 struct gb_connection *connection = operation->connection;
805 int ret;
806
807 if (!operation->response &&
808 !gb_operation_is_unidirectional(operation)) {
809 if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
810 return -ENOMEM;
811 }
812
813 /* Record the result */
814 if (!gb_operation_result_set(operation, errno)) {
815 dev_err(&connection->hd->dev, "request result already set\n");
816 return -EIO; /* Shouldn't happen */
817 }
818
819 /* Sender of request does not care about response. */
820 if (gb_operation_is_unidirectional(operation))
821 return 0;
822
823 /* Reference will be dropped when message has been sent. */
824 gb_operation_get(operation);
825 ret = gb_operation_get_active(operation);
826 if (ret)
827 goto err_put;
828
829 /* Fill in the response header and send it */
830 operation->response->header->result = gb_operation_errno_map(errno);
831
832 ret = gb_message_send(operation->response, GFP_KERNEL);
833 if (ret)
834 goto err_put_active;
835
836 return 0;
837
838err_put_active:
839 gb_operation_put_active(operation);
840err_put:
841 gb_operation_put(operation);
842
843 return ret;
844}
845
846/*
847 * This function is called when a message send request has completed.
848 */
849void greybus_message_sent(struct gb_host_device *hd,
850 struct gb_message *message, int status)
851{
852 struct gb_operation *operation = message->operation;
853 struct gb_connection *connection = operation->connection;
854
855 /*
856 * If the message was a response, we just need to drop our
857 * reference to the operation. If an error occurred, report
858 * it.
859 *
860 * For requests, if there's no error and the operation in not
861 * unidirectional, there's nothing more to do until the response
862 * arrives. If an error occurred attempting to send it, or if the
863 * operation is unidrectional, record the result of the operation and
864 * schedule its completion.
865 */
866 if (message == operation->response) {
867 if (status) {
868 dev_err(&connection->hd->dev,
869 "%s: error sending response 0x%02x: %d\n",
870 connection->name, operation->type, status);
871 }
872
873 gb_operation_put_active(operation);
874 gb_operation_put(operation);
875 } else if (status || gb_operation_is_unidirectional(operation)) {
876 if (gb_operation_result_set(operation, status)) {
877 queue_work(gb_operation_completion_wq,
878 &operation->work);
879 }
880 }
881}
882EXPORT_SYMBOL_GPL(greybus_message_sent);
883
884/*
885 * We've received data on a connection, and it doesn't look like a
886 * response, so we assume it's a request.
887 *
888 * This is called in interrupt context, so just copy the incoming
889 * data into the request buffer and handle the rest via workqueue.
890 */
891static void gb_connection_recv_request(struct gb_connection *connection,
892 const struct gb_operation_msg_hdr *header,
893 void *data, size_t size)
894{
895 struct gb_operation *operation;
896 u16 operation_id;
897 u8 type;
898 int ret;
899
900 operation_id = le16_to_cpu(header->operation_id);
901 type = header->type;
902
903 operation = gb_operation_create_incoming(connection, operation_id,
904 type, data, size);
905 if (!operation) {
906 dev_err(&connection->hd->dev,
907 "%s: can't create incoming operation\n",
908 connection->name);
909 return;
910 }
911
912 ret = gb_operation_get_active(operation);
913 if (ret) {
914 gb_operation_put(operation);
915 return;
916 }
917 trace_gb_message_recv_request(operation->request);
918
919 /*
920 * The initial reference to the operation will be dropped when the
921 * request handler returns.
922 */
923 if (gb_operation_result_set(operation, -EINPROGRESS))
924 queue_work(connection->wq, &operation->work);
925}
926
927/*
928 * We've received data that appears to be an operation response
929 * message. Look up the operation, and record that we've received
930 * its response.
931 *
932 * This is called in interrupt context, so just copy the incoming
933 * data into the response buffer and handle the rest via workqueue.
934 */
935static void gb_connection_recv_response(struct gb_connection *connection,
936 const struct gb_operation_msg_hdr *header,
937 void *data, size_t size)
938{
939 struct gb_operation *operation;
940 struct gb_message *message;
941 size_t message_size;
942 u16 operation_id;
943 int errno;
944
945 operation_id = le16_to_cpu(header->operation_id);
946
947 if (!operation_id) {
948 dev_err_ratelimited(&connection->hd->dev,
949 "%s: invalid response id 0 received\n",
950 connection->name);
951 return;
952 }
953
954 operation = gb_operation_find_outgoing(connection, operation_id);
955 if (!operation) {
956 dev_err_ratelimited(&connection->hd->dev,
957 "%s: unexpected response id 0x%04x received\n",
958 connection->name, operation_id);
959 return;
960 }
961
962 errno = gb_operation_status_map(header->result);
963 message = operation->response;
964 message_size = sizeof(*header) + message->payload_size;
965 if (!errno && size > message_size) {
966 dev_err_ratelimited(&connection->hd->dev,
967 "%s: malformed response 0x%02x received (%zu > %zu)\n",
968 connection->name, header->type,
969 size, message_size);
970 errno = -EMSGSIZE;
971 } else if (!errno && size < message_size) {
972 if (gb_operation_short_response_allowed(operation)) {
973 message->payload_size = size - sizeof(*header);
974 } else {
975 dev_err_ratelimited(&connection->hd->dev,
976 "%s: short response 0x%02x received (%zu < %zu)\n",
977 connection->name, header->type,
978 size, message_size);
979 errno = -EMSGSIZE;
980 }
981 }
982
983 /* We must ignore the payload if a bad status is returned */
984 if (errno)
985 size = sizeof(*header);
986
987 /* The rest will be handled in work queue context */
988 if (gb_operation_result_set(operation, errno)) {
989 memcpy(message->buffer, data, size);
990
991 trace_gb_message_recv_response(message);
992
993 queue_work(gb_operation_completion_wq, &operation->work);
994 }
995
996 gb_operation_put(operation);
997}
998
999/*
1000 * Handle data arriving on a connection. As soon as we return the
1001 * supplied data buffer will be reused (so unless we do something
1002 * with, it's effectively dropped).
1003 */
1004void gb_connection_recv(struct gb_connection *connection,
1005 void *data, size_t size)
1006{
1007 struct gb_operation_msg_hdr header;
1008 struct device *dev = &connection->hd->dev;
1009 size_t msg_size;
1010
1011 if (connection->state == GB_CONNECTION_STATE_DISABLED ||
1012 gb_connection_is_offloaded(connection)) {
1013 dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
1014 connection->name, size);
1015 return;
1016 }
1017
1018 if (size < sizeof(header)) {
1019 dev_err_ratelimited(dev, "%s: short message received\n",
1020 connection->name);
1021 return;
1022 }
1023
1024 /* Use memcpy as data may be unaligned */
1025 memcpy(&header, data, sizeof(header));
1026 msg_size = le16_to_cpu(header.size);
1027 if (size < msg_size) {
1028 dev_err_ratelimited(dev,
1029 "%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
1030 connection->name,
1031 le16_to_cpu(header.operation_id),
1032 header.type, size, msg_size);
1033 return; /* XXX Should still complete operation */
1034 }
1035
1036 if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
1037 gb_connection_recv_response(connection, &header, data,
1038 msg_size);
1039 } else {
1040 gb_connection_recv_request(connection, &header, data,
1041 msg_size);
1042 }
1043}
1044
1045/*
1046 * Cancel an outgoing operation synchronously, and record the given error to
1047 * indicate why.
1048 */
1049void gb_operation_cancel(struct gb_operation *operation, int errno)
1050{
1051 if (WARN_ON(gb_operation_is_incoming(operation)))
1052 return;
1053
1054 if (gb_operation_result_set(operation, errno)) {
1055 gb_message_cancel(operation->request);
1056 queue_work(gb_operation_completion_wq, &operation->work);
1057 }
1058 trace_gb_message_cancel_outgoing(operation->request);
1059
1060 atomic_inc(&operation->waiters);
1061 wait_event(gb_operation_cancellation_queue,
1062 !gb_operation_is_active(operation));
1063 atomic_dec(&operation->waiters);
1064}
1065EXPORT_SYMBOL_GPL(gb_operation_cancel);
1066
1067/*
1068 * Cancel an incoming operation synchronously. Called during connection tear
1069 * down.
1070 */
1071void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
1072{
1073 if (WARN_ON(!gb_operation_is_incoming(operation)))
1074 return;
1075
1076 if (!gb_operation_is_unidirectional(operation)) {
1077 /*
1078 * Make sure the request handler has submitted the response
1079 * before cancelling it.
1080 */
1081 flush_work(&operation->work);
1082 if (!gb_operation_result_set(operation, errno))
1083 gb_message_cancel(operation->response);
1084 }
1085 trace_gb_message_cancel_incoming(operation->response);
1086
1087 atomic_inc(&operation->waiters);
1088 wait_event(gb_operation_cancellation_queue,
1089 !gb_operation_is_active(operation));
1090 atomic_dec(&operation->waiters);
1091}
1092
1093/**
1094 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
1095 * @connection: the Greybus connection to send this to
1096 * @type: the type of operation to send
1097 * @request: pointer to a memory buffer to copy the request from
1098 * @request_size: size of @request
1099 * @response: pointer to a memory buffer to copy the response to
1100 * @response_size: the size of @response.
1101 * @timeout: operation timeout in milliseconds
1102 *
1103 * This function implements a simple synchronous Greybus operation. It sends
1104 * the provided operation request and waits (sleeps) until the corresponding
1105 * operation response message has been successfully received, or an error
1106 * occurs. @request and @response are buffers to hold the request and response
1107 * data respectively, and if they are not NULL, their size must be specified in
1108 * @request_size and @response_size.
1109 *
1110 * If a response payload is to come back, and @response is not NULL,
1111 * @response_size number of bytes will be copied into @response if the operation
1112 * is successful.
1113 *
1114 * If there is an error, the response buffer is left alone.
1115 */
1116int gb_operation_sync_timeout(struct gb_connection *connection, int type,
1117 void *request, int request_size,
1118 void *response, int response_size,
1119 unsigned int timeout)
1120{
1121 struct gb_operation *operation;
1122 int ret;
1123
1124 if ((response_size && !response) ||
1125 (request_size && !request))
1126 return -EINVAL;
1127
1128 operation = gb_operation_create(connection, type,
1129 request_size, response_size,
1130 GFP_KERNEL);
1131 if (!operation)
1132 return -ENOMEM;
1133
1134 if (request_size)
1135 memcpy(operation->request->payload, request, request_size);
1136
1137 ret = gb_operation_request_send_sync_timeout(operation, timeout);
1138 if (ret) {
1139 dev_err(&connection->hd->dev,
1140 "%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
1141 connection->name, operation->id, type, ret);
1142 } else {
1143 if (response_size) {
1144 memcpy(response, operation->response->payload,
1145 response_size);
1146 }
1147 }
1148
1149 gb_operation_put(operation);
1150
1151 return ret;
1152}
1153EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
1154
1155/**
1156 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
1157 * @connection: connection to use
1158 * @type: type of operation to send
1159 * @request: memory buffer to copy the request from
1160 * @request_size: size of @request
1161 * @timeout: send timeout in milliseconds
1162 *
1163 * Initiate a unidirectional operation by sending a request message and
1164 * waiting for it to be acknowledged as sent by the host device.
1165 *
1166 * Note that successful send of a unidirectional operation does not imply that
1167 * the request as actually reached the remote end of the connection.
1168 */
1169int gb_operation_unidirectional_timeout(struct gb_connection *connection,
1170 int type, void *request, int request_size,
1171 unsigned int timeout)
1172{
1173 struct gb_operation *operation;
1174 int ret;
1175
1176 if (request_size && !request)
1177 return -EINVAL;
1178
1179 operation = gb_operation_create_flags(connection, type,
1180 request_size, 0,
1181 GB_OPERATION_FLAG_UNIDIRECTIONAL,
1182 GFP_KERNEL);
1183 if (!operation)
1184 return -ENOMEM;
1185
1186 if (request_size)
1187 memcpy(operation->request->payload, request, request_size);
1188
1189 ret = gb_operation_request_send_sync_timeout(operation, timeout);
1190 if (ret) {
1191 dev_err(&connection->hd->dev,
1192 "%s: unidirectional operation of type 0x%02x failed: %d\n",
1193 connection->name, type, ret);
1194 }
1195
1196 gb_operation_put(operation);
1197
1198 return ret;
1199}
1200EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
1201
1202int __init gb_operation_init(void)
1203{
1204 gb_message_cache = kmem_cache_create("gb_message_cache",
1205 sizeof(struct gb_message), 0, 0, NULL);
1206 if (!gb_message_cache)
1207 return -ENOMEM;
1208
1209 gb_operation_cache = kmem_cache_create("gb_operation_cache",
1210 sizeof(struct gb_operation), 0, 0, NULL);
1211 if (!gb_operation_cache)
1212 goto err_destroy_message_cache;
1213
1214 gb_operation_completion_wq = alloc_workqueue("greybus_completion",
1215 0, 0);
1216 if (!gb_operation_completion_wq)
1217 goto err_destroy_operation_cache;
1218
1219 return 0;
1220
1221err_destroy_operation_cache:
1222 kmem_cache_destroy(gb_operation_cache);
1223 gb_operation_cache = NULL;
1224err_destroy_message_cache:
1225 kmem_cache_destroy(gb_message_cache);
1226 gb_message_cache = NULL;
1227
1228 return -ENOMEM;
1229}
1230
/*
 * Tear down what gb_operation_init() set up, in reverse order: the
 * workqueue is destroyed first so no queued completion work can touch
 * the caches after they are gone.
 */
void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}
diff --git a/drivers/staging/greybus/operation.h b/drivers/staging/greybus/operation.h
new file mode 100644
index 000000000000..de09a2c7de54
--- /dev/null
+++ b/drivers/staging/greybus/operation.h
@@ -0,0 +1,210 @@
1/*
2 * Greybus operations
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __OPERATION_H
11#define __OPERATION_H
12
13#include <linux/completion.h>
14
15struct gb_operation;
16
17/* The default amount of time a request is given to complete */
18#define GB_OPERATION_TIMEOUT_DEFAULT 1000 /* milliseconds */
19
20/*
21 * The top bit of the type in an operation message header indicates
22 * whether the message is a request (bit clear) or response (bit set)
23 */
24#define GB_MESSAGE_TYPE_RESPONSE ((u8)0x80)
25
/*
 * Operation result codes as carried in the "result" byte of a
 * response message header (mapped to/from errno values by
 * gb_operation_status_map()/gb_operation_errno_map()).
 */
enum gb_operation_result {
	GB_OP_SUCCESS		= 0x00,
	GB_OP_INTERRUPTED	= 0x01,
	GB_OP_TIMEOUT		= 0x02,
	GB_OP_NO_MEMORY		= 0x03,
	GB_OP_PROTOCOL_BAD	= 0x04,
	GB_OP_OVERFLOW		= 0x05,
	GB_OP_INVALID		= 0x06,
	GB_OP_RETRY		= 0x07,
	GB_OP_NONEXISTENT	= 0x08,
	GB_OP_UNKNOWN_ERROR	= 0xfe,
	GB_OP_MALFUNCTION	= 0xff,
};
39
40#define GB_OPERATION_MESSAGE_SIZE_MIN sizeof(struct gb_operation_msg_hdr)
41#define GB_OPERATION_MESSAGE_SIZE_MAX U16_MAX
42
43/*
44 * Protocol code should only examine the payload and payload_size fields, and
45 * host-controller drivers may use the hcpriv field. All other fields are
46 * intended to be private to the operations core code.
47 */
48struct gb_message {
49 struct gb_operation *operation;
50 struct gb_operation_msg_hdr *header;
51
52 void *payload;
53 size_t payload_size;
54
55 void *buffer;
56
57 void *hcpriv;
58};
59
60#define GB_OPERATION_FLAG_INCOMING BIT(0)
61#define GB_OPERATION_FLAG_UNIDIRECTIONAL BIT(1)
62#define GB_OPERATION_FLAG_SHORT_RESPONSE BIT(2)
63#define GB_OPERATION_FLAG_CORE BIT(3)
64
65#define GB_OPERATION_FLAG_USER_MASK (GB_OPERATION_FLAG_SHORT_RESPONSE | \
66 GB_OPERATION_FLAG_UNIDIRECTIONAL)
67
68/*
69 * A Greybus operation is a remote procedure call performed over a
70 * connection between two UniPro interfaces.
71 *
72 * Every operation consists of a request message sent to the other
73 * end of the connection coupled with a reply message returned to
74 * the sender. Every operation has a type, whose interpretation is
75 * dependent on the protocol associated with the connection.
76 *
77 * Only four things in an operation structure are intended to be
78 * directly usable by protocol handlers: the operation's connection
79 * pointer; the operation type; the request message payload (and
80 * size); and the response message payload (and size). Note that a
81 * message with a 0-byte payload has a null message payload pointer.
82 *
83 * In addition, every operation has a result, which is an errno
84 * value. Protocol handlers access the operation result using
85 * gb_operation_result().
86 */
87typedef void (*gb_operation_callback)(struct gb_operation *);
88struct gb_operation {
89 struct gb_connection *connection;
90 struct gb_message *request;
91 struct gb_message *response;
92
93 unsigned long flags;
94 u8 type;
95 u16 id;
96 int errno; /* Operation result */
97
98 struct work_struct work;
99 gb_operation_callback callback;
100 struct completion completion;
101
102 struct kref kref;
103 atomic_t waiters;
104
105 int active;
106 struct list_head links; /* connection->operations */
107};
108
/* True if this operation was created for an incoming request. */
static inline bool
gb_operation_is_incoming(struct gb_operation *operation)
{
	return operation->flags & GB_OPERATION_FLAG_INCOMING;
}

/* True if this operation has no response message at all. */
static inline bool
gb_operation_is_unidirectional(struct gb_operation *operation)
{
	return operation->flags & GB_OPERATION_FLAG_UNIDIRECTIONAL;
}

/* True if a response shorter than the allocated buffer is acceptable. */
static inline bool
gb_operation_short_response_allowed(struct gb_operation *operation)
{
	return operation->flags & GB_OPERATION_FLAG_SHORT_RESPONSE;
}

/* True if this operation was created by the operations core itself. */
static inline bool gb_operation_is_core(struct gb_operation *operation)
{
	return operation->flags & GB_OPERATION_FLAG_CORE;
}
131
132void gb_connection_recv(struct gb_connection *connection,
133 void *data, size_t size);
134
135int gb_operation_result(struct gb_operation *operation);
136
137size_t gb_operation_get_payload_size_max(struct gb_connection *connection);
138struct gb_operation *
139gb_operation_create_flags(struct gb_connection *connection,
140 u8 type, size_t request_size,
141 size_t response_size, unsigned long flags,
142 gfp_t gfp);
143
/* Create an ordinary outgoing operation (no special flags). */
static inline struct gb_operation *
gb_operation_create(struct gb_connection *connection,
			u8 type, size_t request_size,
			size_t response_size, gfp_t gfp)
{
	return gb_operation_create_flags(connection, type, request_size,
						response_size, 0, gfp);
}
152
153struct gb_operation *
154gb_operation_create_core(struct gb_connection *connection,
155 u8 type, size_t request_size,
156 size_t response_size, unsigned long flags,
157 gfp_t gfp);
158
159void gb_operation_get(struct gb_operation *operation);
160void gb_operation_put(struct gb_operation *operation);
161
162bool gb_operation_response_alloc(struct gb_operation *operation,
163 size_t response_size, gfp_t gfp);
164
165int gb_operation_request_send(struct gb_operation *operation,
166 gb_operation_callback callback,
167 gfp_t gfp);
168int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
169 unsigned int timeout);
/* Synchronous send using the default operation timeout. */
static inline int
gb_operation_request_send_sync(struct gb_operation *operation)
{
	return gb_operation_request_send_sync_timeout(operation,
			GB_OPERATION_TIMEOUT_DEFAULT);
}
176
177void gb_operation_cancel(struct gb_operation *operation, int errno);
178void gb_operation_cancel_incoming(struct gb_operation *operation, int errno);
179
180void greybus_message_sent(struct gb_host_device *hd,
181 struct gb_message *message, int status);
182
183int gb_operation_sync_timeout(struct gb_connection *connection, int type,
184 void *request, int request_size,
185 void *response, int response_size,
186 unsigned int timeout);
187int gb_operation_unidirectional_timeout(struct gb_connection *connection,
188 int type, void *request, int request_size,
189 unsigned int timeout);
190
/* Simple synchronous operation using the default timeout. */
static inline int gb_operation_sync(struct gb_connection *connection, int type,
		      void *request, int request_size,
		      void *response, int response_size)
{
	return gb_operation_sync_timeout(connection, type,
			request, request_size, response, response_size,
			GB_OPERATION_TIMEOUT_DEFAULT);
}
199
/* Unidirectional operation using the default timeout. */
static inline int gb_operation_unidirectional(struct gb_connection *connection,
				int type, void *request, int request_size)
{
	return gb_operation_unidirectional_timeout(connection, type,
			request, request_size, GB_OPERATION_TIMEOUT_DEFAULT);
}
206
207int gb_operation_init(void);
208void gb_operation_exit(void);
209
210#endif /* !__OPERATION_H */
diff --git a/drivers/staging/greybus/power_supply.c b/drivers/staging/greybus/power_supply.c
new file mode 100644
index 000000000000..e85c988b7034
--- /dev/null
+++ b/drivers/staging/greybus/power_supply.c
@@ -0,0 +1,1141 @@
1/*
2 * Power Supply driver for a Greybus module.
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/power_supply.h>
13#include <linux/slab.h>
14
15#include "greybus.h"
16
17#define PROP_MAX 32
18
/* Cached state of one property of a remote power supply. */
struct gb_power_supply_prop {
	enum power_supply_property	prop;		/* kernel property id */
	u8				gb_prop;	/* Greybus property id */
	int				val;		/* most recent value */
	int				previous_val;	/* value before last change */
	bool				is_writeable;	/* property may be written */
};

/* One power supply exposed by the remote module. */
struct gb_power_supply {
	u8				id;
	bool				registered;
	struct power_supply		*psy;
	struct power_supply_desc	desc;
	char				name[64];	/* unique psy class name */
	struct gb_power_supplies	*supplies;	/* back-pointer to parent */
	struct delayed_work		work;
	char				*manufacturer;
	char				*model_name;
	char				*serial_number;
	u8				type;
	u8				properties_count;	/* intval props in props[] */
	u8				properties_count_str;	/* strval props appended */
	unsigned long			last_update;
	u8				cache_invalid;
	unsigned int			update_interval;	/* backs off exponentially */
	bool				changed;	/* a watched property changed */
	struct gb_power_supply_prop	*props;
	enum power_supply_property	*props_raw;	/* grown by prop_append() */
	bool				pm_acquired;	/* runtime-PM ref held while charging */
	struct mutex			supply_lock;
};

/* All power supplies served by one Greybus connection. */
struct gb_power_supplies {
	struct gb_connection	*connection;
	u8			supplies_count;
	struct gb_power_supply	*supply;
	struct mutex		supplies_lock;
};
57
58#define to_gb_power_supply(x) power_supply_get_drvdata(x)
59
60/*
61 * General power supply properties that could be absent from various reasons,
62 * like kernel versions or vendor specific versions
63 */
64#ifndef POWER_SUPPLY_PROP_VOLTAGE_BOOT
65 #define POWER_SUPPLY_PROP_VOLTAGE_BOOT -1
66#endif
67#ifndef POWER_SUPPLY_PROP_CURRENT_BOOT
68 #define POWER_SUPPLY_PROP_CURRENT_BOOT -1
69#endif
70#ifndef POWER_SUPPLY_PROP_CALIBRATE
71 #define POWER_SUPPLY_PROP_CALIBRATE -1
72#endif
73
74/* cache time in milliseconds, if cache_time is set to 0 cache is disable */
75static unsigned int cache_time = 1000;
76/*
77 * update interval initial and maximum value, between the two will
78 * back-off exponential
79 */
80static unsigned int update_interval_init = 1 * HZ;
81static unsigned int update_interval_max = 30 * HZ;
82
/*
 * Describes how a watched property is checked for "significant" change:
 * a change is significant when it exceeds tolerance_change (a tolerance
 * of 0 means any update counts), and prop_changed, if set, is invoked
 * when that happens.
 */
struct gb_power_supply_changes {
	enum power_supply_property	prop;
	u32				tolerance_change;
	void (*prop_changed)(struct gb_power_supply *gbpsy,
			     struct gb_power_supply_prop *prop);
};

static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
					 struct gb_power_supply_prop *prop);

/* Properties whose changes are watched by check_changed(). */
static const struct gb_power_supply_changes psy_props_changes[] = {
	{	.prop			= GB_POWER_SUPPLY_PROP_STATUS,
		.tolerance_change	= 0,
		.prop_changed		= gb_power_supply_state_change,
	},
	{	.prop			= GB_POWER_SUPPLY_PROP_TEMP,
		.tolerance_change	= 500,
		.prop_changed		= NULL,
	},
	{	.prop			= GB_POWER_SUPPLY_PROP_ONLINE,
		.tolerance_change	= 0,
		.prop_changed		= NULL,
	},
};
107
/*
 * Translate a Greybus power-supply property code into the kernel's
 * enum power_supply_property value.
 *
 * Returns 0 and stores the kernel property in @psp on success, or a
 * negative value when the Greybus code is unknown or maps to a kernel
 * property this kernel does not provide (the compat #defines above
 * turn those into -1, which the prop < 0 check below rejects).
 */
static int get_psp_from_gb_prop(int gb_prop, enum power_supply_property *psp)
{
	int prop;

	switch (gb_prop) {
	case GB_POWER_SUPPLY_PROP_STATUS:
		prop = POWER_SUPPLY_PROP_STATUS;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_TYPE:
		prop = POWER_SUPPLY_PROP_CHARGE_TYPE;
		break;
	case GB_POWER_SUPPLY_PROP_HEALTH:
		prop = POWER_SUPPLY_PROP_HEALTH;
		break;
	case GB_POWER_SUPPLY_PROP_PRESENT:
		prop = POWER_SUPPLY_PROP_PRESENT;
		break;
	case GB_POWER_SUPPLY_PROP_ONLINE:
		prop = POWER_SUPPLY_PROP_ONLINE;
		break;
	case GB_POWER_SUPPLY_PROP_AUTHENTIC:
		prop = POWER_SUPPLY_PROP_AUTHENTIC;
		break;
	case GB_POWER_SUPPLY_PROP_TECHNOLOGY:
		prop = POWER_SUPPLY_PROP_TECHNOLOGY;
		break;
	case GB_POWER_SUPPLY_PROP_CYCLE_COUNT:
		prop = POWER_SUPPLY_PROP_CYCLE_COUNT;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX:
		prop = POWER_SUPPLY_PROP_VOLTAGE_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN:
		prop = POWER_SUPPLY_PROP_VOLTAGE_MIN;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		prop = POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		prop = POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_NOW:
		prop = POWER_SUPPLY_PROP_VOLTAGE_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_AVG:
		prop = POWER_SUPPLY_PROP_VOLTAGE_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_OCV:
		prop = POWER_SUPPLY_PROP_VOLTAGE_OCV;
		break;
	case GB_POWER_SUPPLY_PROP_VOLTAGE_BOOT:
		prop = POWER_SUPPLY_PROP_VOLTAGE_BOOT;
		break;
	case GB_POWER_SUPPLY_PROP_CURRENT_MAX:
		prop = POWER_SUPPLY_PROP_CURRENT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_CURRENT_NOW:
		prop = POWER_SUPPLY_PROP_CURRENT_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_CURRENT_AVG:
		prop = POWER_SUPPLY_PROP_CURRENT_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_CURRENT_BOOT:
		prop = POWER_SUPPLY_PROP_CURRENT_BOOT;
		break;
	case GB_POWER_SUPPLY_PROP_POWER_NOW:
		prop = POWER_SUPPLY_PROP_POWER_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_POWER_AVG:
		prop = POWER_SUPPLY_PROP_POWER_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
		prop = POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN:
		prop = POWER_SUPPLY_PROP_CHARGE_EMPTY_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_FULL:
		prop = POWER_SUPPLY_PROP_CHARGE_FULL;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_EMPTY:
		prop = POWER_SUPPLY_PROP_CHARGE_EMPTY;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_NOW:
		prop = POWER_SUPPLY_PROP_CHARGE_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_AVG:
		prop = POWER_SUPPLY_PROP_CHARGE_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_COUNTER:
		prop = POWER_SUPPLY_PROP_CHARGE_COUNTER;
		break;
	case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
		prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT;
		break;
	case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
		prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
		prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE;
		break;
	case GB_POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX:
		prop = POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT:
		prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX:
		prop = POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
		prop = POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
		prop = POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN:
		prop = POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_FULL:
		prop = POWER_SUPPLY_PROP_ENERGY_FULL;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_EMPTY:
		prop = POWER_SUPPLY_PROP_ENERGY_EMPTY;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_NOW:
		prop = POWER_SUPPLY_PROP_ENERGY_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_ENERGY_AVG:
		prop = POWER_SUPPLY_PROP_ENERGY_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_CAPACITY:
		prop = POWER_SUPPLY_PROP_CAPACITY;
		break;
	case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN:
		prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN;
		break;
	case GB_POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX:
		prop = POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_CAPACITY_LEVEL:
		prop = POWER_SUPPLY_PROP_CAPACITY_LEVEL;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP:
		prop = POWER_SUPPLY_PROP_TEMP;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_MAX:
		prop = POWER_SUPPLY_PROP_TEMP_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_MIN:
		prop = POWER_SUPPLY_PROP_TEMP_MIN;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MIN:
		prop = POWER_SUPPLY_PROP_TEMP_ALERT_MIN;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_ALERT_MAX:
		prop = POWER_SUPPLY_PROP_TEMP_ALERT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT:
		prop = POWER_SUPPLY_PROP_TEMP_AMBIENT;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN:
		prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MIN;
		break;
	case GB_POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX:
		prop = POWER_SUPPLY_PROP_TEMP_AMBIENT_ALERT_MAX;
		break;
	case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
		prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
		prop = POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
		prop = POWER_SUPPLY_PROP_TIME_TO_FULL_NOW;
		break;
	case GB_POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
		prop = POWER_SUPPLY_PROP_TIME_TO_FULL_AVG;
		break;
	case GB_POWER_SUPPLY_PROP_TYPE:
		prop = POWER_SUPPLY_PROP_TYPE;
		break;
	case GB_POWER_SUPPLY_PROP_SCOPE:
		prop = POWER_SUPPLY_PROP_SCOPE;
		break;
	case GB_POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
		prop = POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT;
		break;
	case GB_POWER_SUPPLY_PROP_CALIBRATE:
		prop = POWER_SUPPLY_PROP_CALIBRATE;
		break;
	default:
		prop = -1;
		break;
	}

	/* Unknown code, or property unavailable in this kernel (-1 compat) */
	if (prop < 0)
		return prop;

	*psp = (enum power_supply_property)prop;

	return 0;
}
311
312static struct gb_connection *get_conn_from_psy(struct gb_power_supply *gbpsy)
313{
314 return gbpsy->supplies->connection;
315}
316
317static struct gb_power_supply_prop *get_psy_prop(struct gb_power_supply *gbpsy,
318 enum power_supply_property psp)
319{
320 int i;
321
322 for (i = 0; i < gbpsy->properties_count; i++)
323 if (gbpsy->props[i].prop == psp)
324 return &gbpsy->props[i];
325 return NULL;
326}
327
328static int is_psy_prop_writeable(struct gb_power_supply *gbpsy,
329 enum power_supply_property psp)
330{
331 struct gb_power_supply_prop *prop;
332
333 prop = get_psy_prop(gbpsy, psp);
334 if (!prop)
335 return -ENOENT;
336 return prop->is_writeable ? 1 : 0;
337}
338
339static int is_prop_valint(enum power_supply_property psp)
340{
341 return ((psp < POWER_SUPPLY_PROP_MODEL_NAME) ? 1 : 0);
342}
343
344static void next_interval(struct gb_power_supply *gbpsy)
345{
346 if (gbpsy->update_interval == update_interval_max)
347 return;
348
349 /* do some exponential back-off in the update interval */
350 gbpsy->update_interval *= 2;
351 if (gbpsy->update_interval > update_interval_max)
352 gbpsy->update_interval = update_interval_max;
353}
354
/* Notify the power-supply core that this supply's state changed. */
static void __gb_power_supply_changed(struct gb_power_supply *gbpsy)
{
	power_supply_changed(gbpsy->psy);
}
359
/*
 * Hold a runtime-PM reference on the bundle while the supply reports it
 * is charging, and release it when it stops.
 */
static void gb_power_supply_state_change(struct gb_power_supply *gbpsy,
					 struct gb_power_supply_prop *prop)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	int ret;

	/*
	 * Check gbpsy->pm_acquired to make sure only one pair of 'get_sync'
	 * and 'put_autosuspend' runtime pm call for state property change.
	 */
	mutex_lock(&gbpsy->supply_lock);

	if ((prop->val == GB_POWER_SUPPLY_STATUS_CHARGING) &&
	    !gbpsy->pm_acquired) {
		ret = gb_pm_runtime_get_sync(connection->bundle);
		if (ret)
			dev_err(&connection->bundle->dev,
				"Fail to set wake lock for charging state\n");
		else
			gbpsy->pm_acquired = true;
	} else {
		/* Not charging (or get failed earlier): drop the reference */
		if (gbpsy->pm_acquired) {
			ret = gb_pm_runtime_put_autosuspend(connection->bundle);
			if (ret)
				dev_err(&connection->bundle->dev,
					"Fail to set wake unlock for none charging\n");
			else
				gbpsy->pm_acquired = false;
		}
	}

	mutex_unlock(&gbpsy->supply_lock);
}
393
394static void check_changed(struct gb_power_supply *gbpsy,
395 struct gb_power_supply_prop *prop)
396{
397 const struct gb_power_supply_changes *psyc;
398 int val = prop->val;
399 int prev_val = prop->previous_val;
400 bool changed = false;
401 int i;
402
403 for (i = 0; i < ARRAY_SIZE(psy_props_changes); i++) {
404 psyc = &psy_props_changes[i];
405 if (prop->prop == psyc->prop) {
406 if (!psyc->tolerance_change)
407 changed = true;
408 else if (val < prev_val &&
409 prev_val - val > psyc->tolerance_change)
410 changed = true;
411 else if (val > prev_val &&
412 val - prev_val > psyc->tolerance_change)
413 changed = true;
414
415 if (changed && psyc->prop_changed)
416 psyc->prop_changed(gbpsy, prop);
417
418 if (changed)
419 gbpsy->changed = true;
420 break;
421 }
422 }
423}
424
425static int total_props(struct gb_power_supply *gbpsy)
426{
427 /* this return the intval plus the strval properties */
428 return (gbpsy->properties_count + gbpsy->properties_count_str);
429}
430
431static void prop_append(struct gb_power_supply *gbpsy,
432 enum power_supply_property prop)
433{
434 enum power_supply_property *new_props_raw;
435
436 gbpsy->properties_count_str++;
437 new_props_raw = krealloc(gbpsy->props_raw, total_props(gbpsy) *
438 sizeof(enum power_supply_property),
439 GFP_KERNEL);
440 if (!new_props_raw)
441 return;
442 gbpsy->props_raw = new_props_raw;
443 gbpsy->props_raw[total_props(gbpsy) - 1] = prop;
444}
445
/*
 * Build a unique power-supply name in @name (at most @len bytes) based on
 * @init_name, appending "_<n>" while a registered supply with the same
 * name already exists.  An empty @init_name falls back to
 * "gb_power_supply".
 *
 * Returns the suffix index used (0 if the plain name was free) or -ENOMEM
 * when the name no longer fits in @len.
 */
static int __gb_power_supply_set_name(char *init_name, char *name, size_t len)
{
	unsigned int i = 0;
	int ret = 0;
	struct power_supply *psy;

	if (!strlen(init_name))
		init_name = "gb_power_supply";
	strlcpy(name, init_name, len);

	/*
	 * power_supply_get_by_name() takes a reference on a match, which we
	 * drop right away; we only care whether the name is taken.
	 */
	while ((ret < len) && (psy = power_supply_get_by_name(name))) {
		power_supply_put(psy);

		ret = snprintf(name, len, "%s_%u", init_name, ++i);
	}
	if (ret >= len)
		return -ENOMEM;
	return i;
}
465
466static void _gb_power_supply_append_props(struct gb_power_supply *gbpsy)
467{
468 if (strlen(gbpsy->manufacturer))
469 prop_append(gbpsy, POWER_SUPPLY_PROP_MANUFACTURER);
470 if (strlen(gbpsy->model_name))
471 prop_append(gbpsy, POWER_SUPPLY_PROP_MODEL_NAME);
472 if (strlen(gbpsy->serial_number))
473 prop_append(gbpsy, POWER_SUPPLY_PROP_SERIAL_NUMBER);
474}
475
/*
 * Fetch the static description of one supply (manufacturer, model, serial
 * number, type and property count) from the module and cache it.
 *
 * Returns 0 on success or a negative errno.  On a partial kstrndup()
 * failure the earlier strings are left in place; they are freed later by
 * _gb_power_supply_free() on the release path.
 */
static int gb_power_supply_description_get(struct gb_power_supply *gbpsy)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	struct gb_power_supply_get_description_request req;
	struct gb_power_supply_get_description_response resp;
	int ret;

	req.psy_id = gbpsy->id;

	ret = gb_operation_sync(connection,
				GB_POWER_SUPPLY_TYPE_GET_DESCRIPTION,
				&req, sizeof(req), &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	/* Bound the copies at PROP_MAX in case the strings are unterminated. */
	gbpsy->manufacturer = kstrndup(resp.manufacturer, PROP_MAX, GFP_KERNEL);
	if (!gbpsy->manufacturer)
		return -ENOMEM;
	gbpsy->model_name = kstrndup(resp.model, PROP_MAX, GFP_KERNEL);
	if (!gbpsy->model_name)
		return -ENOMEM;
	gbpsy->serial_number = kstrndup(resp.serial_number, PROP_MAX,
				       GFP_KERNEL);
	if (!gbpsy->serial_number)
		return -ENOMEM;

	gbpsy->type = le16_to_cpu(resp.type);
	gbpsy->properties_count = resp.properties_count;

	return 0;
}
507
508static int gb_power_supply_prop_descriptors_get(struct gb_power_supply *gbpsy)
509{
510 struct gb_connection *connection = get_conn_from_psy(gbpsy);
511 struct gb_power_supply_get_property_descriptors_request *req;
512 struct gb_power_supply_get_property_descriptors_response *resp;
513 struct gb_operation *op;
514 u8 props_count = gbpsy->properties_count;
515 enum power_supply_property psp;
516 int ret;
517 int i, r = 0;
518
519 if (props_count == 0)
520 return 0;
521
522 op = gb_operation_create(connection,
523 GB_POWER_SUPPLY_TYPE_GET_PROP_DESCRIPTORS,
524 sizeof(req), sizeof(*resp) + props_count *
525 sizeof(struct gb_power_supply_props_desc),
526 GFP_KERNEL);
527 if (!op)
528 return -ENOMEM;
529
530 req = op->request->payload;
531 req->psy_id = gbpsy->id;
532
533 ret = gb_operation_request_send_sync(op);
534 if (ret < 0)
535 goto out_put_operation;
536
537 resp = op->response->payload;
538
539 /* validate received properties */
540 for (i = 0; i < props_count; i++) {
541 ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
542 if (ret < 0) {
543 dev_warn(&connection->bundle->dev,
544 "greybus property %u it is not supported by this kernel, dropped\n",
545 resp->props[i].property);
546 gbpsy->properties_count--;
547 }
548 }
549
550 gbpsy->props = kcalloc(gbpsy->properties_count, sizeof(*gbpsy->props),
551 GFP_KERNEL);
552 if (!gbpsy->props) {
553 ret = -ENOMEM;
554 goto out_put_operation;
555 }
556
557 gbpsy->props_raw = kcalloc(gbpsy->properties_count,
558 sizeof(*gbpsy->props_raw), GFP_KERNEL);
559 if (!gbpsy->props_raw) {
560 ret = -ENOMEM;
561 goto out_put_operation;
562 }
563
564 /* Store available properties, skip the ones we do not support */
565 for (i = 0; i < props_count; i++) {
566 ret = get_psp_from_gb_prop(resp->props[i].property, &psp);
567 if (ret < 0) {
568 r++;
569 continue;
570 }
571 gbpsy->props[i - r].prop = psp;
572 gbpsy->props[i - r].gb_prop = resp->props[i].property;
573 gbpsy->props_raw[i - r] = psp;
574 if (resp->props[i].is_writeable)
575 gbpsy->props[i - r].is_writeable = true;
576 }
577
578 /*
579 * now append the properties that we already got information in the
580 * get_description operation. (char * ones)
581 */
582 _gb_power_supply_append_props(gbpsy);
583
584 ret = 0;
585out_put_operation:
586 gb_operation_put(op);
587
588 return ret;
589}
590
/*
 * Fetch the current value of one integer property from the module and
 * update the local cache, recording the previous value and running the
 * change-detection logic when the value differs.
 *
 * Returns 0 on success (including "no change") or a negative errno.
 */
static int __gb_power_supply_property_update(struct gb_power_supply *gbpsy,
					     enum power_supply_property psp)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	struct gb_power_supply_prop *prop;
	struct gb_power_supply_get_property_request req;
	struct gb_power_supply_get_property_response resp;
	int val;
	int ret;

	prop = get_psy_prop(gbpsy, psp);
	if (!prop)
		return -EINVAL;
	req.psy_id = gbpsy->id;
	req.property = prop->gb_prop;

	ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_GET_PROPERTY,
				&req, sizeof(req), &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	val = le32_to_cpu(resp.prop_val);
	if (val == prop->val)
		return 0;

	prop->previous_val = prop->val;
	prop->val = val;

	check_changed(gbpsy, prop);

	return 0;
}
623
624static int __gb_power_supply_property_get(struct gb_power_supply *gbpsy,
625 enum power_supply_property psp,
626 union power_supply_propval *val)
627{
628 struct gb_power_supply_prop *prop;
629
630 prop = get_psy_prop(gbpsy, psp);
631 if (!prop)
632 return -EINVAL;
633
634 val->intval = prop->val;
635 return 0;
636}
637
638static int __gb_power_supply_property_strval_get(struct gb_power_supply *gbpsy,
639 enum power_supply_property psp,
640 union power_supply_propval *val)
641{
642 switch (psp) {
643 case POWER_SUPPLY_PROP_MODEL_NAME:
644 val->strval = gbpsy->model_name;
645 break;
646 case POWER_SUPPLY_PROP_MANUFACTURER:
647 val->strval = gbpsy->manufacturer;
648 break;
649 case POWER_SUPPLY_PROP_SERIAL_NUMBER:
650 val->strval = gbpsy->serial_number;
651 break;
652 default:
653 break;
654 }
655
656 return 0;
657}
658
/*
 * Dispatch a property read to the integer- or string-valued cache getter.
 *
 * NOTE(review): errors from the getters are only logged — the function
 * always returns 0, and on failure *val may be left unmodified.  Confirm
 * the power-supply core callers tolerate this before changing it.
 */
static int _gb_power_supply_property_get(struct gb_power_supply *gbpsy,
					 enum power_supply_property psp,
					 union power_supply_propval *val)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	int ret;

	/*
	 * Properties of type const char *, were already fetched on
	 * get_description operation and should be cached in gb
	 */
	if (is_prop_valint(psp))
		ret = __gb_power_supply_property_get(gbpsy, psp, val);
	else
		ret = __gb_power_supply_property_strval_get(gbpsy, psp, val);

	if (ret < 0)
		dev_err(&connection->bundle->dev, "get property %u\n", psp);

	return 0;
}
680
681static int is_cache_valid(struct gb_power_supply *gbpsy)
682{
683 /* check if cache is good enough or it has expired */
684 if (gbpsy->cache_invalid) {
685 gbpsy->cache_invalid = 0;
686 return 0;
687 }
688
689 if (gbpsy->last_update &&
690 time_is_after_jiffies(gbpsy->last_update +
691 msecs_to_jiffies(cache_time)))
692 return 1;
693
694 return 0;
695}
696
/*
 * Refresh all integer properties from the module unless the cache is
 * still valid.  The bundle is woken for the duration of the transfers;
 * last_update is only stamped when every property fetched cleanly.
 *
 * Returns 0 on success (or valid cache) or a negative errno.
 */
static int gb_power_supply_status_get(struct gb_power_supply *gbpsy)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	int ret = 0;
	int i;

	if (is_cache_valid(gbpsy))
		return 0;

	ret = gb_pm_runtime_get_sync(connection->bundle);
	if (ret)
		return ret;

	for (i = 0; i < gbpsy->properties_count; i++) {
		ret = __gb_power_supply_property_update(gbpsy,
							gbpsy->props[i].prop);
		if (ret < 0)
			break;
	}

	if (ret == 0)
		gbpsy->last_update = jiffies;

	gb_pm_runtime_put_autosuspend(connection->bundle);
	return ret;
}
723
724static void gb_power_supply_status_update(struct gb_power_supply *gbpsy)
725{
726 /* check if there a change that need to be reported */
727 gb_power_supply_status_get(gbpsy);
728
729 if (!gbpsy->changed)
730 return;
731
732 gbpsy->update_interval = update_interval_init;
733 __gb_power_supply_changed(gbpsy);
734 gbpsy->changed = false;
735}
736
/*
 * Delayed-work poller: refresh the supply state, back off the interval
 * and reschedule itself.  A zero update_interval stops the loop (used by
 * the release path to quiesce the work).
 */
static void gb_power_supply_work(struct work_struct *work)
{
	struct gb_power_supply *gbpsy = container_of(work,
						     struct gb_power_supply,
						     work.work);

	/*
	 * if the poll interval is not set, disable polling, this is helpful
	 * specially at unregister time.
	 */
	if (!gbpsy->update_interval)
		return;

	gb_power_supply_status_update(gbpsy);
	next_interval(gbpsy);
	/* update_interval is used directly as a jiffies delay here */
	schedule_delayed_work(&gbpsy->work, gbpsy->update_interval);
}
754
/* power_supply_desc.get_property callback: refresh then read the cache. */
static int get_property(struct power_supply *b,
			enum power_supply_property psp,
			union power_supply_propval *val)
{
	struct gb_power_supply *gbpsy = to_gb_power_supply(b);

	gb_power_supply_status_get(gbpsy);

	return _gb_power_supply_property_get(gbpsy, psp, val);
}
765
/*
 * Write one property value to the module and, on success, update the
 * local cache immediately so subsequent reads see the new value without
 * waiting for the next poll.  The bundle is woken for the transfer.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_power_supply_property_set(struct gb_power_supply *gbpsy,
					enum power_supply_property psp,
					int val)
{
	struct gb_connection *connection = get_conn_from_psy(gbpsy);
	struct gb_power_supply_prop *prop;
	struct gb_power_supply_set_property_request req;
	int ret;

	ret = gb_pm_runtime_get_sync(connection->bundle);
	if (ret)
		return ret;

	prop = get_psy_prop(gbpsy, psp);
	if (!prop) {
		ret = -EINVAL;
		goto out;
	}

	req.psy_id = gbpsy->id;
	req.property = prop->gb_prop;
	req.prop_val = cpu_to_le32((s32)val);

	ret = gb_operation_sync(connection, GB_POWER_SUPPLY_TYPE_SET_PROPERTY,
				&req, sizeof(req), NULL, 0);
	if (ret < 0)
		goto out;

	/* cache immediately the new value */
	prop->val = val;

out:
	gb_pm_runtime_put_autosuspend(connection->bundle);
	return ret;
}
801
/* power_supply_desc.set_property callback. */
static int set_property(struct power_supply *b,
			enum power_supply_property psp,
			const union power_supply_propval *val)
{
	struct gb_power_supply *gbpsy = to_gb_power_supply(b);

	return gb_power_supply_property_set(gbpsy, psp, val->intval);
}
810
/* power_supply_desc.property_is_writeable callback. */
static int property_is_writeable(struct power_supply *b,
				 enum power_supply_property psp)
{
	struct gb_power_supply *gbpsy = to_gb_power_supply(b);

	return is_psy_prop_writeable(gbpsy, psp);
}
818
819static int gb_power_supply_register(struct gb_power_supply *gbpsy)
820{
821 struct gb_connection *connection = get_conn_from_psy(gbpsy);
822 struct power_supply_config cfg = {};
823
824 cfg.drv_data = gbpsy;
825
826 gbpsy->desc.name = gbpsy->name;
827 gbpsy->desc.type = gbpsy->type;
828 gbpsy->desc.properties = gbpsy->props_raw;
829 gbpsy->desc.num_properties = total_props(gbpsy);
830 gbpsy->desc.get_property = get_property;
831 gbpsy->desc.set_property = set_property;
832 gbpsy->desc.property_is_writeable = property_is_writeable;
833
834 gbpsy->psy = power_supply_register(&connection->bundle->dev,
835 &gbpsy->desc, &cfg);
836 return PTR_ERR_OR_ZERO(gbpsy->psy);
837}
838
839static void _gb_power_supply_free(struct gb_power_supply *gbpsy)
840{
841 kfree(gbpsy->serial_number);
842 kfree(gbpsy->model_name);
843 kfree(gbpsy->manufacturer);
844 kfree(gbpsy->props_raw);
845 kfree(gbpsy->props);
846}
847
/*
 * Tear down one supply: stop the poller (a zero update_interval keeps the
 * work from rescheduling itself, then cancel it synchronously), drop the
 * power-supply core registration if it happened, and free the memory.
 */
static void _gb_power_supply_release(struct gb_power_supply *gbpsy)
{
	gbpsy->update_interval = 0;

	cancel_delayed_work_sync(&gbpsy->work);

	if (gbpsy->registered)
		power_supply_unregister(gbpsy->psy);

	_gb_power_supply_free(gbpsy);
}
859
/*
 * Release every supply of a bundle and the container itself.  Safe to
 * call from the probe error path before supplies->supply is allocated.
 */
static void _gb_power_supplies_release(struct gb_power_supplies *supplies)
{
	int i;

	if (!supplies->supply)
		return;

	mutex_lock(&supplies->supplies_lock);
	for (i = 0; i < supplies->supplies_count; i++)
		_gb_power_supply_release(&supplies->supply[i]);
	kfree(supplies->supply);
	mutex_unlock(&supplies->supplies_lock);
	kfree(supplies);
}
874
/*
 * Query how many supplies the module exposes and cache the count.  A
 * module reporting zero supplies is treated as invalid (-EINVAL).
 */
static int gb_power_supplies_get_count(struct gb_power_supplies *supplies)
{
	struct gb_power_supply_get_supplies_response resp;
	int ret;

	ret = gb_operation_sync(supplies->connection,
				GB_POWER_SUPPLY_TYPE_GET_SUPPLIES,
				NULL, 0, &resp, sizeof(resp));
	if (ret < 0)
		return ret;

	if (!resp.supplies_count)
		return -EINVAL;

	supplies->supplies_count = resp.supplies_count;

	return ret;
}
893
894static int gb_power_supply_config(struct gb_power_supplies *supplies, int id)
895{
896 struct gb_power_supply *gbpsy = &supplies->supply[id];
897 int ret;
898
899 gbpsy->supplies = supplies;
900 gbpsy->id = id;
901
902 ret = gb_power_supply_description_get(gbpsy);
903 if (ret < 0)
904 return ret;
905
906 return gb_power_supply_prop_descriptors_get(gbpsy);
907}
908
/*
 * Make one configured supply live: pick a unique name, register it with
 * the power-supply core and start the polling work immediately.  The
 * `registered` flag tells the release path an unregister is needed.
 *
 * Returns 0 on success or a negative errno.
 */
static int gb_power_supply_enable(struct gb_power_supply *gbpsy)
{
	int ret;

	/* guarantee that we have an unique name, before register */
	ret = __gb_power_supply_set_name(gbpsy->model_name, gbpsy->name,
					 sizeof(gbpsy->name));
	if (ret < 0)
		return ret;

	mutex_init(&gbpsy->supply_lock);

	ret = gb_power_supply_register(gbpsy);
	if (ret < 0)
		return ret;

	gbpsy->update_interval = update_interval_init;
	INIT_DELAYED_WORK(&gbpsy->work, gb_power_supply_work);
	schedule_delayed_work(&gbpsy->work, 0);

	/* everything went fine, mark it for release code to know */
	gbpsy->registered = true;

	return 0;
}
934
935static int gb_power_supplies_setup(struct gb_power_supplies *supplies)
936{
937 struct gb_connection *connection = supplies->connection;
938 int ret;
939 int i;
940
941 mutex_lock(&supplies->supplies_lock);
942
943 ret = gb_power_supplies_get_count(supplies);
944 if (ret < 0)
945 goto out;
946
947 supplies->supply = kzalloc(supplies->supplies_count *
948 sizeof(struct gb_power_supply),
949 GFP_KERNEL);
950
951 if (!supplies->supply) {
952 ret = -ENOMEM;
953 goto out;
954 }
955
956 for (i = 0; i < supplies->supplies_count; i++) {
957 ret = gb_power_supply_config(supplies, i);
958 if (ret < 0) {
959 dev_err(&connection->bundle->dev,
960 "Fail to configure supplies devices\n");
961 goto out;
962 }
963 }
964out:
965 mutex_unlock(&supplies->supplies_lock);
966 return ret;
967}
968
/*
 * Register every configured supply with the power-supply core.  Stops at
 * the first failure; already-enabled supplies are torn down later by the
 * release path.
 */
static int gb_power_supplies_register(struct gb_power_supplies *supplies)
{
	struct gb_connection *connection = supplies->connection;
	int ret = 0;
	int i;

	mutex_lock(&supplies->supplies_lock);

	for (i = 0; i < supplies->supplies_count; i++) {
		ret = gb_power_supply_enable(&supplies->supply[i]);
		if (ret < 0) {
			dev_err(&connection->bundle->dev,
				"Fail to enable supplies devices\n");
			break;
		}
	}

	mutex_unlock(&supplies->supplies_lock);
	return ret;
}
989
/*
 * Handler for unsolicited requests from the module.  Only the EVENT
 * operation is supported; it carries a supply id and an event mask.  An
 * UPDATE event invalidates the cache and triggers an immediate refresh.
 *
 * Returns 0 on success or a negative errno (which the greybus core
 * reports back as the operation status).
 */
static int gb_supplies_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_power_supplies *supplies = gb_connection_get_data(connection);
	struct gb_power_supply *gbpsy;
	struct gb_message *request;
	struct gb_power_supply_event_request *payload;
	u8 psy_id;
	u8 event;
	int ret = 0;

	if (op->type != GB_POWER_SUPPLY_TYPE_EVENT) {
		dev_err(&connection->bundle->dev,
			"Unsupported unsolicited event: %u\n", op->type);
		return -EINVAL;
	}

	request = op->request;

	/* Validate the payload size before dereferencing it. */
	if (request->payload_size < sizeof(*payload)) {
		dev_err(&connection->bundle->dev,
			"Wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	psy_id = payload->psy_id;
	mutex_lock(&supplies->supplies_lock);
	if (psy_id >= supplies->supplies_count ||
	    !supplies->supply[psy_id].registered) {
		dev_err(&connection->bundle->dev,
			"Event received for unconfigured power_supply id: %d\n",
			psy_id);
		ret = -EINVAL;
		goto out_unlock;
	}

	event = payload->event;
	/*
	 * we will only handle events after setup is done and before release is
	 * running. For that just check update_interval.
	 */
	gbpsy = &supplies->supply[psy_id];
	if (!gbpsy->update_interval) {
		ret = -ESHUTDOWN;
		goto out_unlock;
	}

	if (event & GB_POWER_SUPPLY_UPDATE) {
		/*
		 * we need to make sure we invalidate cache, if not no new
		 * values for the properties will be fetch and the all propose
		 * of this event is missed
		 */
		gbpsy->cache_invalid = 1;
		gb_power_supply_status_update(gbpsy);
	}

out_unlock:
	mutex_unlock(&supplies->supplies_lock);
	return ret;
}
1053
/*
 * Bundle probe: create the connection, enable TX-only while the supplies
 * are discovered (so no unsolicited events arrive before we are ready),
 * then enable RX and register the supplies.  On success the bundle is
 * allowed to runtime-suspend.
 *
 * Error paths funnel through _gb_power_supplies_release(), which also
 * frees the `supplies` container.
 */
static int gb_power_supply_probe(struct gb_bundle *bundle,
				 const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_power_supplies *supplies;
	int ret;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_POWER_SUPPLY)
		return -ENODEV;

	supplies = kzalloc(sizeof(*supplies), GFP_KERNEL);
	if (!supplies)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_supplies_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto out;
	}

	supplies->connection = connection;
	gb_connection_set_data(connection, supplies);

	mutex_init(&supplies->supplies_lock);

	greybus_set_drvdata(bundle, supplies);

	/* We aren't ready to receive an incoming request yet */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto error_connection_destroy;

	ret = gb_power_supplies_setup(supplies);
	if (ret < 0)
		goto error_connection_disable;

	/* We are ready to receive an incoming request now, enable RX as well */
	ret = gb_connection_enable(connection);
	if (ret)
		goto error_connection_disable;

	ret = gb_power_supplies_register(supplies);
	if (ret < 0)
		goto error_connection_disable;

	gb_pm_runtime_put_autosuspend(bundle);
	return 0;

error_connection_disable:
	gb_connection_disable(connection);
error_connection_destroy:
	gb_connection_destroy(connection);
out:
	_gb_power_supplies_release(supplies);
	return ret;
}
1116
/* Bundle disconnect: tear down the connection, then all supplies. */
static void gb_power_supply_disconnect(struct gb_bundle *bundle)
{
	struct gb_power_supplies *supplies = greybus_get_drvdata(bundle);

	gb_connection_disable(supplies->connection);
	gb_connection_destroy(supplies->connection);

	_gb_power_supplies_release(supplies);
}
1126
/* Match any bundle of the power-supply device class. */
static const struct greybus_bundle_id gb_power_supply_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_POWER_SUPPLY) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_power_supply_id_table);

static struct greybus_driver gb_power_supply_driver = {
	.name		= "power_supply",
	.probe		= gb_power_supply_probe,
	.disconnect	= gb_power_supply_disconnect,
	.id_table	= gb_power_supply_id_table,
};
module_greybus_driver(gb_power_supply_driver);

MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/pwm.c b/drivers/staging/greybus/pwm.c
new file mode 100644
index 000000000000..c4bf3298ba07
--- /dev/null
+++ b/drivers/staging/greybus/pwm.c
@@ -0,0 +1,338 @@
1/*
2 * PWM Greybus driver.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/pwm.h>
14
15#include "greybus.h"
16#include "gbphy.h"
17
/* Per-connection state for one greybus PWM controller. */
struct gb_pwm_chip {
	struct gb_connection	*connection;
	u8			pwm_max;	/* max pwm number */

	struct pwm_chip		chip;
	/* NOTE(review): `pwm` appears unused in this file — confirm before removal */
	struct pwm_chip		*pwm;
};
#define pwm_chip_to_gb_pwm_chip(chip) \
	container_of(chip, struct gb_pwm_chip, chip)
27
28
/*
 * Query the module for the number of PWM lines; the response carries the
 * highest valid line number, stored in pwm_max.  Returns 0 or -errno.
 */
static int gb_pwm_count_operation(struct gb_pwm_chip *pwmc)
{
	struct gb_pwm_count_response response;
	int ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_PWM_COUNT,
				NULL, 0, &response, sizeof(response));
	if (ret)
		return ret;
	pwmc->pwm_max = response.count;
	return 0;
}
41
/*
 * Activate (claim) PWM line @which on the module.  The device is woken
 * for the duration of the request.  Returns 0 or -errno.
 */
static int gb_pwm_activate_operation(struct gb_pwm_chip *pwmc,
				     u8 which)
{
	struct gb_pwm_activate_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ACTIVATE,
				&request, sizeof(request), NULL, 0);

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
66
/*
 * Deactivate (release) PWM line @which on the module.  The device is
 * woken for the duration of the request.  Returns 0 or -errno.
 */
static int gb_pwm_deactivate_operation(struct gb_pwm_chip *pwmc,
				       u8 which)
{
	struct gb_pwm_deactivate_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DEACTIVATE,
				&request, sizeof(request), NULL, 0);

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
91
/*
 * Program duty cycle and period for PWM line @which.  Units are whatever
 * the caller passes through — gb_pwm_config() hands in nanoseconds.
 * Returns 0 or -errno.
 */
static int gb_pwm_config_operation(struct gb_pwm_chip *pwmc,
				   u8 which, u32 duty, u32 period)
{
	struct gb_pwm_config_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;
	request.duty = cpu_to_le32(duty);
	request.period = cpu_to_le32(period);

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_CONFIG,
				&request, sizeof(request), NULL, 0);

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
118
/*
 * Set the output polarity of PWM line @which.  Returns 0 or -errno.
 */
static int gb_pwm_set_polarity_operation(struct gb_pwm_chip *pwmc,
					 u8 which, u8 polarity)
{
	struct gb_pwm_polarity_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;
	request.polarity = polarity;

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_POLARITY,
				&request, sizeof(request), NULL, 0);

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
144
/*
 * Start PWM output on line @which.  Note the asymmetric runtime-PM: on
 * success the reference taken here is deliberately kept so the device
 * stays powered while the PWM runs; gb_pwm_disable_operation() drops it.
 * Returns 0 or -errno.
 */
static int gb_pwm_enable_operation(struct gb_pwm_chip *pwmc,
				   u8 which)
{
	struct gb_pwm_enable_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		return ret;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_ENABLE,
				&request, sizeof(request), NULL, 0);
	if (ret)
		gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
169
/*
 * Stop PWM output on line @which.  The unconditional put at the end
 * pairs with the reference gb_pwm_enable_operation() kept while the
 * line was running.  Returns 0 or -errno.
 */
static int gb_pwm_disable_operation(struct gb_pwm_chip *pwmc,
				    u8 which)
{
	struct gb_pwm_disable_request request;
	struct gbphy_device *gbphy_dev;
	int ret;

	if (which > pwmc->pwm_max)
		return -EINVAL;

	request.which = which;

	ret = gb_operation_sync(pwmc->connection, GB_PWM_TYPE_DISABLE,
				&request, sizeof(request), NULL, 0);

	gbphy_dev = to_gbphy_dev(pwmc->chip.dev);
	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;
}
190
191static int gb_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
192{
193 struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
194
195 return gb_pwm_activate_operation(pwmc, pwm->hwpwm);
196};
197
/*
 * pwm_ops.free callback: warn if the line is still running (the module
 * side is not disabled here), then deactivate it.
 */
static void gb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
	struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);

	if (pwm_is_enabled(pwm))
		dev_warn(chip->dev, "freeing PWM device without disabling\n");

	gb_pwm_deactivate_operation(pwmc, pwm->hwpwm);
}
207
208static int gb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
209 int duty_ns, int period_ns)
210{
211 struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
212
213 return gb_pwm_config_operation(pwmc, pwm->hwpwm, duty_ns, period_ns);
214};
215
216static int gb_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
217 enum pwm_polarity polarity)
218{
219 struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
220
221 return gb_pwm_set_polarity_operation(pwmc, pwm->hwpwm, polarity);
222};
223
224static int gb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
225{
226 struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
227
228 return gb_pwm_enable_operation(pwmc, pwm->hwpwm);
229};
230
231static void gb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
232{
233 struct gb_pwm_chip *pwmc = pwm_chip_to_gb_pwm_chip(chip);
234
235 gb_pwm_disable_operation(pwmc, pwm->hwpwm);
236};
237
/* PWM framework operations, all backed by greybus RPCs above. */
static const struct pwm_ops gb_pwm_ops = {
	.request = gb_pwm_request,
	.free = gb_pwm_free,
	.config = gb_pwm_config,
	.set_polarity = gb_pwm_set_polarity,
	.enable = gb_pwm_enable,
	.disable = gb_pwm_disable,
	.owner = THIS_MODULE,
};
247
/*
 * gbphy probe: create and enable the connection, query the line count,
 * then register a pwm_chip with npwm = pwm_max + 1 lines.  On success
 * the device is allowed to runtime-suspend.
 */
static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
			const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct gb_pwm_chip *pwmc;
	struct pwm_chip *pwm;
	int ret;

	pwmc = kzalloc(sizeof(*pwmc), GFP_KERNEL);
	if (!pwmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  NULL);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_pwmc_free;
	}

	pwmc->connection = connection;
	gb_connection_set_data(connection, pwmc);
	gb_gbphy_set_data(gbphy_dev, pwmc);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_connection_destroy;

	/* Query number of pwms present */
	ret = gb_pwm_count_operation(pwmc);
	if (ret)
		goto exit_connection_disable;

	pwm = &pwmc->chip;

	pwm->dev = &gbphy_dev->dev;
	pwm->ops = &gb_pwm_ops;
	pwm->base = -1;			/* Allocate base dynamically */
	pwm->npwm = pwmc->pwm_max + 1;
	pwm->can_sleep = true;		/* FIXME */

	ret = pwmchip_add(pwm);
	if (ret) {
		dev_err(&gbphy_dev->dev,
			"failed to register PWM: %d\n", ret);
		goto exit_connection_disable;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_pwmc_free:
	kfree(pwmc);
	return ret;
}
307
/*
 * gbphy remove: resume the device (or at least pin the PM count if the
 * resume fails), unregister the chip and tear down the connection.
 */
static void gb_pwm_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_pwm_chip *pwmc = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = pwmc->connection;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	pwmchip_remove(&pwmc->chip);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	kfree(pwmc);
}
323
/* Match the gbphy PWM protocol. */
static const struct gbphy_device_id gb_pwm_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_PWM) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_pwm_id_table);

static struct gbphy_driver pwm_driver = {
	.name		= "pwm",
	.probe		= gb_pwm_probe,
	.remove		= gb_pwm_remove,
	.id_table	= gb_pwm_id_table,
};

module_gbphy_driver(pwm_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
new file mode 100644
index 000000000000..729d25811568
--- /dev/null
+++ b/drivers/staging/greybus/raw.c
@@ -0,0 +1,381 @@
1/*
2 * Greybus driver for the Raw protocol
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/slab.h>
12#include <linux/sizes.h>
13#include <linux/cdev.h>
14#include <linux/fs.h>
15#include <linux/idr.h>
16#include <linux/uaccess.h>
17
18#include "greybus.h"
19
/* Per-bundle state for one Raw-protocol character device. */
struct gb_raw {
	struct gb_connection *connection;

	struct list_head list;		/* queued received messages (raw_data) */
	int list_data;			/* total payload bytes currently queued */
	struct mutex list_lock;		/* protects list and list_data */
	dev_t dev;			/* allocated char device number */
	struct cdev cdev;
	struct device *device;		/* sysfs/dev node from device_create() */
};
30
31struct raw_data {
32 struct list_head entry;
33 u32 len;
34 u8 data[0];
35};
36
static struct class *raw_class;		/* /sys/class/gb_raw */
static int raw_major;			/* dynamically allocated char-dev major */
static const struct file_operations raw_fops;	/* defined below */
static DEFINE_IDA(minors);		/* minor-number allocator */
41
42/* Number of minor devices this driver supports */
43#define NUM_MINORS 256
44
45/* Maximum size of any one send data buffer we support */
46#define MAX_PACKET_SIZE (PAGE_SIZE * 2)
47
48/*
49 * Maximum size of the data in the receive buffer we allow before we start to
50 * drop messages on the floor
51 */
52#define MAX_DATA_SIZE (MAX_PACKET_SIZE * 8)
53
54/*
55 * Add the raw data message to the list of received messages.
56 */
57static int receive_data(struct gb_raw *raw, u32 len, u8 *data)
58{
59 struct raw_data *raw_data;
60 struct device *dev = &raw->connection->bundle->dev;
61 int retval = 0;
62
63 if (len > MAX_PACKET_SIZE) {
64 dev_err(dev, "Too big of a data packet, rejected\n");
65 return -EINVAL;
66 }
67
68 mutex_lock(&raw->list_lock);
69 if ((raw->list_data + len) > MAX_DATA_SIZE) {
70 dev_err(dev, "Too much data in receive buffer, now dropping packets\n");
71 retval = -EINVAL;
72 goto exit;
73 }
74
75 raw_data = kmalloc(sizeof(*raw_data) + len, GFP_KERNEL);
76 if (!raw_data) {
77 retval = -ENOMEM;
78 goto exit;
79 }
80
81 raw->list_data += len;
82 raw_data->len = len;
83 memcpy(&raw_data->data[0], data, len);
84
85 list_add_tail(&raw_data->entry, &raw->list);
86exit:
87 mutex_unlock(&raw->list_lock);
88 return retval;
89}
90
/*
 * Incoming-request handler for the Raw connection.
 *
 * Only GB_RAW_TYPE_SEND is expected; its payload is a __le32 length
 * followed by that many data bytes.  After validating the declared
 * length against the actual payload size, the data is queued via
 * receive_data().  Returns 0 on success, -EINVAL on malformed or
 * unknown requests.
 */
static int gb_raw_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct device *dev = &connection->bundle->dev;
	struct gb_raw *raw = greybus_get_drvdata(connection->bundle);
	struct gb_raw_send_request *receive;
	u32 len;

	if (op->type != GB_RAW_TYPE_SEND) {
		dev_err(dev, "unknown request type 0x%02x\n", op->type);
		return -EINVAL;
	}

	/* Verify size of payload */
	if (op->request->payload_size < sizeof(*receive)) {
		dev_err(dev, "raw receive request too small (%zu < %zu)\n",
			op->request->payload_size, sizeof(*receive));
		return -EINVAL;
	}
	receive = op->request->payload;
	len = le32_to_cpu(receive->len);
	/* Declared length must match exactly what was actually received. */
	if (len != (int)(op->request->payload_size - sizeof(__le32))) {
		dev_err(dev, "raw receive request wrong size %d vs %d\n", len,
			(int)(op->request->payload_size - sizeof(__le32)));
		return -EINVAL;
	}
	if (len == 0) {
		dev_err(dev, "raw receive request of 0 bytes?\n");
		return -EINVAL;
	}

	return receive_data(raw, len, receive->data);
}
124
/*
 * Send @len bytes of userspace data to the module as one
 * GB_RAW_TYPE_SEND operation.  The caller (raw_write) bounds @len to
 * MAX_PACKET_SIZE.  Returns 0 on success or a negative errno.
 */
static int gb_raw_send(struct gb_raw *raw, u32 len, const char __user *data)
{
	struct gb_connection *connection = raw->connection;
	struct gb_raw_send_request *request;
	int retval;

	/* Request header plus inline payload, copied straight from userspace. */
	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	if (copy_from_user(&request->data[0], data, len)) {
		kfree(request);
		return -EFAULT;
	}

	request->len = cpu_to_le32(len);

	retval = gb_operation_sync(connection, GB_RAW_TYPE_SEND,
				   request, len + sizeof(*request),
				   NULL, 0);

	kfree(request);
	return retval;
}
149
150static int gb_raw_probe(struct gb_bundle *bundle,
151 const struct greybus_bundle_id *id)
152{
153 struct greybus_descriptor_cport *cport_desc;
154 struct gb_connection *connection;
155 struct gb_raw *raw;
156 int retval;
157 int minor;
158
159 if (bundle->num_cports != 1)
160 return -ENODEV;
161
162 cport_desc = &bundle->cport_desc[0];
163 if (cport_desc->protocol_id != GREYBUS_PROTOCOL_RAW)
164 return -ENODEV;
165
166 raw = kzalloc(sizeof(*raw), GFP_KERNEL);
167 if (!raw)
168 return -ENOMEM;
169
170 connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
171 gb_raw_request_handler);
172 if (IS_ERR(connection)) {
173 retval = PTR_ERR(connection);
174 goto error_free;
175 }
176
177 INIT_LIST_HEAD(&raw->list);
178 mutex_init(&raw->list_lock);
179
180 raw->connection = connection;
181 greybus_set_drvdata(bundle, raw);
182
183 minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
184 if (minor < 0) {
185 retval = minor;
186 goto error_connection_destroy;
187 }
188
189 raw->dev = MKDEV(raw_major, minor);
190 cdev_init(&raw->cdev, &raw_fops);
191
192 retval = gb_connection_enable(connection);
193 if (retval)
194 goto error_remove_ida;
195
196 retval = cdev_add(&raw->cdev, raw->dev, 1);
197 if (retval)
198 goto error_connection_disable;
199
200 raw->device = device_create(raw_class, &connection->bundle->dev,
201 raw->dev, raw, "gb!raw%d", minor);
202 if (IS_ERR(raw->device)) {
203 retval = PTR_ERR(raw->device);
204 goto error_del_cdev;
205 }
206
207 return 0;
208
209error_del_cdev:
210 cdev_del(&raw->cdev);
211
212error_connection_disable:
213 gb_connection_disable(connection);
214
215error_remove_ida:
216 ida_simple_remove(&minors, minor);
217
218error_connection_destroy:
219 gb_connection_destroy(connection);
220
221error_free:
222 kfree(raw);
223 return retval;
224}
225
/*
 * Unbind from the bundle: tear down the char device and connection,
 * then free any received messages nobody ever read.
 */
static void gb_raw_disconnect(struct gb_bundle *bundle)
{
	struct gb_raw *raw = greybus_get_drvdata(bundle);
	struct gb_connection *connection = raw->connection;
	struct raw_data *raw_data;
	struct raw_data *temp;

	/* FIXME - handle removing a connection when the char device node is open. */
	device_destroy(raw_class, raw->dev);
	cdev_del(&raw->cdev);
	gb_connection_disable(connection);
	ida_simple_remove(&minors, MINOR(raw->dev));
	gb_connection_destroy(connection);

	/* Drop everything still queued on the receive list. */
	mutex_lock(&raw->list_lock);
	list_for_each_entry_safe(raw_data, temp, &raw->list, entry) {
		list_del(&raw_data->entry);
		kfree(raw_data);
	}
	mutex_unlock(&raw->list_lock);

	kfree(raw);
}
249
250/*
251 * Character device node interfaces.
252 *
253 * Note, we are using read/write to only allow a single read/write per message.
254 * This means for read(), you have to provide a big enough buffer for the full
255 * message to be copied into. If the buffer isn't big enough, the read() will
256 * fail with -ENOSPC.
257 */
258
259static int raw_open(struct inode *inode, struct file *file)
260{
261 struct cdev *cdev = inode->i_cdev;
262 struct gb_raw *raw = container_of(cdev, struct gb_raw, cdev);
263
264 file->private_data = raw;
265 return 0;
266}
267
268static ssize_t raw_write(struct file *file, const char __user *buf,
269 size_t count, loff_t *ppos)
270{
271 struct gb_raw *raw = file->private_data;
272 int retval;
273
274 if (!count)
275 return 0;
276
277 if (count > MAX_PACKET_SIZE)
278 return -E2BIG;
279
280 retval = gb_raw_send(raw, count, buf);
281 if (retval)
282 return retval;
283
284 return count;
285}
286
/*
 * Return the oldest queued message, whole, or nothing.
 *
 * An empty queue yields 0 (no blocking).  If the caller's buffer cannot
 * hold the entire next message, fail with -ENOSPC and leave the message
 * queued; otherwise copy it out, dequeue it, and return its length.
 */
static ssize_t raw_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct gb_raw *raw = file->private_data;
	int retval = 0;
	struct raw_data *raw_data;

	mutex_lock(&raw->list_lock);
	if (list_empty(&raw->list))
		goto exit;

	raw_data = list_first_entry(&raw->list, struct raw_data, entry);
	if (raw_data->len > count) {
		retval = -ENOSPC;
		goto exit;
	}

	if (copy_to_user(buf, &raw_data->data[0], raw_data->len)) {
		retval = -EFAULT;
		goto exit;
	}

	/* Message fully delivered: unlink it and update the accounting. */
	list_del(&raw_data->entry);
	raw->list_data -= raw_data->len;
	retval = raw_data->len;
	kfree(raw_data);

exit:
	mutex_unlock(&raw->list_lock);
	return retval;
}
318
/* Char-device interface: one message per read()/write(), no seeking. */
static const struct file_operations raw_fops = {
	.owner		= THIS_MODULE,
	.write		= raw_write,
	.read		= raw_read,
	.open		= raw_open,
	.llseek		= noop_llseek,
};
326
/* Match any bundle whose device class is Raw. */
static const struct greybus_bundle_id gb_raw_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_RAW) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_raw_id_table);

/* Bundle driver glue for the Raw protocol. */
static struct greybus_driver gb_raw_driver = {
	.name		= "raw",
	.probe		= gb_raw_probe,
	.disconnect	= gb_raw_disconnect,
	.id_table	= gb_raw_id_table,
};
339
/*
 * Module init: create the device class, reserve a char-dev region of
 * NUM_MINORS minors, then register the greybus driver.  Unwinds in
 * reverse order on any failure.
 */
static int raw_init(void)
{
	dev_t dev;
	int retval;

	raw_class = class_create(THIS_MODULE, "gb_raw");
	if (IS_ERR(raw_class)) {
		retval = PTR_ERR(raw_class);
		goto error_class;
	}

	retval = alloc_chrdev_region(&dev, 0, NUM_MINORS, "gb_raw");
	if (retval < 0)
		goto error_chrdev;

	raw_major = MAJOR(dev);

	retval = greybus_register(&gb_raw_driver);
	if (retval)
		goto error_gb;

	return 0;

error_gb:
	unregister_chrdev_region(dev, NUM_MINORS);
error_chrdev:
	class_destroy(raw_class);
error_class:
	return retval;
}
module_init(raw_init);
371
/* Module exit: undo raw_init() and release the minor-number IDA. */
static void __exit raw_exit(void)
{
	greybus_deregister(&gb_raw_driver);
	unregister_chrdev_region(MKDEV(raw_major, 0), NUM_MINORS);
	class_destroy(raw_class);
	ida_destroy(&minors);
}
module_exit(raw_exit);
380
381MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/sdio.c b/drivers/staging/greybus/sdio.c
new file mode 100644
index 000000000000..c7133b1c7fd8
--- /dev/null
+++ b/drivers/staging/greybus/sdio.c
@@ -0,0 +1,884 @@
1/*
2 * SD/MMC Greybus driver.
3 *
4 * Copyright 2014-2015 Google Inc.
5 * Copyright 2014-2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/mmc/core.h>
12#include <linux/mmc/host.h>
13#include <linux/mmc/mmc.h>
14#include <linux/scatterlist.h>
15#include <linux/workqueue.h>
16
17#include "greybus.h"
18#include "gbphy.h"
19
/* Per-connection state backing one mmc_host instance. */
struct gb_sdio_host {
	struct gb_connection *connection;
	struct gbphy_device *gbphy_dev;
	struct mmc_host *mmc;
	struct mmc_request *mrq;	/* request currently owned by the worker */
	struct mutex lock;	/* lock for this host */
	size_t data_max;	/* max data bytes per transfer operation */
	spinlock_t xfer;	/* lock to cancel ongoing transfer */
	bool xfer_stop;		/* stop-transmission requested */
	struct workqueue_struct *mrq_workqueue;	/* single-threaded request worker */
	struct work_struct mrqwork;
	u8 queued_events;	/* events deferred while host not registered */
	bool removed;
	bool card_present;
	bool read_only;
};
36
37
38#define GB_SDIO_RSP_R1_R5_R6_R7 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
39 GB_SDIO_RSP_OPCODE)
40#define GB_SDIO_RSP_R3_R4 (GB_SDIO_RSP_PRESENT)
41#define GB_SDIO_RSP_R2 (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
42 GB_SDIO_RSP_136)
43#define GB_SDIO_RSP_R1B (GB_SDIO_RSP_PRESENT | GB_SDIO_RSP_CRC | \
44 GB_SDIO_RSP_OPCODE | GB_SDIO_RSP_BUSY)
45
46/* kernel vdd starts at 0x80 and we need to translate to greybus ones 0x01 */
47#define GB_SDIO_VDD_SHIFT 8
48
49#ifndef MMC_CAP2_CORE_RUNTIME_PM
50#define MMC_CAP2_CORE_RUNTIME_PM 0
51#endif
52
53static inline bool single_op(struct mmc_command *cmd)
54{
55 uint32_t opcode = cmd->opcode;
56
57 return opcode == MMC_WRITE_BLOCK ||
58 opcode == MMC_READ_SINGLE_BLOCK;
59}
60
/*
 * Translate Greybus capability bits (from the get-caps response) into
 * the MMC core's caps/caps2 flags on our mmc_host.
 */
static void _gb_sdio_set_host_caps(struct gb_sdio_host *host, u32 r)
{
	u32 caps = 0;
	u32 caps2 = 0;

	caps = ((r & GB_SDIO_CAP_NONREMOVABLE) ? MMC_CAP_NONREMOVABLE : 0) |
		((r & GB_SDIO_CAP_4_BIT_DATA) ? MMC_CAP_4_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_8_BIT_DATA) ? MMC_CAP_8_BIT_DATA : 0) |
		((r & GB_SDIO_CAP_MMC_HS) ? MMC_CAP_MMC_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_SD_HS) ? MMC_CAP_SD_HIGHSPEED : 0) |
		((r & GB_SDIO_CAP_ERASE) ? MMC_CAP_ERASE : 0) |
		((r & GB_SDIO_CAP_1_2V_DDR) ? MMC_CAP_1_2V_DDR : 0) |
		((r & GB_SDIO_CAP_1_8V_DDR) ? MMC_CAP_1_8V_DDR : 0) |
		((r & GB_SDIO_CAP_POWER_OFF_CARD) ? MMC_CAP_POWER_OFF_CARD : 0) |
		((r & GB_SDIO_CAP_UHS_SDR12) ? MMC_CAP_UHS_SDR12 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR25) ? MMC_CAP_UHS_SDR25 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR50) ? MMC_CAP_UHS_SDR50 : 0) |
		((r & GB_SDIO_CAP_UHS_SDR104) ? MMC_CAP_UHS_SDR104 : 0) |
		((r & GB_SDIO_CAP_UHS_DDR50) ? MMC_CAP_UHS_DDR50 : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_A) ? MMC_CAP_DRIVER_TYPE_A : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_C) ? MMC_CAP_DRIVER_TYPE_C : 0) |
		((r & GB_SDIO_CAP_DRIVER_TYPE_D) ? MMC_CAP_DRIVER_TYPE_D : 0);

	caps2 = ((r & GB_SDIO_CAP_HS200_1_2V) ? MMC_CAP2_HS200_1_2V_SDR : 0) |
		((r & GB_SDIO_CAP_HS400_1_2V) ? MMC_CAP2_HS400_1_2V : 0) |
		((r & GB_SDIO_CAP_HS400_1_8V) ? MMC_CAP2_HS400_1_8V : 0) |
		((r & GB_SDIO_CAP_HS200_1_8V) ? MMC_CAP2_HS200_1_8V_SDR : 0);

	host->mmc->caps = caps;
	host->mmc->caps2 = caps2 | MMC_CAP2_CORE_RUNTIME_PM;

	/* A non-removable card is by definition always present. */
	if (caps & MMC_CAP_NONREMOVABLE)
		host->card_present = true;
}
95
/*
 * Translate the Greybus OCR (voltage range) bitmask from the module
 * into the MMC core's MMC_VDD_* bitmask.
 */
static u32 _gb_sdio_get_host_ocr(u32 ocr)
{
	return (((ocr & GB_SDIO_VDD_165_195) ? MMC_VDD_165_195 : 0) |
		((ocr & GB_SDIO_VDD_20_21) ? MMC_VDD_20_21 : 0) |
		((ocr & GB_SDIO_VDD_21_22) ? MMC_VDD_21_22 : 0) |
		((ocr & GB_SDIO_VDD_22_23) ? MMC_VDD_22_23 : 0) |
		((ocr & GB_SDIO_VDD_23_24) ? MMC_VDD_23_24 : 0) |
		((ocr & GB_SDIO_VDD_24_25) ? MMC_VDD_24_25 : 0) |
		((ocr & GB_SDIO_VDD_25_26) ? MMC_VDD_25_26 : 0) |
		((ocr & GB_SDIO_VDD_26_27) ? MMC_VDD_26_27 : 0) |
		((ocr & GB_SDIO_VDD_27_28) ? MMC_VDD_27_28 : 0) |
		((ocr & GB_SDIO_VDD_28_29) ? MMC_VDD_28_29 : 0) |
		((ocr & GB_SDIO_VDD_29_30) ? MMC_VDD_29_30 : 0) |
		((ocr & GB_SDIO_VDD_30_31) ? MMC_VDD_30_31 : 0) |
		((ocr & GB_SDIO_VDD_31_32) ? MMC_VDD_31_32 : 0) |
		((ocr & GB_SDIO_VDD_32_33) ? MMC_VDD_32_33 : 0) |
		((ocr & GB_SDIO_VDD_33_34) ? MMC_VDD_33_34 : 0) |
		((ocr & GB_SDIO_VDD_34_35) ? MMC_VDD_34_35 : 0) |
		((ocr & GB_SDIO_VDD_35_36) ? MMC_VDD_35_36 : 0)
		);
}
117
/*
 * Query the module's capabilities and program the mmc_host limits:
 * caps flags, maximum block size/count, supported OCR voltage mask and
 * clock frequency range.  Returns 0 on success or a negative errno.
 */
static int gb_sdio_get_caps(struct gb_sdio_host *host)
{
	struct gb_sdio_get_caps_response response;
	struct mmc_host *mmc = host->mmc;
	u16 data_max;
	u32 blksz;
	u32 ocr;
	u32 r;
	int ret;

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_GET_CAPABILITIES,
				NULL, 0, &response, sizeof(response));
	if (ret < 0)
		return ret;
	r = le32_to_cpu(response.caps);

	_gb_sdio_set_host_caps(host, r);

	/* get the max block size that could fit our payload */
	/*
	 * NOTE(review): data_max is u16 while the payload maximum is a
	 * size_t; this assumes the connection's payload limit fits in 16
	 * bits and exceeds both header sizes (else the subtraction would
	 * wrap) -- confirm against the operation core.
	 */
	data_max = gb_operation_get_payload_size_max(host->connection);
	data_max = min(data_max - sizeof(struct gb_sdio_transfer_request),
		       data_max - sizeof(struct gb_sdio_transfer_response));

	/* at least 512 bytes, at most what fits in one payload */
	blksz = min_t(u16, le16_to_cpu(response.max_blk_size), data_max);
	blksz = max_t(u32, 512, blksz);

	mmc->max_blk_size = rounddown_pow_of_two(blksz);
	mmc->max_blk_count = le16_to_cpu(response.max_blk_count);
	host->data_max = data_max;

	/* get ocr supported values */
	ocr = _gb_sdio_get_host_ocr(le32_to_cpu(response.ocr));
	mmc->ocr_avail = ocr;
	mmc->ocr_avail_sdio = mmc->ocr_avail;
	mmc->ocr_avail_sd = mmc->ocr_avail;
	mmc->ocr_avail_mmc = mmc->ocr_avail;

	/* get frequency range values */
	mmc->f_min = le32_to_cpu(response.f_min);
	mmc->f_max = le32_to_cpu(response.f_max);

	return 0;
}
161
162static void _gb_queue_event(struct gb_sdio_host *host, u8 event)
163{
164 if (event & GB_SDIO_CARD_INSERTED)
165 host->queued_events &= ~GB_SDIO_CARD_REMOVED;
166 else if (event & GB_SDIO_CARD_REMOVED)
167 host->queued_events &= ~GB_SDIO_CARD_INSERTED;
168
169 host->queued_events |= event;
170}
171
172static int _gb_sdio_process_events(struct gb_sdio_host *host, u8 event)
173{
174 u8 state_changed = 0;
175
176 if (event & GB_SDIO_CARD_INSERTED) {
177 if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
178 return 0;
179 if (host->card_present)
180 return 0;
181 host->card_present = true;
182 state_changed = 1;
183 }
184
185 if (event & GB_SDIO_CARD_REMOVED) {
186 if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
187 return 0;
188 if (!(host->card_present))
189 return 0;
190 host->card_present = false;
191 state_changed = 1;
192 }
193
194 if (event & GB_SDIO_WP) {
195 host->read_only = true;
196 }
197
198 if (state_changed) {
199 dev_info(mmc_dev(host->mmc), "card %s now event\n",
200 (host->card_present ? "inserted" : "removed"));
201 mmc_detect_change(host->mmc, 0);
202 }
203
204 return 0;
205}
206
/*
 * Unsolicited-request handler: the only inbound request type is
 * GB_SDIO_TYPE_EVENT.  While the host is marked removed (not yet, or no
 * longer, registered) events are queued for later replay; otherwise
 * they are applied immediately.
 */
static int gb_sdio_request_handler(struct gb_operation *op)
{
	struct gb_sdio_host *host = gb_connection_get_data(op->connection);
	struct gb_message *request;
	struct gb_sdio_event_request *payload;
	u8 type = op->type;
	int ret = 0;
	u8 event;

	if (type != GB_SDIO_TYPE_EVENT) {
		dev_err(mmc_dev(host->mmc),
			"unsupported unsolicited event: %u\n", type);
		return -EINVAL;
	}

	request = op->request;

	if (request->payload_size < sizeof(*payload)) {
		dev_err(mmc_dev(host->mmc), "wrong event size received (%zu < %zu)\n",
			request->payload_size, sizeof(*payload));
		return -EINVAL;
	}

	payload = request->payload;
	event = payload->event;

	/* Defer events until the host is registered with the MMC core. */
	if (host->removed)
		_gb_queue_event(host, event);
	else
		ret = _gb_sdio_process_events(host, event);

	return ret;
}
240
241static int gb_sdio_set_ios(struct gb_sdio_host *host,
242 struct gb_sdio_set_ios_request *request)
243{
244 int ret;
245
246 ret = gbphy_runtime_get_sync(host->gbphy_dev);
247 if (ret)
248 return ret;
249
250 ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_SET_IOS, request,
251 sizeof(*request), NULL, 0);
252
253 gbphy_runtime_put_autosuspend(host->gbphy_dev);
254
255 return ret;
256}
257
/*
 * Send one chunk of a write transfer: copy @len bytes (@nblocks whole
 * blocks, starting @skip bytes into the scatterlist) into a transfer
 * request, and verify the module acknowledged the same amount.
 * Returns 0 on success or a negative errno.
 */
static int _gb_sdio_send(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 send_blksz;
	u16 send_blocks;
	int ret;

	/* callers chunk by data_max, so this should never trigger */
	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					len + sizeof(*request),
					sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	/* only the upper byte of the mmc_data flags goes on the wire */
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	copied = sg_pcopy_to_buffer(sg, sg_len, &request->data[0], len, skip);

	if (copied != len) {
		ret = -EINVAL;
		goto err_put_operation;
	}

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;

	send_blocks = le16_to_cpu(response->data_blocks);
	send_blksz = le16_to_cpu(response->data_blksz);

	/* the module must confirm exactly the amount we sent */
	if (len != send_blksz * send_blocks) {
		dev_err(mmc_dev(host->mmc), "send: size received: %zu != %d\n",
			len, send_blksz * send_blocks);
		ret = -EINVAL;
	}

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
311
/*
 * Receive one chunk of a read transfer: request @len bytes (@nblocks
 * whole blocks), verify the module returned exactly that amount, and
 * copy it into the scatterlist at offset @skip.  Returns 0 on success
 * or a negative errno.
 */
static int _gb_sdio_recv(struct gb_sdio_host *host, struct mmc_data *data,
			 size_t len, u16 nblocks, off_t skip)
{
	struct gb_sdio_transfer_request *request;
	struct gb_sdio_transfer_response *response;
	struct gb_operation *operation;
	struct scatterlist *sg = data->sg;
	unsigned int sg_len = data->sg_len;
	size_t copied;
	u16 recv_blksz;
	u16 recv_blocks;
	int ret;

	/* callers chunk by data_max, so this should never trigger */
	WARN_ON(len > host->data_max);

	operation = gb_operation_create(host->connection, GB_SDIO_TYPE_TRANSFER,
					sizeof(*request),
					len + sizeof(*response), GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	/* only the upper byte of the mmc_data flags goes on the wire */
	request->data_flags = (data->flags >> 8);
	request->data_blocks = cpu_to_le16(nblocks);
	request->data_blksz = cpu_to_le16(data->blksz);

	ret = gb_operation_request_send_sync(operation);
	if (ret < 0)
		goto err_put_operation;

	response = operation->response->payload;
	recv_blocks = le16_to_cpu(response->data_blocks);
	recv_blksz = le16_to_cpu(response->data_blksz);

	/* the module must return exactly the amount requested */
	if (len != recv_blksz * recv_blocks) {
		dev_err(mmc_dev(host->mmc), "recv: size received: %d != %zu\n",
			recv_blksz * recv_blocks, len);
		ret = -EINVAL;
		goto err_put_operation;
	}

	copied = sg_pcopy_from_buffer(sg, sg_len, &response->data[0], len,
				      skip);
	if (copied != len)
		ret = -EINVAL;

err_put_operation:
	gb_operation_put(operation);

	return ret;
}
363
/*
 * Move the whole mmc_data payload in chunks of at most host->data_max
 * bytes (always whole blocks), honoring a pending stop-transmission
 * request between chunks.  Sets data->error and returns the status.
 */
static int gb_sdio_transfer(struct gb_sdio_host *host, struct mmc_data *data)
{
	size_t left, len;
	off_t skip = 0;
	int ret = 0;
	u16 nblocks;

	/* single-block commands must not request multiple blocks */
	if (single_op(data->mrq->cmd) && data->blocks > 1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	left = data->blksz * data->blocks;

	while (left) {
		/* check if a stop transmission is pending */
		spin_lock(&host->xfer);
		if (host->xfer_stop) {
			host->xfer_stop = false;
			spin_unlock(&host->xfer);
			ret = -EINTR;
			goto out;
		}
		spin_unlock(&host->xfer);
		/* round the chunk down to a whole number of blocks */
		len = min(left, host->data_max);
		nblocks = len / data->blksz;
		len = nblocks * data->blksz;

		if (data->flags & MMC_DATA_READ) {
			ret = _gb_sdio_recv(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		} else {
			ret = _gb_sdio_send(host, data, len, nblocks, skip);
			if (ret < 0)
				goto out;
		}
		data->bytes_xfered += len;
		left -= len;
		skip += len;
	}

out:
	data->error = ret;
	return ret;
}
410
/*
 * Issue a single MMC command over Greybus: map the kernel's response
 * and command-type flags to the wire encoding, send the command, and
 * copy back the short or long (136-bit) response.  Sets cmd->error and
 * returns the status.
 */
static int gb_sdio_command(struct gb_sdio_host *host, struct mmc_command *cmd)
{
	struct gb_sdio_command_request request = {0};
	struct gb_sdio_command_response response;
	struct mmc_data *data = host->mrq->data;
	u8 cmd_flags;
	u8 cmd_type;
	int i;
	int ret;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE:
		cmd_flags = GB_SDIO_RSP_NONE;
		break;
	case MMC_RSP_R1:
		cmd_flags = GB_SDIO_RSP_R1_R5_R6_R7;
		break;
	case MMC_RSP_R1B:
		cmd_flags = GB_SDIO_RSP_R1B;
		break;
	case MMC_RSP_R2:
		cmd_flags = GB_SDIO_RSP_R2;
		break;
	case MMC_RSP_R3:
		cmd_flags = GB_SDIO_RSP_R3_R4;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd flag invalid 0x%04x\n",
			mmc_resp_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	switch (mmc_cmd_type(cmd)) {
	case MMC_CMD_BC:
		cmd_type = GB_SDIO_CMD_BC;
		break;
	case MMC_CMD_BCR:
		cmd_type = GB_SDIO_CMD_BCR;
		break;
	case MMC_CMD_AC:
		cmd_type = GB_SDIO_CMD_AC;
		break;
	case MMC_CMD_ADTC:
		cmd_type = GB_SDIO_CMD_ADTC;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "cmd type invalid 0x%04x\n",
			mmc_cmd_type(cmd));
		ret = -EINVAL;
		goto out;
	}

	request.cmd = cmd->opcode;
	request.cmd_flags = cmd_flags;
	request.cmd_type = cmd_type;
	request.cmd_arg = cpu_to_le32(cmd->arg);
	/* some controllers need to know at command time data details */
	if (data) {
		request.data_blocks = cpu_to_le16(data->blocks);
		request.data_blksz = cpu_to_le16(data->blksz);
	}

	ret = gb_operation_sync(host->connection, GB_SDIO_TYPE_COMMAND,
				&request, sizeof(request), &response,
				sizeof(response));
	if (ret < 0)
		goto out;

	/* no response expected */
	if (cmd_flags & GB_SDIO_RSP_NONE)
		goto out;

	/* long response expected */
	if (cmd_flags & GB_SDIO_RSP_R2)
		for (i = 0; i < 4; i++)
			cmd->resp[i] = le32_to_cpu(response.resp[i]);
	else
		cmd->resp[0] = le32_to_cpu(response.resp[0]);

out:
	cmd->error = ret;
	return ret;
}
495
/*
 * Workqueue handler that executes one queued mmc_request: optional
 * set-block-count (sbc), the command itself, any data transfer, and an
 * optional stop command.  Completion is always reported back to the MMC
 * core via mmc_request_done().
 */
static void gb_sdio_mrq_work(struct work_struct *work)
{
	struct gb_sdio_host *host;
	struct mmc_request *mrq;
	int ret;

	host = container_of(work, struct gb_sdio_host, mrqwork);

	ret = gbphy_runtime_get_sync(host->gbphy_dev);
	if (ret)
		return;

	mutex_lock(&host->lock);
	mrq = host->mrq;
	if (!mrq) {
		mutex_unlock(&host->lock);
		gbphy_runtime_put_autosuspend(host->gbphy_dev);
		dev_err(mmc_dev(host->mmc), "mmc request is NULL");
		return;
	}

	/* fail fast if the host went away after the request was queued */
	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto done;
	}

	if (mrq->sbc) {
		ret = gb_sdio_command(host, mrq->sbc);
		if (ret < 0)
			goto done;
	}

	ret = gb_sdio_command(host, mrq->cmd);
	if (ret < 0)
		goto done;

	if (mrq->data) {
		ret = gb_sdio_transfer(host, mrq->data);
		if (ret < 0)
			goto done;
	}

	if (mrq->stop) {
		ret = gb_sdio_command(host, mrq->stop);
		if (ret < 0)
			goto done;
	}

done:
	/* clear ownership before completing so a new request may be queued */
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(host->mmc, mrq);
	gbphy_runtime_put_autosuspend(host->gbphy_dev);
}
550
/*
 * mmc_host_ops.request: accept a request from the MMC core and hand it
 * to the worker.  A stop-transmission command additionally flags any
 * in-flight transfer for cancellation.  Requests arriving while the
 * host is removed or no card is present are completed immediately with
 * an error.
 */
static void gb_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct mmc_command *cmd = mrq->cmd;

	/* Check if it is a cancel to ongoing transfer */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		spin_lock(&host->xfer);
		host->xfer_stop = true;
		spin_unlock(&host->xfer);
	}

	mutex_lock(&host->lock);

	/* the worker clears host->mrq before completing, so this is a bug */
	WARN_ON(host->mrq);
	host->mrq = mrq;

	if (host->removed) {
		mrq->cmd->error = -ESHUTDOWN;
		goto out;
	}
	if (!host->card_present) {
		mrq->cmd->error = -ENOMEDIUM;
		goto out;
	}

	queue_work(host->mrq_workqueue, &host->mrqwork);

	mutex_unlock(&host->lock);
	return;

out:
	host->mrq = NULL;
	mutex_unlock(&host->lock);
	mmc_request_done(mmc, mrq);
}
587
/*
 * mmc_host_ops.set_ios: translate the core's ios settings (clock, vdd,
 * bus mode/width, power mode, timing, signal voltage, driver type) to
 * the Greybus wire encoding and push them to the module.  On success
 * the new settings are mirrored into mmc->ios.
 */
static void gb_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct gb_sdio_host *host = mmc_priv(mmc);
	struct gb_sdio_set_ios_request request;
	int ret;
	u8 power_mode;
	u8 bus_width;
	u8 timing;
	u8 signal_voltage;
	u8 drv_type;
	u32 vdd = 0;

	mutex_lock(&host->lock);
	request.clock = cpu_to_le32(ios->clock);

	/* rebase the kernel's vdd bit index onto the greybus one */
	if (ios->vdd)
		vdd = 1 << (ios->vdd - GB_SDIO_VDD_SHIFT);
	request.vdd = cpu_to_le32(vdd);

	request.bus_mode = (ios->bus_mode == MMC_BUSMODE_OPENDRAIN ?
			    GB_SDIO_BUSMODE_OPENDRAIN :
			    GB_SDIO_BUSMODE_PUSHPULL);

	switch (ios->power_mode) {
	case MMC_POWER_OFF:
	default:
		power_mode = GB_SDIO_POWER_OFF;
		break;
	case MMC_POWER_UP:
		power_mode = GB_SDIO_POWER_UP;
		break;
	case MMC_POWER_ON:
		power_mode = GB_SDIO_POWER_ON;
		break;
	case MMC_POWER_UNDEFINED:
		power_mode = GB_SDIO_POWER_UNDEFINED;
		break;
	}
	request.power_mode = power_mode;

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = GB_SDIO_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
	default:
		bus_width = GB_SDIO_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = GB_SDIO_BUS_WIDTH_8;
		break;
	}
	request.bus_width = bus_width;

	switch (ios->timing) {
	case MMC_TIMING_LEGACY:
	default:
		timing = GB_SDIO_TIMING_LEGACY;
		break;
	case MMC_TIMING_MMC_HS:
		timing = GB_SDIO_TIMING_MMC_HS;
		break;
	case MMC_TIMING_SD_HS:
		timing = GB_SDIO_TIMING_SD_HS;
		break;
	case MMC_TIMING_UHS_SDR12:
		timing = GB_SDIO_TIMING_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		timing = GB_SDIO_TIMING_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		timing = GB_SDIO_TIMING_UHS_SDR50;
		break;
	case MMC_TIMING_UHS_SDR104:
		timing = GB_SDIO_TIMING_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
		timing = GB_SDIO_TIMING_UHS_DDR50;
		break;
	case MMC_TIMING_MMC_DDR52:
		timing = GB_SDIO_TIMING_MMC_DDR52;
		break;
	case MMC_TIMING_MMC_HS200:
		timing = GB_SDIO_TIMING_MMC_HS200;
		break;
	case MMC_TIMING_MMC_HS400:
		timing = GB_SDIO_TIMING_MMC_HS400;
		break;
	}
	request.timing = timing;

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_330;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
	default:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_180;
		break;
	case MMC_SIGNAL_VOLTAGE_120:
		signal_voltage = GB_SDIO_SIGNAL_VOLTAGE_120;
		break;
	}
	request.signal_voltage = signal_voltage;

	switch (ios->drv_type) {
	case MMC_SET_DRIVER_TYPE_A:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_A;
		break;
	case MMC_SET_DRIVER_TYPE_C:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_C;
		break;
	case MMC_SET_DRIVER_TYPE_D:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_D;
		break;
	case MMC_SET_DRIVER_TYPE_B:
	default:
		drv_type = GB_SDIO_SET_DRIVER_TYPE_B;
		break;
	}
	request.drv_type = drv_type;

	ret = gb_sdio_set_ios(host, &request);
	if (ret < 0)
		goto out;

	/* only record the settings once the module accepted them */
	memcpy(&mmc->ios, ios, sizeof(mmc->ios));

out:
	mutex_unlock(&host->lock);
}
720
721static int gb_mmc_get_ro(struct mmc_host *mmc)
722{
723 struct gb_sdio_host *host = mmc_priv(mmc);
724
725 mutex_lock(&host->lock);
726 if (host->removed) {
727 mutex_unlock(&host->lock);
728 return -ESHUTDOWN;
729 }
730 mutex_unlock(&host->lock);
731
732 return host->read_only;
733}
734
735static int gb_mmc_get_cd(struct mmc_host *mmc)
736{
737 struct gb_sdio_host *host = mmc_priv(mmc);
738
739 mutex_lock(&host->lock);
740 if (host->removed) {
741 mutex_unlock(&host->lock);
742 return -ESHUTDOWN;
743 }
744 mutex_unlock(&host->lock);
745
746 return host->card_present;
747}
748
/*
 * Nothing to do locally for a signal-voltage switch; report success so
 * the core's UHS negotiation can proceed (presumably the module applies
 * the voltage via set_ios -- TODO confirm against the protocol spec).
 */
static int gb_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	return 0;
}
753
/* mmc_host_ops implemented in terms of Greybus SDIO operations. */
static const struct mmc_host_ops gb_sdio_ops = {
	.request	= gb_mmc_request,
	.set_ios	= gb_mmc_set_ios,
	.get_ro		= gb_mmc_get_ro,
	.get_cd		= gb_mmc_get_cd,
	.start_signal_voltage_switch	= gb_mmc_switch_voltage,
};
761
/*
 * Bind to an SDIO gbphy device: allocate the mmc_host, create and
 * enable the connection, query module capabilities, and register with
 * the MMC core.  Events that arrived before registration are replayed
 * at the end.  Returns 0 on success or a negative errno, unwinding in
 * reverse order on failure.
 */
static int gb_sdio_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct mmc_host *mmc;
	struct gb_sdio_host *host;
	int ret = 0;

	mmc = mmc_alloc_host(sizeof(*host), &gbphy_dev->dev);
	if (!mmc)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_sdio_request_handler);
	if (IS_ERR(connection)) {
		ret = PTR_ERR(connection);
		goto exit_mmc_free;
	}

	host = mmc_priv(mmc);
	host->mmc = mmc;
	/* treat as removed until mmc_add_host() succeeds: events are queued */
	host->removed = true;

	host->connection = connection;
	gb_connection_set_data(connection, host);
	host->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, host);

	/* TX-only first: we can send get-caps before we accept events */
	ret = gb_connection_enable_tx(connection);
	if (ret)
		goto exit_connection_destroy;

	ret = gb_sdio_get_caps(host);
	if (ret < 0)
		goto exit_connection_disable;

	mmc->ops = &gb_sdio_ops;

	mmc->max_segs = host->mmc->max_blk_count;

	/* for now we make a map 1:1 between max request and segment size */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mutex_init(&host->lock);
	spin_lock_init(&host->xfer);
	/* single-threaded: requests are executed strictly in order */
	host->mrq_workqueue = alloc_workqueue("mmc-%s", 0, 1,
					      dev_name(&gbphy_dev->dev));
	if (!host->mrq_workqueue) {
		ret = -ENOMEM;
		goto exit_connection_disable;
	}
	INIT_WORK(&host->mrqwork, gb_sdio_mrq_work);

	ret = gb_connection_enable(connection);
	if (ret)
		goto exit_wq_destroy;

	ret = mmc_add_host(mmc);
	if (ret < 0)
		goto exit_wq_destroy;
	host->removed = false;
	/* replay any events that arrived while we were setting up */
	ret = _gb_sdio_process_events(host, host->queued_events);
	host->queued_events = 0;

	gbphy_runtime_put_autosuspend(gbphy_dev);

	return ret;

exit_wq_destroy:
	destroy_workqueue(host->mrq_workqueue);
exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_mmc_free:
	mmc_free_host(mmc);

	return ret;
}
843
/*
 * Unbind: mark the host removed so new requests fail fast, quiesce the
 * worker and inbound traffic, then unregister from the MMC core and
 * tear down the connection.
 */
static void gb_sdio_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_sdio_host *host = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = host->connection;
	struct mmc_host *mmc;
	int ret;

	/* resume for teardown; on failure keep the PM count balanced */
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	mutex_lock(&host->lock);
	host->removed = true;
	mmc = host->mmc;
	gb_connection_set_data(connection, NULL);
	mutex_unlock(&host->lock);

	/* let any in-flight request finish before destroying the queue */
	flush_workqueue(host->mrq_workqueue);
	destroy_workqueue(host->mrq_workqueue);
	gb_connection_disable_rx(connection);
	mmc_remove_host(mmc);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	mmc_free_host(mmc);
}
869
/* gbphy match table: bind to CPorts speaking the Greybus SDIO protocol. */
static const struct gbphy_device_id gb_sdio_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SDIO) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_sdio_id_table);

/* Driver glue: probe/remove callbacks for matched SDIO gbphy devices. */
static struct gbphy_driver sdio_driver = {
	.name		= "sdio",
	.probe		= gb_sdio_probe,
	.remove		= gb_sdio_remove,
	.id_table	= gb_sdio_id_table,
};

module_gbphy_driver(sdio_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spi.c b/drivers/staging/greybus/spi.c
new file mode 100644
index 000000000000..c893552b5c0b
--- /dev/null
+++ b/drivers/staging/greybus/spi.c
@@ -0,0 +1,79 @@
1/*
2 * SPI bridge PHY driver.
3 *
4 * Copyright 2014-2016 Google Inc.
5 * Copyright 2014-2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/module.h>
11
12#include "greybus.h"
13#include "gbphy.h"
14#include "spilib.h"
15
16static struct spilib_ops *spilib_ops;
17
18static int gb_spi_probe(struct gbphy_device *gbphy_dev,
19 const struct gbphy_device_id *id)
20{
21 struct gb_connection *connection;
22 int ret;
23
24 connection = gb_connection_create(gbphy_dev->bundle,
25 le16_to_cpu(gbphy_dev->cport_desc->id),
26 NULL);
27 if (IS_ERR(connection))
28 return PTR_ERR(connection);
29
30 ret = gb_connection_enable(connection);
31 if (ret)
32 goto exit_connection_destroy;
33
34 ret = gb_spilib_master_init(connection, &gbphy_dev->dev, spilib_ops);
35 if (ret)
36 goto exit_connection_disable;
37
38 gb_gbphy_set_data(gbphy_dev, connection);
39
40 gbphy_runtime_put_autosuspend(gbphy_dev);
41 return 0;
42
43exit_connection_disable:
44 gb_connection_disable(connection);
45exit_connection_destroy:
46 gb_connection_destroy(connection);
47
48 return ret;
49}
50
/* Remove: unregister the SPI master, then tear down the connection. */
static void gb_spi_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_connection *connection = gb_gbphy_get_data(gbphy_dev);
	int ret;

	/* Resume the device; if that fails, just pin the PM refcount. */
	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	gb_spilib_master_exit(connection);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
}
64
65static const struct gbphy_device_id gb_spi_id_table[] = {
66 { GBPHY_PROTOCOL(GREYBUS_PROTOCOL_SPI) },
67 { },
68};
69MODULE_DEVICE_TABLE(gbphy, gb_spi_id_table);
70
71static struct gbphy_driver spi_driver = {
72 .name = "spi",
73 .probe = gb_spi_probe,
74 .remove = gb_spi_remove,
75 .id_table = gb_spi_id_table,
76};
77
78module_gbphy_driver(spi_driver);
79MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.c b/drivers/staging/greybus/spilib.c
new file mode 100644
index 000000000000..e97b19148497
--- /dev/null
+++ b/drivers/staging/greybus/spilib.c
@@ -0,0 +1,565 @@
1/*
2 * Greybus SPI library
3 *
4 * Copyright 2014-2016 Google Inc.
5 * Copyright 2014-2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/bitops.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/spi/spi.h>
15
16#include "greybus.h"
17#include "spilib.h"
18
19struct gb_spilib {
20 struct gb_connection *connection;
21 struct device *parent;
22 struct spi_transfer *first_xfer;
23 struct spi_transfer *last_xfer;
24 struct spilib_ops *ops;
25 u32 rx_xfer_offset;
26 u32 tx_xfer_offset;
27 u32 last_xfer_size;
28 unsigned int op_timeout;
29 u16 mode;
30 u16 flags;
31 u32 bits_per_word_mask;
32 u8 num_chipselect;
33 u32 min_speed_hz;
34 u32 max_speed_hz;
35};
36
37#define GB_SPI_STATE_MSG_DONE ((void *)0)
38#define GB_SPI_STATE_MSG_IDLE ((void *)1)
39#define GB_SPI_STATE_MSG_RUNNING ((void *)2)
40#define GB_SPI_STATE_OP_READY ((void *)3)
41#define GB_SPI_STATE_OP_DONE ((void *)4)
42#define GB_SPI_STATE_MSG_ERROR ((void *)-1)
43
44#define XFER_TIMEOUT_TOLERANCE 200
45
46static struct spi_master *get_master_from_spi(struct gb_spilib *spi)
47{
48 return gb_connection_get_data(spi->connection);
49}
50
51static int tx_header_fit_operation(u32 tx_size, u32 count, size_t data_max)
52{
53 size_t headers_size;
54
55 data_max -= sizeof(struct gb_spi_transfer_request);
56 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
57
58 return tx_size + headers_size > data_max ? 0 : 1;
59}
60
61static size_t calc_rx_xfer_size(u32 rx_size, u32 *tx_xfer_size, u32 len,
62 size_t data_max)
63{
64 size_t rx_xfer_size;
65
66 data_max -= sizeof(struct gb_spi_transfer_response);
67
68 if (rx_size + len > data_max)
69 rx_xfer_size = data_max - rx_size;
70 else
71 rx_xfer_size = len;
72
73 /* if this is a write_read, for symmetry read the same as write */
74 if (*tx_xfer_size && rx_xfer_size > *tx_xfer_size)
75 rx_xfer_size = *tx_xfer_size;
76 if (*tx_xfer_size && rx_xfer_size < *tx_xfer_size)
77 *tx_xfer_size = rx_xfer_size;
78
79 return rx_xfer_size;
80}
81
82static size_t calc_tx_xfer_size(u32 tx_size, u32 count, size_t len,
83 size_t data_max)
84{
85 size_t headers_size;
86
87 data_max -= sizeof(struct gb_spi_transfer_request);
88 headers_size = (count + 1) * sizeof(struct gb_spi_transfer);
89
90 if (tx_size + headers_size + len > data_max)
91 return data_max - (tx_size + sizeof(struct gb_spi_transfer));
92
93 return len;
94}
95
96static void clean_xfer_state(struct gb_spilib *spi)
97{
98 spi->first_xfer = NULL;
99 spi->last_xfer = NULL;
100 spi->rx_xfer_offset = 0;
101 spi->tx_xfer_offset = 0;
102 spi->last_xfer_size = 0;
103 spi->op_timeout = 0;
104}
105
106static bool is_last_xfer_done(struct gb_spilib *spi)
107{
108 struct spi_transfer *last_xfer = spi->last_xfer;
109
110 if ((spi->tx_xfer_offset + spi->last_xfer_size == last_xfer->len) ||
111 (spi->rx_xfer_offset + spi->last_xfer_size == last_xfer->len))
112 return true;
113
114 return false;
115}
116
117static int setup_next_xfer(struct gb_spilib *spi, struct spi_message *msg)
118{
119 struct spi_transfer *last_xfer = spi->last_xfer;
120
121 if (msg->state != GB_SPI_STATE_OP_DONE)
122 return 0;
123
124 /*
125 * if we transferred all content of the last transfer, reset values and
126 * check if this was the last transfer in the message
127 */
128 if (is_last_xfer_done(spi)) {
129 spi->tx_xfer_offset = 0;
130 spi->rx_xfer_offset = 0;
131 spi->op_timeout = 0;
132 if (last_xfer == list_last_entry(&msg->transfers,
133 struct spi_transfer,
134 transfer_list))
135 msg->state = GB_SPI_STATE_MSG_DONE;
136 else
137 spi->first_xfer = list_next_entry(last_xfer,
138 transfer_list);
139 return 0;
140 }
141
142 spi->first_xfer = last_xfer;
143 if (last_xfer->tx_buf)
144 spi->tx_xfer_offset += spi->last_xfer_size;
145
146 if (last_xfer->rx_buf)
147 spi->rx_xfer_offset += spi->last_xfer_size;
148
149 return 0;
150}
151
152static struct spi_transfer *get_next_xfer(struct spi_transfer *xfer,
153 struct spi_message *msg)
154{
155 if (xfer == list_last_entry(&msg->transfers, struct spi_transfer,
156 transfer_list))
157 return NULL;
158
159 return list_next_entry(xfer, transfer_list);
160}
161
162/* Routines to transfer data */
163static struct gb_operation *gb_spi_operation_create(struct gb_spilib *spi,
164 struct gb_connection *connection, struct spi_message *msg)
165{
166 struct gb_spi_transfer_request *request;
167 struct spi_device *dev = msg->spi;
168 struct spi_transfer *xfer;
169 struct gb_spi_transfer *gb_xfer;
170 struct gb_operation *operation;
171 u32 tx_size = 0, rx_size = 0, count = 0, xfer_len = 0, request_size;
172 u32 tx_xfer_size = 0, rx_xfer_size = 0, len;
173 u32 total_len = 0;
174 unsigned int xfer_timeout;
175 size_t data_max;
176 void *tx_data;
177
178 data_max = gb_operation_get_payload_size_max(connection);
179 xfer = spi->first_xfer;
180
181 /* Find number of transfers queued and tx/rx length in the message */
182
183 while (msg->state != GB_SPI_STATE_OP_READY) {
184 msg->state = GB_SPI_STATE_MSG_RUNNING;
185 spi->last_xfer = xfer;
186
187 if (!xfer->tx_buf && !xfer->rx_buf) {
188 dev_err(spi->parent,
189 "bufferless transfer, length %u\n", xfer->len);
190 msg->state = GB_SPI_STATE_MSG_ERROR;
191 return NULL;
192 }
193
194 tx_xfer_size = 0;
195 rx_xfer_size = 0;
196
197 if (xfer->tx_buf) {
198 len = xfer->len - spi->tx_xfer_offset;
199 if (!tx_header_fit_operation(tx_size, count, data_max))
200 break;
201 tx_xfer_size = calc_tx_xfer_size(tx_size, count,
202 len, data_max);
203 spi->last_xfer_size = tx_xfer_size;
204 }
205
206 if (xfer->rx_buf) {
207 len = xfer->len - spi->rx_xfer_offset;
208 rx_xfer_size = calc_rx_xfer_size(rx_size, &tx_xfer_size,
209 len, data_max);
210 spi->last_xfer_size = rx_xfer_size;
211 }
212
213 tx_size += tx_xfer_size;
214 rx_size += rx_xfer_size;
215
216 total_len += spi->last_xfer_size;
217 count++;
218
219 xfer = get_next_xfer(xfer, msg);
220 if (!xfer || total_len >= data_max)
221 msg->state = GB_SPI_STATE_OP_READY;
222 }
223
224 /*
225 * In addition to space for all message descriptors we need
226 * to have enough to hold all tx data.
227 */
228 request_size = sizeof(*request);
229 request_size += count * sizeof(*gb_xfer);
230 request_size += tx_size;
231
232 /* Response consists only of incoming data */
233 operation = gb_operation_create(connection, GB_SPI_TYPE_TRANSFER,
234 request_size, rx_size, GFP_KERNEL);
235 if (!operation)
236 return NULL;
237
238 request = operation->request->payload;
239 request->count = cpu_to_le16(count);
240 request->mode = dev->mode;
241 request->chip_select = dev->chip_select;
242
243 gb_xfer = &request->transfers[0];
244 tx_data = gb_xfer + count; /* place tx data after last gb_xfer */
245
246 /* Fill in the transfers array */
247 xfer = spi->first_xfer;
248 while (msg->state != GB_SPI_STATE_OP_DONE) {
249 if (xfer == spi->last_xfer)
250 xfer_len = spi->last_xfer_size;
251 else
252 xfer_len = xfer->len;
253
254 /* make sure we do not timeout in a slow transfer */
255 xfer_timeout = xfer_len * 8 * MSEC_PER_SEC / xfer->speed_hz;
256 xfer_timeout += GB_OPERATION_TIMEOUT_DEFAULT;
257
258 if (xfer_timeout > spi->op_timeout)
259 spi->op_timeout = xfer_timeout;
260
261 gb_xfer->speed_hz = cpu_to_le32(xfer->speed_hz);
262 gb_xfer->len = cpu_to_le32(xfer_len);
263 gb_xfer->delay_usecs = cpu_to_le16(xfer->delay_usecs);
264 gb_xfer->cs_change = xfer->cs_change;
265 gb_xfer->bits_per_word = xfer->bits_per_word;
266
267 /* Copy tx data */
268 if (xfer->tx_buf) {
269 gb_xfer->xfer_flags |= GB_SPI_XFER_WRITE;
270 memcpy(tx_data, xfer->tx_buf + spi->tx_xfer_offset,
271 xfer_len);
272 tx_data += xfer_len;
273 }
274
275 if (xfer->rx_buf)
276 gb_xfer->xfer_flags |= GB_SPI_XFER_READ;
277
278 if (xfer == spi->last_xfer) {
279 if (!is_last_xfer_done(spi))
280 gb_xfer->xfer_flags |= GB_SPI_XFER_INPROGRESS;
281 msg->state = GB_SPI_STATE_OP_DONE;
282 continue;
283 }
284
285 gb_xfer++;
286 xfer = get_next_xfer(xfer, msg);
287 }
288
289 msg->actual_length += total_len;
290
291 return operation;
292}
293
294static void gb_spi_decode_response(struct gb_spilib *spi,
295 struct spi_message *msg,
296 struct gb_spi_transfer_response *response)
297{
298 struct spi_transfer *xfer = spi->first_xfer;
299 void *rx_data = response->data;
300 u32 xfer_len;
301
302 while (xfer) {
303 /* Copy rx data */
304 if (xfer->rx_buf) {
305 if (xfer == spi->first_xfer)
306 xfer_len = xfer->len - spi->rx_xfer_offset;
307 else if (xfer == spi->last_xfer)
308 xfer_len = spi->last_xfer_size;
309 else
310 xfer_len = xfer->len;
311
312 memcpy(xfer->rx_buf + spi->rx_xfer_offset, rx_data,
313 xfer_len);
314 rx_data += xfer_len;
315 }
316
317 if (xfer == spi->last_xfer)
318 break;
319
320 xfer = list_next_entry(xfer, transfer_list);
321 }
322}
323
324static int gb_spi_transfer_one_message(struct spi_master *master,
325 struct spi_message *msg)
326{
327 struct gb_spilib *spi = spi_master_get_devdata(master);
328 struct gb_connection *connection = spi->connection;
329 struct gb_spi_transfer_response *response;
330 struct gb_operation *operation;
331 int ret = 0;
332
333 spi->first_xfer = list_first_entry_or_null(&msg->transfers,
334 struct spi_transfer,
335 transfer_list);
336 if (!spi->first_xfer) {
337 ret = -ENOMEM;
338 goto out;
339 }
340
341 msg->state = GB_SPI_STATE_MSG_IDLE;
342
343 while (msg->state != GB_SPI_STATE_MSG_DONE &&
344 msg->state != GB_SPI_STATE_MSG_ERROR) {
345 operation = gb_spi_operation_create(spi, connection, msg);
346 if (!operation) {
347 msg->state = GB_SPI_STATE_MSG_ERROR;
348 ret = -EINVAL;
349 continue;
350 }
351
352 ret = gb_operation_request_send_sync_timeout(operation,
353 spi->op_timeout);
354 if (!ret) {
355 response = operation->response->payload;
356 if (response)
357 gb_spi_decode_response(spi, msg, response);
358 } else {
359 dev_err(spi->parent,
360 "transfer operation failed: %d\n", ret);
361 msg->state = GB_SPI_STATE_MSG_ERROR;
362 }
363
364 gb_operation_put(operation);
365 setup_next_xfer(spi, msg);
366 }
367
368out:
369 msg->status = ret;
370 clean_xfer_state(spi);
371 spi_finalize_current_message(master);
372
373 return ret;
374}
375
376static int gb_spi_prepare_transfer_hardware(struct spi_master *master)
377{
378 struct gb_spilib *spi = spi_master_get_devdata(master);
379
380 return spi->ops->prepare_transfer_hardware(spi->parent);
381}
382
383static int gb_spi_unprepare_transfer_hardware(struct spi_master *master)
384{
385 struct gb_spilib *spi = spi_master_get_devdata(master);
386
387 spi->ops->unprepare_transfer_hardware(spi->parent);
388
389 return 0;
390}
391
/* Per-device setup hook — no work needed yet. */
static int gb_spi_setup(struct spi_device *spi)
{
	return 0;
}

/* Per-device cleanup hook — no work needed yet. */
static void gb_spi_cleanup(struct spi_device *spi)
{
}
402
403/* Routines to get controller information */
404
405/*
406 * Map Greybus spi mode bits/flags/bpw into Linux ones.
407 * All bits are same for now and so these macro's return same values.
408 */
409#define gb_spi_mode_map(mode) mode
410#define gb_spi_flags_map(flags) flags
411
412static int gb_spi_get_master_config(struct gb_spilib *spi)
413{
414 struct gb_spi_master_config_response response;
415 u16 mode, flags;
416 int ret;
417
418 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_MASTER_CONFIG,
419 NULL, 0, &response, sizeof(response));
420 if (ret < 0)
421 return ret;
422
423 mode = le16_to_cpu(response.mode);
424 spi->mode = gb_spi_mode_map(mode);
425
426 flags = le16_to_cpu(response.flags);
427 spi->flags = gb_spi_flags_map(flags);
428
429 spi->bits_per_word_mask = le32_to_cpu(response.bits_per_word_mask);
430 spi->num_chipselect = response.num_chipselect;
431
432 spi->min_speed_hz = le32_to_cpu(response.min_speed_hz);
433 spi->max_speed_hz = le32_to_cpu(response.max_speed_hz);
434
435 return 0;
436}
437
438static int gb_spi_setup_device(struct gb_spilib *spi, u8 cs)
439{
440 struct spi_master *master = get_master_from_spi(spi);
441 struct gb_spi_device_config_request request;
442 struct gb_spi_device_config_response response;
443 struct spi_board_info spi_board = { {0} };
444 struct spi_device *spidev;
445 int ret;
446 u8 dev_type;
447
448 request.chip_select = cs;
449
450 ret = gb_operation_sync(spi->connection, GB_SPI_TYPE_DEVICE_CONFIG,
451 &request, sizeof(request),
452 &response, sizeof(response));
453 if (ret < 0)
454 return ret;
455
456 dev_type = response.device_type;
457
458 if (dev_type == GB_SPI_SPI_DEV)
459 strlcpy(spi_board.modalias, "spidev",
460 sizeof(spi_board.modalias));
461 else if (dev_type == GB_SPI_SPI_NOR)
462 strlcpy(spi_board.modalias, "spi-nor",
463 sizeof(spi_board.modalias));
464 else if (dev_type == GB_SPI_SPI_MODALIAS)
465 memcpy(spi_board.modalias, response.name,
466 sizeof(spi_board.modalias));
467 else
468 return -EINVAL;
469
470 spi_board.mode = le16_to_cpu(response.mode);
471 spi_board.bus_num = master->bus_num;
472 spi_board.chip_select = cs;
473 spi_board.max_speed_hz = le32_to_cpu(response.max_speed_hz);
474
475 spidev = spi_new_device(master, &spi_board);
476 if (!spidev)
477 return -EINVAL;
478
479 return 0;
480}
481
482int gb_spilib_master_init(struct gb_connection *connection, struct device *dev,
483 struct spilib_ops *ops)
484{
485 struct gb_spilib *spi;
486 struct spi_master *master;
487 int ret;
488 u8 i;
489
490 /* Allocate master with space for data */
491 master = spi_alloc_master(dev, sizeof(*spi));
492 if (!master) {
493 dev_err(dev, "cannot alloc SPI master\n");
494 return -ENOMEM;
495 }
496
497 spi = spi_master_get_devdata(master);
498 spi->connection = connection;
499 gb_connection_set_data(connection, master);
500 spi->parent = dev;
501 spi->ops = ops;
502
503 /* get master configuration */
504 ret = gb_spi_get_master_config(spi);
505 if (ret)
506 goto exit_spi_put;
507
508 master->bus_num = -1; /* Allow spi-core to allocate it dynamically */
509 master->num_chipselect = spi->num_chipselect;
510 master->mode_bits = spi->mode;
511 master->flags = spi->flags;
512 master->bits_per_word_mask = spi->bits_per_word_mask;
513
514 /* Attach methods */
515 master->cleanup = gb_spi_cleanup;
516 master->setup = gb_spi_setup;
517 master->transfer_one_message = gb_spi_transfer_one_message;
518
519 if (ops && ops->prepare_transfer_hardware) {
520 master->prepare_transfer_hardware =
521 gb_spi_prepare_transfer_hardware;
522 }
523
524 if (ops && ops->unprepare_transfer_hardware) {
525 master->unprepare_transfer_hardware =
526 gb_spi_unprepare_transfer_hardware;
527 }
528
529 master->auto_runtime_pm = true;
530
531 ret = spi_register_master(master);
532 if (ret < 0)
533 goto exit_spi_put;
534
535 /* now, fetch the devices configuration */
536 for (i = 0; i < spi->num_chipselect; i++) {
537 ret = gb_spi_setup_device(spi, i);
538 if (ret < 0) {
539 dev_err(dev, "failed to allocate spi device %d: %d\n",
540 i, ret);
541 goto exit_spi_unregister;
542 }
543 }
544
545 return 0;
546
547exit_spi_unregister:
548 spi_unregister_master(master);
549exit_spi_put:
550 spi_master_put(master);
551
552 return ret;
553}
554EXPORT_SYMBOL_GPL(gb_spilib_master_init);
555
556void gb_spilib_master_exit(struct gb_connection *connection)
557{
558 struct spi_master *master = gb_connection_get_data(connection);
559
560 spi_unregister_master(master);
561 spi_master_put(master);
562}
563EXPORT_SYMBOL_GPL(gb_spilib_master_exit);
564
565MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/spilib.h b/drivers/staging/greybus/spilib.h
new file mode 100644
index 000000000000..566d0dde7f79
--- /dev/null
+++ b/drivers/staging/greybus/spilib.h
@@ -0,0 +1,24 @@
/*
 * Greybus SPI library header
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */
9
#ifndef __SPILIB_H
#define __SPILIB_H

struct device;
struct gb_connection;

/*
 * Optional controller hooks supplied by the host driver; either may be
 * left NULL when no hardware preparation is needed.
 */
struct spilib_ops {
	int (*prepare_transfer_hardware)(struct device *dev);
	void (*unprepare_transfer_hardware)(struct device *dev);
};

int gb_spilib_master_init(struct gb_connection *connection,
			  struct device *dev, struct spilib_ops *ops);
void gb_spilib_master_exit(struct gb_connection *connection);

#endif /* __SPILIB_H */
diff --git a/drivers/staging/greybus/svc.c b/drivers/staging/greybus/svc.c
new file mode 100644
index 000000000000..550055ec27a5
--- /dev/null
+++ b/drivers/staging/greybus/svc.c
@@ -0,0 +1,1486 @@
1/*
2 * SVC Greybus driver.
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/debugfs.h>
11#include <linux/workqueue.h>
12
13#include "greybus.h"
14
15#define SVC_INTF_EJECT_TIMEOUT 9000
16#define SVC_INTF_ACTIVATE_TIMEOUT 6000
17#define SVC_INTF_RESUME_TIMEOUT 3000
18
19struct gb_svc_deferred_request {
20 struct work_struct work;
21 struct gb_operation *operation;
22};
23
24
25static int gb_svc_queue_deferred_request(struct gb_operation *operation);
26
27static ssize_t endo_id_show(struct device *dev,
28 struct device_attribute *attr, char *buf)
29{
30 struct gb_svc *svc = to_gb_svc(dev);
31
32 return sprintf(buf, "0x%04x\n", svc->endo_id);
33}
34static DEVICE_ATTR_RO(endo_id);
35
36static ssize_t ap_intf_id_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38{
39 struct gb_svc *svc = to_gb_svc(dev);
40
41 return sprintf(buf, "%u\n", svc->ap_intf_id);
42}
43static DEVICE_ATTR_RO(ap_intf_id);
44
45// FIXME
46// This is a hack, we need to do this "right" and clean the interface up
47// properly, not just forcibly yank the thing out of the system and hope for the
48// best. But for now, people want their modules to come out without having to
49// throw the thing to the ground or get out a screwdriver.
50static ssize_t intf_eject_store(struct device *dev,
51 struct device_attribute *attr, const char *buf,
52 size_t len)
53{
54 struct gb_svc *svc = to_gb_svc(dev);
55 unsigned short intf_id;
56 int ret;
57
58 ret = kstrtou16(buf, 10, &intf_id);
59 if (ret < 0)
60 return ret;
61
62 dev_warn(dev, "Forcibly trying to eject interface %d\n", intf_id);
63
64 ret = gb_svc_intf_eject(svc, intf_id);
65 if (ret < 0)
66 return ret;
67
68 return len;
69}
70static DEVICE_ATTR_WO(intf_eject);
71
72static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
73 char *buf)
74{
75 struct gb_svc *svc = to_gb_svc(dev);
76
77 return sprintf(buf, "%s\n",
78 gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
79}
80
81static ssize_t watchdog_store(struct device *dev,
82 struct device_attribute *attr, const char *buf,
83 size_t len)
84{
85 struct gb_svc *svc = to_gb_svc(dev);
86 int retval;
87 bool user_request;
88
89 retval = strtobool(buf, &user_request);
90 if (retval)
91 return retval;
92
93 if (user_request)
94 retval = gb_svc_watchdog_enable(svc);
95 else
96 retval = gb_svc_watchdog_disable(svc);
97 if (retval)
98 return retval;
99 return len;
100}
101static DEVICE_ATTR_RW(watchdog);
102
103static ssize_t watchdog_action_show(struct device *dev,
104 struct device_attribute *attr, char *buf)
105{
106 struct gb_svc *svc = to_gb_svc(dev);
107
108 if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL)
109 return sprintf(buf, "panic\n");
110 else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO)
111 return sprintf(buf, "reset\n");
112
113 return -EINVAL;
114}
115
116static ssize_t watchdog_action_store(struct device *dev,
117 struct device_attribute *attr,
118 const char *buf, size_t len)
119{
120 struct gb_svc *svc = to_gb_svc(dev);
121
122 if (sysfs_streq(buf, "panic"))
123 svc->action = GB_SVC_WATCHDOG_BITE_PANIC_KERNEL;
124 else if (sysfs_streq(buf, "reset"))
125 svc->action = GB_SVC_WATCHDOG_BITE_RESET_UNIPRO;
126 else
127 return -EINVAL;
128
129 return len;
130}
131static DEVICE_ATTR_RW(watchdog_action);
132
133static int gb_svc_pwrmon_rail_count_get(struct gb_svc *svc, u8 *value)
134{
135 struct gb_svc_pwrmon_rail_count_get_response response;
136 int ret;
137
138 ret = gb_operation_sync(svc->connection,
139 GB_SVC_TYPE_PWRMON_RAIL_COUNT_GET, NULL, 0,
140 &response, sizeof(response));
141 if (ret) {
142 dev_err(&svc->dev, "failed to get rail count: %d\n", ret);
143 return ret;
144 }
145
146 *value = response.rail_count;
147
148 return 0;
149}
150
151static int gb_svc_pwrmon_rail_names_get(struct gb_svc *svc,
152 struct gb_svc_pwrmon_rail_names_get_response *response,
153 size_t bufsize)
154{
155 int ret;
156
157 ret = gb_operation_sync(svc->connection,
158 GB_SVC_TYPE_PWRMON_RAIL_NAMES_GET, NULL, 0,
159 response, bufsize);
160 if (ret) {
161 dev_err(&svc->dev, "failed to get rail names: %d\n", ret);
162 return ret;
163 }
164
165 if (response->status != GB_SVC_OP_SUCCESS) {
166 dev_err(&svc->dev,
167 "SVC error while getting rail names: %u\n",
168 response->status);
169 return -EREMOTEIO;
170 }
171
172 return 0;
173}
174
175static int gb_svc_pwrmon_sample_get(struct gb_svc *svc, u8 rail_id,
176 u8 measurement_type, u32 *value)
177{
178 struct gb_svc_pwrmon_sample_get_request request;
179 struct gb_svc_pwrmon_sample_get_response response;
180 int ret;
181
182 request.rail_id = rail_id;
183 request.measurement_type = measurement_type;
184
185 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_PWRMON_SAMPLE_GET,
186 &request, sizeof(request),
187 &response, sizeof(response));
188 if (ret) {
189 dev_err(&svc->dev, "failed to get rail sample: %d\n", ret);
190 return ret;
191 }
192
193 if (response.result) {
194 dev_err(&svc->dev,
195 "UniPro error while getting rail power sample (%d %d): %d\n",
196 rail_id, measurement_type, response.result);
197 switch (response.result) {
198 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
199 return -EINVAL;
200 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
201 return -ENOMSG;
202 default:
203 return -EREMOTEIO;
204 }
205 }
206
207 *value = le32_to_cpu(response.measurement);
208
209 return 0;
210}
211
212int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
213 u8 measurement_type, u32 *value)
214{
215 struct gb_svc_pwrmon_intf_sample_get_request request;
216 struct gb_svc_pwrmon_intf_sample_get_response response;
217 int ret;
218
219 request.intf_id = intf_id;
220 request.measurement_type = measurement_type;
221
222 ret = gb_operation_sync(svc->connection,
223 GB_SVC_TYPE_PWRMON_INTF_SAMPLE_GET,
224 &request, sizeof(request),
225 &response, sizeof(response));
226 if (ret) {
227 dev_err(&svc->dev, "failed to get intf sample: %d\n", ret);
228 return ret;
229 }
230
231 if (response.result) {
232 dev_err(&svc->dev,
233 "UniPro error while getting intf power sample (%d %d): %d\n",
234 intf_id, measurement_type, response.result);
235 switch (response.result) {
236 case GB_SVC_PWRMON_GET_SAMPLE_INVAL:
237 return -EINVAL;
238 case GB_SVC_PWRMON_GET_SAMPLE_NOSUPP:
239 return -ENOMSG;
240 default:
241 return -EREMOTEIO;
242 }
243 }
244
245 *value = le32_to_cpu(response.measurement);
246
247 return 0;
248}
249
250static struct attribute *svc_attrs[] = {
251 &dev_attr_endo_id.attr,
252 &dev_attr_ap_intf_id.attr,
253 &dev_attr_intf_eject.attr,
254 &dev_attr_watchdog.attr,
255 &dev_attr_watchdog_action.attr,
256 NULL,
257};
258ATTRIBUTE_GROUPS(svc);
259
260int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id)
261{
262 struct gb_svc_intf_device_id_request request;
263
264 request.intf_id = intf_id;
265 request.device_id = device_id;
266
267 return gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_DEVICE_ID,
268 &request, sizeof(request), NULL, 0);
269}
270
271int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id)
272{
273 struct gb_svc_intf_eject_request request;
274 int ret;
275
276 request.intf_id = intf_id;
277
278 /*
279 * The pulse width for module release in svc is long so we need to
280 * increase the timeout so the operation will not return to soon.
281 */
282 ret = gb_operation_sync_timeout(svc->connection,
283 GB_SVC_TYPE_INTF_EJECT, &request,
284 sizeof(request), NULL, 0,
285 SVC_INTF_EJECT_TIMEOUT);
286 if (ret) {
287 dev_err(&svc->dev, "failed to eject interface %u\n", intf_id);
288 return ret;
289 }
290
291 return 0;
292}
293
294int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable)
295{
296 struct gb_svc_intf_vsys_request request;
297 struct gb_svc_intf_vsys_response response;
298 int type, ret;
299
300 request.intf_id = intf_id;
301
302 if (enable)
303 type = GB_SVC_TYPE_INTF_VSYS_ENABLE;
304 else
305 type = GB_SVC_TYPE_INTF_VSYS_DISABLE;
306
307 ret = gb_operation_sync(svc->connection, type,
308 &request, sizeof(request),
309 &response, sizeof(response));
310 if (ret < 0)
311 return ret;
312 if (response.result_code != GB_SVC_INTF_VSYS_OK)
313 return -EREMOTEIO;
314 return 0;
315}
316
317int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable)
318{
319 struct gb_svc_intf_refclk_request request;
320 struct gb_svc_intf_refclk_response response;
321 int type, ret;
322
323 request.intf_id = intf_id;
324
325 if (enable)
326 type = GB_SVC_TYPE_INTF_REFCLK_ENABLE;
327 else
328 type = GB_SVC_TYPE_INTF_REFCLK_DISABLE;
329
330 ret = gb_operation_sync(svc->connection, type,
331 &request, sizeof(request),
332 &response, sizeof(response));
333 if (ret < 0)
334 return ret;
335 if (response.result_code != GB_SVC_INTF_REFCLK_OK)
336 return -EREMOTEIO;
337 return 0;
338}
339
340int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable)
341{
342 struct gb_svc_intf_unipro_request request;
343 struct gb_svc_intf_unipro_response response;
344 int type, ret;
345
346 request.intf_id = intf_id;
347
348 if (enable)
349 type = GB_SVC_TYPE_INTF_UNIPRO_ENABLE;
350 else
351 type = GB_SVC_TYPE_INTF_UNIPRO_DISABLE;
352
353 ret = gb_operation_sync(svc->connection, type,
354 &request, sizeof(request),
355 &response, sizeof(response));
356 if (ret < 0)
357 return ret;
358 if (response.result_code != GB_SVC_INTF_UNIPRO_OK)
359 return -EREMOTEIO;
360 return 0;
361}
362
363int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type)
364{
365 struct gb_svc_intf_activate_request request;
366 struct gb_svc_intf_activate_response response;
367 int ret;
368
369 request.intf_id = intf_id;
370
371 ret = gb_operation_sync_timeout(svc->connection,
372 GB_SVC_TYPE_INTF_ACTIVATE,
373 &request, sizeof(request),
374 &response, sizeof(response),
375 SVC_INTF_ACTIVATE_TIMEOUT);
376 if (ret < 0)
377 return ret;
378 if (response.status != GB_SVC_OP_SUCCESS) {
379 dev_err(&svc->dev, "failed to activate interface %u: %u\n",
380 intf_id, response.status);
381 return -EREMOTEIO;
382 }
383
384 *intf_type = response.intf_type;
385
386 return 0;
387}
388
389int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id)
390{
391 struct gb_svc_intf_resume_request request;
392 struct gb_svc_intf_resume_response response;
393 int ret;
394
395 request.intf_id = intf_id;
396
397 ret = gb_operation_sync_timeout(svc->connection,
398 GB_SVC_TYPE_INTF_RESUME,
399 &request, sizeof(request),
400 &response, sizeof(response),
401 SVC_INTF_RESUME_TIMEOUT);
402 if (ret < 0) {
403 dev_err(&svc->dev, "failed to send interface resume %u: %d\n",
404 intf_id, ret);
405 return ret;
406 }
407
408 if (response.status != GB_SVC_OP_SUCCESS) {
409 dev_err(&svc->dev, "failed to resume interface %u: %u\n",
410 intf_id, response.status);
411 return -EREMOTEIO;
412 }
413
414 return 0;
415}
416
417int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
418 u32 *value)
419{
420 struct gb_svc_dme_peer_get_request request;
421 struct gb_svc_dme_peer_get_response response;
422 u16 result;
423 int ret;
424
425 request.intf_id = intf_id;
426 request.attr = cpu_to_le16(attr);
427 request.selector = cpu_to_le16(selector);
428
429 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_GET,
430 &request, sizeof(request),
431 &response, sizeof(response));
432 if (ret) {
433 dev_err(&svc->dev, "failed to get DME attribute (%u 0x%04x %u): %d\n",
434 intf_id, attr, selector, ret);
435 return ret;
436 }
437
438 result = le16_to_cpu(response.result_code);
439 if (result) {
440 dev_err(&svc->dev, "UniPro error while getting DME attribute (%u 0x%04x %u): %u\n",
441 intf_id, attr, selector, result);
442 return -EREMOTEIO;
443 }
444
445 if (value)
446 *value = le32_to_cpu(response.attr_value);
447
448 return 0;
449}
450
451int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
452 u32 value)
453{
454 struct gb_svc_dme_peer_set_request request;
455 struct gb_svc_dme_peer_set_response response;
456 u16 result;
457 int ret;
458
459 request.intf_id = intf_id;
460 request.attr = cpu_to_le16(attr);
461 request.selector = cpu_to_le16(selector);
462 request.value = cpu_to_le32(value);
463
464 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_DME_PEER_SET,
465 &request, sizeof(request),
466 &response, sizeof(response));
467 if (ret) {
468 dev_err(&svc->dev, "failed to set DME attribute (%u 0x%04x %u %u): %d\n",
469 intf_id, attr, selector, value, ret);
470 return ret;
471 }
472
473 result = le16_to_cpu(response.result_code);
474 if (result) {
475 dev_err(&svc->dev, "UniPro error while setting DME attribute (%u 0x%04x %u %u): %u\n",
476 intf_id, attr, selector, value, result);
477 return -EREMOTEIO;
478 }
479
480 return 0;
481}
482
483int gb_svc_connection_create(struct gb_svc *svc,
484 u8 intf1_id, u16 cport1_id,
485 u8 intf2_id, u16 cport2_id,
486 u8 cport_flags)
487{
488 struct gb_svc_conn_create_request request;
489
490 request.intf1_id = intf1_id;
491 request.cport1_id = cpu_to_le16(cport1_id);
492 request.intf2_id = intf2_id;
493 request.cport2_id = cpu_to_le16(cport2_id);
494 request.tc = 0; /* TC0 */
495 request.flags = cport_flags;
496
497 return gb_operation_sync(svc->connection, GB_SVC_TYPE_CONN_CREATE,
498 &request, sizeof(request), NULL, 0);
499}
500
/*
 * gb_svc_connection_destroy() - tear down a CPort connection previously
 * created with gb_svc_connection_create().
 *
 * Failures are only logged; there is nothing callers could do about them
 * during teardown, hence the void return.
 */
void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
			       u8 intf2_id, u16 cport2_id)
{
	struct gb_svc_conn_destroy_request request;
	struct gb_connection *connection = svc->connection;
	int ret;

	request.intf1_id = intf1_id;
	request.cport1_id = cpu_to_le16(cport1_id);
	request.intf2_id = intf2_id;
	request.cport2_id = cpu_to_le16(cport2_id);

	ret = gb_operation_sync(connection, GB_SVC_TYPE_CONN_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy connection (%u:%u %u:%u): %d\n",
			intf1_id, cport1_id, intf2_id, cport2_id, ret);
	}
}
520
521int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
522 u32 strobe_delay, u32 refclk)
523{
524 struct gb_connection *connection = svc->connection;
525 struct gb_svc_timesync_enable_request request;
526
527 request.count = count;
528 request.frame_time = cpu_to_le64(frame_time);
529 request.strobe_delay = cpu_to_le32(strobe_delay);
530 request.refclk = cpu_to_le32(refclk);
531 return gb_operation_sync(connection,
532 GB_SVC_TYPE_TIMESYNC_ENABLE,
533 &request, sizeof(request), NULL, 0);
534}
535
536int gb_svc_timesync_disable(struct gb_svc *svc)
537{
538 struct gb_connection *connection = svc->connection;
539
540 return gb_operation_sync(connection,
541 GB_SVC_TYPE_TIMESYNC_DISABLE,
542 NULL, 0, NULL, 0);
543}
544
/*
 * gb_svc_timesync_authoritative() - fetch the SVC's authoritative frame-times.
 * @frame_time: caller-provided array of at least GB_TIMESYNC_MAX_STROBES
 *		entries, filled with one frame-time per strobe.
 *
 * Returns 0 on success or a negative errno.
 */
int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_authoritative_response response;
	int ret, i;

	ret = gb_operation_sync(connection,
				GB_SVC_TYPE_TIMESYNC_AUTHORITATIVE, NULL, 0,
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	/* Convert each little-endian wire value to host order. */
	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		frame_time[i] = le64_to_cpu(response.frame_time[i]);
	return 0;
}
561
/*
 * gb_svc_timesync_ping() - sample the SVC's current frame-time.
 * @frame_time: out parameter receiving the frame-time at the ping.
 *
 * Returns 0 on success or a negative errno.
 */
int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time)
{
	struct gb_connection *connection = svc->connection;
	struct gb_svc_timesync_ping_response response;
	int ret;

	ret = gb_operation_sync(connection,
				GB_SVC_TYPE_TIMESYNC_PING,
				NULL, 0,
				&response, sizeof(response));
	if (ret < 0)
		return ret;

	*frame_time = le64_to_cpu(response.frame_time);
	return 0;
}
578
579int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask)
580{
581 struct gb_connection *connection = svc->connection;
582 struct gb_svc_timesync_wake_pins_acquire_request request;
583
584 request.strobe_mask = cpu_to_le32(strobe_mask);
585 return gb_operation_sync(connection,
586 GB_SVC_TYPE_TIMESYNC_WAKE_PINS_ACQUIRE,
587 &request, sizeof(request),
588 NULL, 0);
589}
590
591int gb_svc_timesync_wake_pins_release(struct gb_svc *svc)
592{
593 struct gb_connection *connection = svc->connection;
594
595 return gb_operation_sync(connection,
596 GB_SVC_TYPE_TIMESYNC_WAKE_PINS_RELEASE,
597 NULL, 0, NULL, 0);
598}
599
/*
 * gb_svc_route_create() - create bi-directional routes between two devices.
 * Maps (@intf1_id, @dev1_id) <-> (@intf2_id, @dev2_id) in the switch routing
 * tables.
 *
 * Returns 0 on success or a negative errno.
 */
int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
			u8 intf2_id, u8 dev2_id)
{
	struct gb_svc_route_create_request request;

	request.intf1_id = intf1_id;
	request.dev1_id = dev1_id;
	request.intf2_id = intf2_id;
	request.dev2_id = dev2_id;

	return gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_CREATE,
				 &request, sizeof(request), NULL, 0);
}
614
/*
 * gb_svc_route_destroy() - destroy the bi-directional routes between
 * @intf1_id and @intf2_id.
 *
 * Failures are only logged; this runs on teardown paths where callers
 * cannot recover anyway.
 */
void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id)
{
	struct gb_svc_route_destroy_request request;
	int ret;

	request.intf1_id = intf1_id;
	request.intf2_id = intf2_id;

	ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_ROUTE_DESTROY,
				&request, sizeof(request), NULL, 0);
	if (ret) {
		dev_err(&svc->dev, "failed to destroy route (%u %u): %d\n",
			intf1_id, intf2_id, ret);
	}
}
631
632int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
633 u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
634 u8 tx_amplitude, u8 tx_hs_equalizer,
635 u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
636 u8 flags, u32 quirks,
637 struct gb_svc_l2_timer_cfg *local,
638 struct gb_svc_l2_timer_cfg *remote)
639{
640 struct gb_svc_intf_set_pwrm_request request;
641 struct gb_svc_intf_set_pwrm_response response;
642 int ret;
643 u16 result_code;
644
645 memset(&request, 0, sizeof(request));
646
647 request.intf_id = intf_id;
648 request.hs_series = hs_series;
649 request.tx_mode = tx_mode;
650 request.tx_gear = tx_gear;
651 request.tx_nlanes = tx_nlanes;
652 request.tx_amplitude = tx_amplitude;
653 request.tx_hs_equalizer = tx_hs_equalizer;
654 request.rx_mode = rx_mode;
655 request.rx_gear = rx_gear;
656 request.rx_nlanes = rx_nlanes;
657 request.flags = flags;
658 request.quirks = cpu_to_le32(quirks);
659 if (local)
660 request.local_l2timerdata = *local;
661 if (remote)
662 request.remote_l2timerdata = *remote;
663
664 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
665 &request, sizeof(request),
666 &response, sizeof(response));
667 if (ret < 0)
668 return ret;
669
670 result_code = response.result_code;
671 if (result_code != GB_SVC_SETPWRM_PWR_LOCAL) {
672 dev_err(&svc->dev, "set power mode = %d\n", result_code);
673 return -EIO;
674 }
675
676 return 0;
677}
678EXPORT_SYMBOL_GPL(gb_svc_intf_set_power_mode);
679
680int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id)
681{
682 struct gb_svc_intf_set_pwrm_request request;
683 struct gb_svc_intf_set_pwrm_response response;
684 int ret;
685 u16 result_code;
686
687 memset(&request, 0, sizeof(request));
688
689 request.intf_id = intf_id;
690 request.hs_series = GB_SVC_UNIPRO_HS_SERIES_A;
691 request.tx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
692 request.rx_mode = GB_SVC_UNIPRO_HIBERNATE_MODE;
693
694 ret = gb_operation_sync(svc->connection, GB_SVC_TYPE_INTF_SET_PWRM,
695 &request, sizeof(request),
696 &response, sizeof(response));
697 if (ret < 0) {
698 dev_err(&svc->dev,
699 "failed to send set power mode operation to interface %u: %d\n",
700 intf_id, ret);
701 return ret;
702 }
703
704 result_code = response.result_code;
705 if (result_code != GB_SVC_SETPWRM_PWR_OK) {
706 dev_err(&svc->dev,
707 "failed to hibernate the link for interface %u: %u\n",
708 intf_id, result_code);
709 return -EIO;
710 }
711
712 return 0;
713}
714
715int gb_svc_ping(struct gb_svc *svc)
716{
717 return gb_operation_sync_timeout(svc->connection, GB_SVC_TYPE_PING,
718 NULL, 0, NULL, 0,
719 GB_OPERATION_TIMEOUT_DEFAULT * 2);
720}
721
/*
 * gb_svc_version_request() - handle an incoming SVC protocol-version request.
 *
 * Validates the request size, rejects unsupported major versions, records the
 * negotiated version on @svc and sends it back in the response.
 *
 * Returns 0 on success or a negative errno (-EINVAL on short/unsupported
 * request, -ENOMEM if the response cannot be allocated).
 */
static int gb_svc_version_request(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_version_request *request;
	struct gb_svc_version_response *response;

	if (op->request->payload_size < sizeof(*request)) {
		dev_err(&svc->dev, "short version request (%zu < %zu)\n",
			op->request->payload_size,
			sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	/* Only reject a newer major version; minor differences are fine. */
	if (request->major > GB_SVC_VERSION_MAJOR) {
		dev_warn(&svc->dev, "unsupported major version (%u > %u)\n",
			 request->major, GB_SVC_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	svc->protocol_major = request->major;
	svc->protocol_minor = request->minor;

	if (!gb_operation_response_alloc(op, sizeof(*response), GFP_KERNEL))
		return -ENOMEM;

	/* Echo the version we agreed to speak. */
	response = op->response->payload;
	response->major = svc->protocol_major;
	response->minor = svc->protocol_minor;

	return 0;
}
756
/*
 * pwr_debugfs_voltage_read() - debugfs read for a rail's "voltage_now" file.
 *
 * Fetches a fresh voltage sample from the SVC power monitor for the rail
 * stashed in the inode's private data and renders it as a decimal string.
 */
static ssize_t pwr_debugfs_voltage_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_VOL, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get voltage sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
779
/*
 * pwr_debugfs_current_read() - debugfs read for a rail's "current_now" file.
 *
 * Same shape as pwr_debugfs_voltage_read() but samples the current.
 */
static ssize_t pwr_debugfs_current_read(struct file *file, char __user *buf,
					size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_CURR, &value);
	if (ret) {
		dev_err(&svc->dev,
			"failed to get current sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
802
/*
 * pwr_debugfs_power_read() - debugfs read for a rail's "power_now" file.
 *
 * Same shape as pwr_debugfs_voltage_read() but samples the power.
 */
static ssize_t pwr_debugfs_power_read(struct file *file, char __user *buf,
				      size_t len, loff_t *offset)
{
	struct svc_debugfs_pwrmon_rail *pwrmon_rails = file->f_inode->i_private;
	struct gb_svc *svc = pwrmon_rails->svc;
	int ret, desc;
	u32 value;
	char buff[16];

	ret = gb_svc_pwrmon_sample_get(svc, pwrmon_rails->id,
				       GB_SVC_PWRMON_TYPE_PWR, &value);
	if (ret) {
		dev_err(&svc->dev, "failed to get power sample %u: %d\n",
			pwrmon_rails->id, ret);
		return ret;
	}

	desc = scnprintf(buff, sizeof(buff), "%u\n", value);

	return simple_read_from_buffer(buf, len, offset, buff, desc);
}
824
/* Read-only file operations for the per-rail pwrmon debugfs entries. */
static const struct file_operations pwrmon_debugfs_voltage_fops = {
	.read		= pwr_debugfs_voltage_read,
};

static const struct file_operations pwrmon_debugfs_current_fops = {
	.read		= pwr_debugfs_current_read,
};

static const struct file_operations pwrmon_debugfs_power_fops = {
	.read		= pwr_debugfs_power_read,
};
836
/*
 * gb_svc_pwrmon_debugfs_init() - populate the "pwrmon" debugfs hierarchy.
 *
 * Queries the SVC for the number of power rails and their names, then creates
 * one directory per rail containing voltage_now/current_now/power_now files.
 * Best-effort: on any failure the partially created debugfs tree is removed
 * and the function returns silently.
 */
static void gb_svc_pwrmon_debugfs_init(struct gb_svc *svc)
{
	int i;
	size_t bufsize;
	struct dentry *dent;
	struct gb_svc_pwrmon_rail_names_get_response *rail_names;
	u8 rail_count;

	dent = debugfs_create_dir("pwrmon", svc->debugfs_dentry);
	if (IS_ERR_OR_NULL(dent))
		return;

	if (gb_svc_pwrmon_rail_count_get(svc, &rail_count))
		goto err_pwrmon_debugfs;

	/* Sanity-check the SVC-reported count before sizing buffers on it. */
	if (!rail_count || rail_count > GB_SVC_PWRMON_MAX_RAIL_COUNT)
		goto err_pwrmon_debugfs;

	bufsize = sizeof(*rail_names) +
		GB_SVC_PWRMON_RAIL_NAME_BUFSIZE * rail_count;

	rail_names = kzalloc(bufsize, GFP_KERNEL);
	if (!rail_names)
		goto err_pwrmon_debugfs;

	/* Lives for the lifetime of the SVC; freed in gb_svc_debugfs_exit(). */
	svc->pwrmon_rails = kcalloc(rail_count, sizeof(*svc->pwrmon_rails),
				    GFP_KERNEL);
	if (!svc->pwrmon_rails)
		goto err_pwrmon_debugfs_free;

	if (gb_svc_pwrmon_rail_names_get(svc, rail_names, bufsize))
		goto err_pwrmon_debugfs_free;

	for (i = 0; i < rail_count; i++) {
		struct dentry *dir;
		struct svc_debugfs_pwrmon_rail *rail = &svc->pwrmon_rails[i];
		char fname[GB_SVC_PWRMON_RAIL_NAME_BUFSIZE];

		snprintf(fname, sizeof(fname), "%s",
			 (char *)&rail_names->name[i]);

		rail->id = i;
		rail->svc = svc;

		dir = debugfs_create_dir(fname, dent);
		debugfs_create_file("voltage_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_voltage_fops);
		debugfs_create_file("current_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_current_fops);
		debugfs_create_file("power_now", S_IRUGO, dir, rail,
				    &pwrmon_debugfs_power_fops);
	}

	/* Name buffer only needed during creation. */
	kfree(rail_names);
	return;

err_pwrmon_debugfs_free:
	kfree(rail_names);
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;

err_pwrmon_debugfs:
	debugfs_remove(dent);
}
901
902static void gb_svc_debugfs_init(struct gb_svc *svc)
903{
904 svc->debugfs_dentry = debugfs_create_dir(dev_name(&svc->dev),
905 gb_debugfs_get());
906 gb_svc_pwrmon_debugfs_init(svc);
907}
908
/*
 * gb_svc_debugfs_exit() - remove the SVC debugfs tree and free the rail
 * descriptors allocated by gb_svc_pwrmon_debugfs_init().
 */
static void gb_svc_debugfs_exit(struct gb_svc *svc)
{
	debugfs_remove_recursive(svc->debugfs_dentry);
	/* Only free after the files referencing the rails are gone. */
	kfree(svc->pwrmon_rails);
	svc->pwrmon_rails = NULL;
}
915
/*
 * gb_svc_hello() - handle the SVC_HELLO request.
 *
 * The hello carries the endo id and the AP's interface id; only at this point
 * is enough known to register the SVC device, create its watchdog, debugfs
 * entries and timesync state. Finishes by queueing deferred hello processing.
 *
 * Returns 0 on success or a negative errno; on partial failure the already
 * registered pieces are torn down in reverse order.
 */
static int gb_svc_hello(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_svc_hello_request *hello_request;
	int ret;

	if (op->request->payload_size < sizeof(*hello_request)) {
		dev_warn(&svc->dev, "short hello request (%zu < %zu)\n",
			 op->request->payload_size,
			 sizeof(*hello_request));
		return -EINVAL;
	}

	hello_request = op->request->payload;
	svc->endo_id = le16_to_cpu(hello_request->endo_id);
	svc->ap_intf_id = hello_request->interface_id;

	ret = device_add(&svc->dev);
	if (ret) {
		dev_err(&svc->dev, "failed to register svc device: %d\n", ret);
		return ret;
	}

	ret = gb_svc_watchdog_create(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to create watchdog: %d\n", ret);
		goto err_unregister_device;
	}

	gb_svc_debugfs_init(svc);

	ret = gb_timesync_svc_add(svc);
	if (ret) {
		dev_err(&svc->dev, "failed to add SVC to timesync: %d\n", ret);
		gb_svc_debugfs_exit(svc);
		goto err_unregister_device;
	}

	return gb_svc_queue_deferred_request(op);

err_unregister_device:
	gb_svc_watchdog_destroy(svc);
	device_del(&svc->dev);
	return ret;
}
962
963static struct gb_interface *gb_svc_interface_lookup(struct gb_svc *svc,
964 u8 intf_id)
965{
966 struct gb_host_device *hd = svc->hd;
967 struct gb_module *module;
968 size_t num_interfaces;
969 u8 module_id;
970
971 list_for_each_entry(module, &hd->modules, hd_node) {
972 module_id = module->module_id;
973 num_interfaces = module->num_interfaces;
974
975 if (intf_id >= module_id &&
976 intf_id < module_id + num_interfaces) {
977 return module->interfaces[intf_id - module_id];
978 }
979 }
980
981 return NULL;
982}
983
984static struct gb_module *gb_svc_module_lookup(struct gb_svc *svc, u8 module_id)
985{
986 struct gb_host_device *hd = svc->hd;
987 struct gb_module *module;
988
989 list_for_each_entry(module, &hd->modules, hd_node) {
990 if (module->module_id == module_id)
991 return module;
992 }
993
994 return NULL;
995}
996
/*
 * gb_svc_process_hello_deferred() - deferred work for the SVC hello.
 *
 * Runs from the SVC workqueue after gb_svc_hello() completed.
 */
static void gb_svc_process_hello_deferred(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	int ret;

	/*
	 * XXX This is a hack/work-around to reconfigure the APBridgeA-Switch
	 * link to PWM G2, 1 Lane, Slow Auto, so that it has sufficient
	 * bandwidth for 3 audio streams plus boot-over-UniPro of a hot-plugged
	 * module.
	 *
	 * The code should be removed once SW-2217, Heuristic for UniPro
	 * Power Mode Changes is resolved.
	 */
	ret = gb_svc_intf_set_power_mode(svc, svc->ap_intf_id,
					 GB_SVC_UNIPRO_HS_SERIES_A,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 GB_SVC_SMALL_AMPLITUDE, GB_SVC_NO_DE_EMPHASIS,
					 GB_SVC_UNIPRO_SLOW_AUTO_MODE,
					 2, 1,
					 0, 0,
					 NULL, NULL);

	/* Non-fatal: the link stays up, just possibly in a faster mode. */
	if (ret)
		dev_warn(&svc->dev,
			 "power mode change failed on AP to switch link: %d\n",
			 ret);
}
1027
/*
 * gb_svc_process_module_inserted() - deferred handling of a module-inserted
 * event: create and register a gb_module covering the reported interfaces and
 * add it to the host device's module list.
 *
 * Errors are logged and the event dropped; there is no requester to report to.
 */
static void gb_svc_process_module_inserted(struct gb_operation *operation)
{
	struct gb_svc_module_inserted_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module;
	size_t num_interfaces;
	u8 module_id;
	u16 flags;
	int ret;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;
	num_interfaces = request->intf_count;
	flags = le16_to_cpu(request->flags);

	dev_dbg(&svc->dev, "%s - id = %u, num_interfaces = %zu, flags = 0x%04x\n",
		__func__, module_id, num_interfaces, flags);

	if (flags & GB_SVC_MODULE_INSERTED_FLAG_NO_PRIMARY) {
		dev_warn(&svc->dev, "no primary interface detected on module %u\n",
			 module_id);
	}

	/* A second insert for a known module id is a protocol violation. */
	module = gb_svc_module_lookup(svc, module_id);
	if (module) {
		dev_warn(&svc->dev, "unexpected module-inserted event %u\n",
			 module_id);
		return;
	}

	module = gb_module_create(hd, module_id, num_interfaces);
	if (!module) {
		dev_err(&svc->dev, "failed to create module\n");
		return;
	}

	ret = gb_module_add(module);
	if (ret) {
		/* Drop the creation reference on failure. */
		gb_module_put(module);
		return;
	}

	list_add(&module->hd_node, &hd->modules);
}
1075
/*
 * gb_svc_process_module_removed() - deferred handling of a module-removed
 * event: unregister the module, unlink it from the host device and drop the
 * final reference.
 */
static void gb_svc_process_module_removed(struct gb_operation *operation)
{
	struct gb_svc_module_removed_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_module *module;
	u8 module_id;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	module_id = request->primary_intf_id;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, module_id);

	module = gb_svc_module_lookup(svc, module_id);
	if (!module) {
		dev_warn(&svc->dev, "unexpected module-removed event %u\n",
			 module_id);
		return;
	}

	/* Mark the hardware as gone before tearing the module down. */
	module->disconnected = true;

	gb_module_del(module);
	list_del(&module->hd_node);
	gb_module_put(module);
}
1103
/*
 * gb_svc_process_intf_oops() - deferred handling of an interface-oops event:
 * the interface reported a fatal error, so disable and deactivate it.
 */
static void gb_svc_process_intf_oops(struct gb_operation *operation)
{
	struct gb_svc_intf_oops_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u8 reason;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	reason = request->reason;

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected interface-oops event %u\n",
			 intf_id);
		return;
	}

	dev_info(&svc->dev, "Deactivating interface %u, interface oops reason = %u\n",
		 intf_id, reason);

	/* Serialize against other interface state changes. */
	mutex_lock(&intf->mutex);
	intf->disconnected = true;
	gb_interface_disable(intf);
	gb_interface_deactivate(intf);
	mutex_unlock(&intf->mutex);
}
1134
/*
 * gb_svc_process_intf_mailbox_event() - deferred handling of an interface
 * mailbox event; decodes the wire fields and forwards them to the interface
 * core.
 */
static void gb_svc_process_intf_mailbox_event(struct gb_operation *operation)
{
	struct gb_svc_intf_mailbox_event_request *request;
	struct gb_connection *connection = operation->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	struct gb_interface *intf;
	u8 intf_id;
	u16 result_code;
	u32 mailbox;

	/* The request message size has already been verified. */
	request = operation->request->payload;
	intf_id = request->intf_id;
	result_code = le16_to_cpu(request->result_code);
	mailbox = le32_to_cpu(request->mailbox);

	dev_dbg(&svc->dev, "%s - id = %u, result = 0x%04x, mailbox = 0x%08x\n",
		__func__, intf_id, result_code, mailbox);

	intf = gb_svc_interface_lookup(svc, intf_id);
	if (!intf) {
		dev_warn(&svc->dev, "unexpected mailbox event %u\n", intf_id);
		return;
	}

	gb_interface_mailbox_event(intf, result_code, mailbox);
}
1162
/*
 * gb_svc_process_deferred_request() - workqueue callback dispatching a
 * deferred SVC request to its handler based on the operation type.
 *
 * Drops the operation reference taken in gb_svc_queue_deferred_request()
 * and frees the deferred-request wrapper.
 */
static void gb_svc_process_deferred_request(struct work_struct *work)
{
	struct gb_svc_deferred_request *dr;
	struct gb_operation *operation;
	struct gb_svc *svc;
	u8 type;

	dr = container_of(work, struct gb_svc_deferred_request, work);
	operation = dr->operation;
	svc = gb_connection_get_data(operation->connection);
	type = operation->request->header->type;

	switch (type) {
	case GB_SVC_TYPE_SVC_HELLO:
		gb_svc_process_hello_deferred(operation);
		break;
	case GB_SVC_TYPE_MODULE_INSERTED:
		gb_svc_process_module_inserted(operation);
		break;
	case GB_SVC_TYPE_MODULE_REMOVED:
		gb_svc_process_module_removed(operation);
		break;
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		gb_svc_process_intf_mailbox_event(operation);
		break;
	case GB_SVC_TYPE_INTF_OOPS:
		gb_svc_process_intf_oops(operation);
		break;
	default:
		dev_err(&svc->dev, "bad deferred request type: 0x%02x\n", type);
	}

	gb_operation_put(operation);
	kfree(dr);
}
1198
/*
 * gb_svc_queue_deferred_request() - queue @operation for processing on the
 * SVC workqueue.
 *
 * Takes a reference on the operation so it stays alive until
 * gb_svc_process_deferred_request() runs. Returns 0 or -ENOMEM.
 */
static int gb_svc_queue_deferred_request(struct gb_operation *operation)
{
	struct gb_svc *svc = gb_connection_get_data(operation->connection);
	struct gb_svc_deferred_request *dr;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	/* Dropped by the work item after processing. */
	gb_operation_get(operation);

	dr->operation = operation;
	INIT_WORK(&dr->work, gb_svc_process_deferred_request);

	queue_work(svc->wq, &dr->work);

	return 0;
}
1217
/*
 * gb_svc_intf_reset_recv() - handle an interface-reset request from the SVC.
 *
 * Currently only validates the request; the actual reset is not implemented
 * yet (see FIXME below).
 */
static int gb_svc_intf_reset_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_message *request = op->request;
	struct gb_svc_intf_reset_request *reset;
	u8 intf_id;

	if (request->payload_size < sizeof(*reset)) {
		dev_warn(&svc->dev, "short reset request received (%zu < %zu)\n",
			 request->payload_size, sizeof(*reset));
		return -EINVAL;
	}
	reset = request->payload;

	intf_id = reset->intf_id;

	/* FIXME Reset the interface here */

	return 0;
}
1238
1239static int gb_svc_module_inserted_recv(struct gb_operation *op)
1240{
1241 struct gb_svc *svc = gb_connection_get_data(op->connection);
1242 struct gb_svc_module_inserted_request *request;
1243
1244 if (op->request->payload_size < sizeof(*request)) {
1245 dev_warn(&svc->dev, "short module-inserted request received (%zu < %zu)\n",
1246 op->request->payload_size, sizeof(*request));
1247 return -EINVAL;
1248 }
1249
1250 request = op->request->payload;
1251
1252 dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
1253 request->primary_intf_id);
1254
1255 return gb_svc_queue_deferred_request(op);
1256}
1257
/*
 * gb_svc_module_removed_recv() - receive a module-removed request; validates
 * the size and defers processing to the SVC workqueue.
 */
static int gb_svc_module_removed_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_module_removed_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short module-removed request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__,
		request->primary_intf_id);

	return gb_svc_queue_deferred_request(op);
}
1276
/*
 * gb_svc_intf_oops_recv() - receive an interface-oops request; validates the
 * size and defers processing to the SVC workqueue.
 */
static int gb_svc_intf_oops_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_oops_request *request;	/* used for sizeof only */

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short intf-oops request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	return gb_svc_queue_deferred_request(op);
}
1290
/*
 * gb_svc_intf_mailbox_event_recv() - receive a mailbox-event request;
 * validates the size and defers processing to the SVC workqueue.
 */
static int gb_svc_intf_mailbox_event_recv(struct gb_operation *op)
{
	struct gb_svc *svc = gb_connection_get_data(op->connection);
	struct gb_svc_intf_mailbox_event_request *request;

	if (op->request->payload_size < sizeof(*request)) {
		dev_warn(&svc->dev, "short mailbox request received (%zu < %zu)\n",
			 op->request->payload_size, sizeof(*request));
		return -EINVAL;
	}

	request = op->request->payload;

	dev_dbg(&svc->dev, "%s - id = %u\n", __func__, request->intf_id);

	return gb_svc_queue_deferred_request(op);
}
1308
/*
 * gb_svc_request_handler() - top-level dispatcher for incoming SVC requests.
 *
 * Enforces the initial handshake ordering (PROTOCOL_VERSION, then SVC_HELLO,
 * then everything else) via svc->state before dispatching to the per-type
 * handler. Returns 0 on success or a negative errno.
 */
static int gb_svc_request_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_svc *svc = gb_connection_get_data(connection);
	u8 type = op->type;
	int ret = 0;

	/*
	 * SVC requests need to follow a specific order (at least initially) and
	 * below code takes care of enforcing that. The expected order is:
	 * - PROTOCOL_VERSION
	 * - SVC_HELLO
	 * - Any other request, but the earlier two.
	 *
	 * Incoming requests are guaranteed to be serialized and so we don't
	 * need to protect 'state' for any races.
	 */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		if (svc->state != GB_SVC_STATE_RESET)
			ret = -EINVAL;
		break;
	case GB_SVC_TYPE_SVC_HELLO:
		if (svc->state != GB_SVC_STATE_PROTOCOL_VERSION)
			ret = -EINVAL;
		break;
	default:
		if (svc->state != GB_SVC_STATE_SVC_HELLO)
			ret = -EINVAL;
		break;
	}

	if (ret) {
		dev_warn(&svc->dev, "unexpected request 0x%02x received (state %u)\n",
			 type, svc->state);
		return ret;
	}

	/* State checks passed; dispatch and advance the handshake state. */
	switch (type) {
	case GB_SVC_TYPE_PROTOCOL_VERSION:
		ret = gb_svc_version_request(op);
		if (!ret)
			svc->state = GB_SVC_STATE_PROTOCOL_VERSION;
		return ret;
	case GB_SVC_TYPE_SVC_HELLO:
		ret = gb_svc_hello(op);
		if (!ret)
			svc->state = GB_SVC_STATE_SVC_HELLO;
		return ret;
	case GB_SVC_TYPE_INTF_RESET:
		return gb_svc_intf_reset_recv(op);
	case GB_SVC_TYPE_MODULE_INSERTED:
		return gb_svc_module_inserted_recv(op);
	case GB_SVC_TYPE_MODULE_REMOVED:
		return gb_svc_module_removed_recv(op);
	case GB_SVC_TYPE_INTF_MAILBOX_EVENT:
		return gb_svc_intf_mailbox_event_recv(op);
	case GB_SVC_TYPE_INTF_OOPS:
		return gb_svc_intf_oops_recv(op);
	default:
		dev_warn(&svc->dev, "unsupported request 0x%02x\n", type);
		return -EINVAL;
	}
}
1373
/*
 * gb_svc_release() - device release callback; frees everything allocated in
 * gb_svc_create() once the last reference to the SVC device is dropped.
 */
static void gb_svc_release(struct device *dev)
{
	struct gb_svc *svc = to_gb_svc(dev);

	/* Connection creation may have failed in gb_svc_create(). */
	if (svc->connection)
		gb_connection_destroy(svc->connection);
	ida_destroy(&svc->device_id_map);
	destroy_workqueue(svc->wq);
	kfree(svc);
}
1384
/* Device type for SVC devices on the greybus bus. */
struct device_type greybus_svc_type = {
	.name		= "greybus_svc",
	.release	= gb_svc_release,
};
1389
/*
 * gb_svc_create() - allocate and initialize an SVC for host device @hd.
 *
 * Sets up the ordered workqueue for deferred requests, initializes the
 * (not yet registered) SVC device and creates the static SVC CPort
 * connection. The device itself is only registered later, from
 * gb_svc_hello().
 *
 * Returns the new SVC or NULL on failure; the caller owns a reference to be
 * dropped with gb_svc_put().
 */
struct gb_svc *gb_svc_create(struct gb_host_device *hd)
{
	struct gb_svc *svc;

	svc = kzalloc(sizeof(*svc), GFP_KERNEL);
	if (!svc)
		return NULL;

	/* Single-threaded: deferred requests must run in order. */
	svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
	if (!svc->wq) {
		kfree(svc);
		return NULL;
	}

	svc->dev.parent = &hd->dev;
	svc->dev.bus = &greybus_bus_type;
	svc->dev.type = &greybus_svc_type;
	svc->dev.groups = svc_groups;
	svc->dev.dma_mask = svc->dev.parent->dma_mask;
	device_initialize(&svc->dev);

	dev_set_name(&svc->dev, "%d-svc", hd->bus_id);

	ida_init(&svc->device_id_map);
	svc->state = GB_SVC_STATE_RESET;
	svc->hd = hd;

	svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID,
						      gb_svc_request_handler);
	if (IS_ERR(svc->connection)) {
		dev_err(&svc->dev, "failed to create connection: %ld\n",
			PTR_ERR(svc->connection));
		goto err_put_device;
	}

	gb_connection_set_data(svc->connection, svc);

	return svc;

err_put_device:
	/* Release callback frees the workqueue, ida and svc itself. */
	put_device(&svc->dev);
	return NULL;
}
1433
1434int gb_svc_add(struct gb_svc *svc)
1435{
1436 int ret;
1437
1438 /*
1439 * The SVC protocol is currently driven by the SVC, so the SVC device
1440 * is added from the connection request handler when enough
1441 * information has been received.
1442 */
1443 ret = gb_connection_enable(svc->connection);
1444 if (ret)
1445 return ret;
1446
1447 return 0;
1448}
1449
/*
 * gb_svc_remove_modules() - unregister and release every module still on the
 * host device's module list. Called from gb_svc_del() during SVC teardown.
 */
static void gb_svc_remove_modules(struct gb_svc *svc)
{
	struct gb_host_device *hd = svc->hd;
	struct gb_module *module, *tmp;

	/* _safe variant: each entry is unlinked and freed inside the loop. */
	list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) {
		gb_module_del(module);
		list_del(&module->hd_node);
		gb_module_put(module);
	}
}
1461
/*
 * gb_svc_del() - tear down the SVC.
 *
 * Ordering matters: stop receiving new requests first, unregister the device
 * and its helpers if it was registered, drain the deferred-request workqueue,
 * remove all modules, and only then fully disable the connection.
 */
void gb_svc_del(struct gb_svc *svc)
{
	/* Block further incoming requests but keep the connection usable. */
	gb_connection_disable_rx(svc->connection);

	/*
	 * The SVC device may have been registered from the request handler.
	 */
	if (device_is_registered(&svc->dev)) {
		gb_timesync_svc_remove(svc);
		gb_svc_debugfs_exit(svc);
		gb_svc_watchdog_destroy(svc);
		device_del(&svc->dev);
	}

	/* Let any queued deferred requests finish before module removal. */
	flush_workqueue(svc->wq);

	gb_svc_remove_modules(svc);

	gb_connection_disable(svc->connection);
}
1482
/*
 * gb_svc_put() - drop a reference on the SVC; the device release callback
 * frees it when the last reference goes away.
 */
void gb_svc_put(struct gb_svc *svc)
{
	put_device(&svc->dev);
}
diff --git a/drivers/staging/greybus/svc.h b/drivers/staging/greybus/svc.h
new file mode 100644
index 000000000000..d1d7ef967385
--- /dev/null
+++ b/drivers/staging/greybus/svc.h
@@ -0,0 +1,109 @@
1/*
2 * Greybus SVC code
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __SVC_H
11#define __SVC_H
12
13#define GB_SVC_CPORT_FLAG_E2EFC BIT(0)
14#define GB_SVC_CPORT_FLAG_CSD_N BIT(1)
15#define GB_SVC_CPORT_FLAG_CSV_N BIT(2)
16
/* Handshake state machine: RESET -> PROTOCOL_VERSION -> SVC_HELLO. */
enum gb_svc_state {
	GB_SVC_STATE_RESET,
	GB_SVC_STATE_PROTOCOL_VERSION,
	GB_SVC_STATE_SVC_HELLO,
};
22
/* Action taken when the SVC watchdog "bites" (ping failure). */
enum gb_svc_watchdog_bite {
	GB_SVC_WATCHDOG_BITE_RESET_UNIPRO = 0,
	GB_SVC_WATCHDOG_BITE_PANIC_KERNEL,
};
27
28struct gb_svc_watchdog;
29
/* Per-rail context stored as i_private of the pwrmon debugfs files. */
struct svc_debugfs_pwrmon_rail {
	u8 id;			/* rail index reported by the SVC */
	struct gb_svc *svc;	/* owning SVC, for issuing sample requests */
};
34
/* State of the Supervisory Controller (SVC) of a Greybus network. */
struct gb_svc {
	struct device dev;		/* registered on the greybus bus */

	struct gb_host_device *hd;	/* owning host device */
	struct gb_connection *connection; /* static SVC CPort connection */
	enum gb_svc_state state;	/* handshake progress */
	struct ida device_id_map;	/* allocator for UniPro device ids */
	struct workqueue_struct *wq;	/* ordered queue for deferred requests */

	u16 endo_id;			/* from the SVC hello request */
	u8 ap_intf_id;			/* AP's interface id, from hello */

	u8 protocol_major;		/* negotiated SVC protocol version */
	u8 protocol_minor;

	struct gb_svc_watchdog *watchdog;
	enum gb_svc_watchdog_bite action; /* what to do on watchdog bite */

	struct dentry *debugfs_dentry;	/* per-SVC debugfs root */
	struct svc_debugfs_pwrmon_rail *pwrmon_rails; /* rail array, or NULL */
};
#define to_gb_svc(d) container_of(d, struct gb_svc, dev)
57
58struct gb_svc *gb_svc_create(struct gb_host_device *hd);
59int gb_svc_add(struct gb_svc *svc);
60void gb_svc_del(struct gb_svc *svc);
61void gb_svc_put(struct gb_svc *svc);
62
63int gb_svc_pwrmon_intf_sample_get(struct gb_svc *svc, u8 intf_id,
64 u8 measurement_type, u32 *value);
65int gb_svc_intf_device_id(struct gb_svc *svc, u8 intf_id, u8 device_id);
66int gb_svc_route_create(struct gb_svc *svc, u8 intf1_id, u8 dev1_id,
67 u8 intf2_id, u8 dev2_id);
68void gb_svc_route_destroy(struct gb_svc *svc, u8 intf1_id, u8 intf2_id);
69int gb_svc_connection_create(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
70 u8 intf2_id, u16 cport2_id, u8 cport_flags);
71void gb_svc_connection_destroy(struct gb_svc *svc, u8 intf1_id, u16 cport1_id,
72 u8 intf2_id, u16 cport2_id);
73int gb_svc_intf_eject(struct gb_svc *svc, u8 intf_id);
74int gb_svc_intf_vsys_set(struct gb_svc *svc, u8 intf_id, bool enable);
75int gb_svc_intf_refclk_set(struct gb_svc *svc, u8 intf_id, bool enable);
76int gb_svc_intf_unipro_set(struct gb_svc *svc, u8 intf_id, bool enable);
77int gb_svc_intf_activate(struct gb_svc *svc, u8 intf_id, u8 *intf_type);
78int gb_svc_intf_resume(struct gb_svc *svc, u8 intf_id);
79
80int gb_svc_dme_peer_get(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
81 u32 *value);
82int gb_svc_dme_peer_set(struct gb_svc *svc, u8 intf_id, u16 attr, u16 selector,
83 u32 value);
84int gb_svc_intf_set_power_mode(struct gb_svc *svc, u8 intf_id, u8 hs_series,
85 u8 tx_mode, u8 tx_gear, u8 tx_nlanes,
86 u8 tx_amplitude, u8 tx_hs_equalizer,
87 u8 rx_mode, u8 rx_gear, u8 rx_nlanes,
88 u8 flags, u32 quirks,
89 struct gb_svc_l2_timer_cfg *local,
90 struct gb_svc_l2_timer_cfg *remote);
91int gb_svc_intf_set_power_mode_hibernate(struct gb_svc *svc, u8 intf_id);
92int gb_svc_ping(struct gb_svc *svc);
93int gb_svc_watchdog_create(struct gb_svc *svc);
94void gb_svc_watchdog_destroy(struct gb_svc *svc);
95bool gb_svc_watchdog_enabled(struct gb_svc *svc);
96int gb_svc_watchdog_enable(struct gb_svc *svc);
97int gb_svc_watchdog_disable(struct gb_svc *svc);
98int gb_svc_timesync_enable(struct gb_svc *svc, u8 count, u64 frame_time,
99 u32 strobe_delay, u32 refclk);
100int gb_svc_timesync_disable(struct gb_svc *svc);
101int gb_svc_timesync_authoritative(struct gb_svc *svc, u64 *frame_time);
102int gb_svc_timesync_ping(struct gb_svc *svc, u64 *frame_time);
103int gb_svc_timesync_wake_pins_acquire(struct gb_svc *svc, u32 strobe_mask);
104int gb_svc_timesync_wake_pins_release(struct gb_svc *svc);
105
106int gb_svc_protocol_init(void);
107void gb_svc_protocol_exit(void);
108
109#endif /* __SVC_H */
diff --git a/drivers/staging/greybus/svc_watchdog.c b/drivers/staging/greybus/svc_watchdog.c
new file mode 100644
index 000000000000..3729460fb954
--- /dev/null
+++ b/drivers/staging/greybus/svc_watchdog.c
@@ -0,0 +1,198 @@
1/*
2 * SVC Greybus "watchdog" driver.
3 *
4 * Copyright 2016 Google Inc.
5 *
6 * Released under the GPLv2 only.
7 */
8
9#include <linux/delay.h>
10#include <linux/suspend.h>
11#include <linux/workqueue.h>
12#include "greybus.h"
13
/* Interval between SVC pings while the watchdog is enabled */
#define SVC_WATCHDOG_PERIOD (2*HZ)

/*
 * Per-SVC watchdog state: a self-rearming delayed work item that
 * periodically pings the SVC, plus a PM notifier used to pause the
 * watchdog across system suspend/resume.
 */
struct gb_svc_watchdog {
	struct delayed_work work;	/* periodic ping work (do_work) */
	struct gb_svc *svc;		/* the SVC being monitored */
	bool enabled;			/* true while pings are scheduled */
	struct notifier_block pm_notifier;
};

/* One-shot work used to trigger a greybus network reset from do_work() */
static struct delayed_work reset_work;
24
25static int svc_watchdog_pm_notifier(struct notifier_block *notifier,
26 unsigned long pm_event, void *unused)
27{
28 struct gb_svc_watchdog *watchdog =
29 container_of(notifier, struct gb_svc_watchdog, pm_notifier);
30
31 switch (pm_event) {
32 case PM_SUSPEND_PREPARE:
33 gb_svc_watchdog_disable(watchdog->svc);
34 break;
35 case PM_POST_SUSPEND:
36 gb_svc_watchdog_enable(watchdog->svc);
37 break;
38 default:
39 break;
40 }
41
42 return NOTIFY_DONE;
43}
44
45static void greybus_reset(struct work_struct *work)
46{
47 static char start_path[256] = "/system/bin/start";
48 static char *envp[] = {
49 "HOME=/",
50 "PATH=/sbin:/vendor/bin:/system/sbin:/system/bin:/system/xbin",
51 NULL,
52 };
53 static char *argv[] = {
54 start_path,
55 "unipro_reset",
56 NULL,
57 };
58
59 printk(KERN_ERR "svc_watchdog: calling \"%s %s\" to reset greybus network!\n",
60 argv[0], argv[1]);
61 call_usermodehelper(start_path, argv, envp, UMH_WAIT_EXEC);
62}
63
/*
 * Periodic watchdog work: ping the SVC and, on failure, either panic the
 * kernel or schedule a userspace-driven UniPro reset depending on
 * svc->action.  Re-arms itself every SVC_WATCHDOG_PERIOD while enabled.
 */
static void do_work(struct work_struct *work)
{
	struct gb_svc_watchdog *watchdog;
	struct gb_svc *svc;
	int retval;

	watchdog = container_of(work, struct gb_svc_watchdog, work.work);
	svc = watchdog->svc;

	dev_dbg(&svc->dev, "%s: ping.\n", __func__);
	retval = gb_svc_ping(svc);
	if (retval) {
		/*
		 * Something went really wrong, let's warn userspace and then
		 * pull the plug and reset the whole greybus network.
		 * We need to do this outside of this workqueue as we will be
		 * tearing down the svc device itself. So queue up
		 * yet-another-callback to do that.
		 */
		dev_err(&svc->dev,
			"SVC ping has returned %d, something is wrong!!!\n",
			retval);

		if (svc->action == GB_SVC_WATCHDOG_BITE_PANIC_KERNEL) {
			panic("SVC is not responding\n");
		} else if (svc->action == GB_SVC_WATCHDOG_BITE_RESET_UNIPRO) {
			dev_err(&svc->dev, "Resetting the greybus network, watch out!!!\n");

			/* Run greybus_reset() via the global reset_work item */
			INIT_DELAYED_WORK(&reset_work, greybus_reset);
			schedule_delayed_work(&reset_work, HZ / 2);

			/*
			 * Disable ourselves, we don't want to trip again unless
			 * userspace wants us to.
			 */
			watchdog->enabled = false;
		}
	}

	/* resubmit our work to happen again, if we are still "alive" */
	if (watchdog->enabled)
		schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
}
107
108int gb_svc_watchdog_create(struct gb_svc *svc)
109{
110 struct gb_svc_watchdog *watchdog;
111 int retval;
112
113 if (svc->watchdog)
114 return 0;
115
116 watchdog = kmalloc(sizeof(*watchdog), GFP_KERNEL);
117 if (!watchdog)
118 return -ENOMEM;
119
120 watchdog->enabled = false;
121 watchdog->svc = svc;
122 INIT_DELAYED_WORK(&watchdog->work, do_work);
123 svc->watchdog = watchdog;
124
125 watchdog->pm_notifier.notifier_call = svc_watchdog_pm_notifier;
126 retval = register_pm_notifier(&watchdog->pm_notifier);
127 if (retval) {
128 dev_err(&svc->dev, "error registering pm notifier(%d)\n",
129 retval);
130 goto svc_watchdog_create_err;
131 }
132
133 retval = gb_svc_watchdog_enable(svc);
134 if (retval) {
135 dev_err(&svc->dev, "error enabling watchdog (%d)\n", retval);
136 unregister_pm_notifier(&watchdog->pm_notifier);
137 goto svc_watchdog_create_err;
138 }
139 return retval;
140
141svc_watchdog_create_err:
142 svc->watchdog = NULL;
143 kfree(watchdog);
144
145 return retval;
146}
147
/*
 * Stop and free the watchdog attached to @svc, if any.
 *
 * The PM notifier is unregistered before disabling so that a concurrent
 * resume notification cannot re-enable the work after it is cancelled.
 */
void gb_svc_watchdog_destroy(struct gb_svc *svc)
{
	struct gb_svc_watchdog *watchdog = svc->watchdog;

	if (!watchdog)
		return;

	unregister_pm_notifier(&watchdog->pm_notifier);
	gb_svc_watchdog_disable(svc);
	svc->watchdog = NULL;
	kfree(watchdog);
}
160
161bool gb_svc_watchdog_enabled(struct gb_svc *svc)
162{
163 if (!svc || !svc->watchdog)
164 return false;
165 return svc->watchdog->enabled;
166}
167
168int gb_svc_watchdog_enable(struct gb_svc *svc)
169{
170 struct gb_svc_watchdog *watchdog;
171
172 if (!svc->watchdog)
173 return -ENODEV;
174
175 watchdog = svc->watchdog;
176 if (watchdog->enabled)
177 return 0;
178
179 watchdog->enabled = true;
180 schedule_delayed_work(&watchdog->work, SVC_WATCHDOG_PERIOD);
181 return 0;
182}
183
184int gb_svc_watchdog_disable(struct gb_svc *svc)
185{
186 struct gb_svc_watchdog *watchdog;
187
188 if (!svc->watchdog)
189 return -ENODEV;
190
191 watchdog = svc->watchdog;
192 if (!watchdog->enabled)
193 return 0;
194
195 watchdog->enabled = false;
196 cancel_delayed_work_sync(&watchdog->work);
197 return 0;
198}
diff --git a/drivers/staging/greybus/timesync.c b/drivers/staging/greybus/timesync.c
new file mode 100644
index 000000000000..2e68af7dea6d
--- /dev/null
+++ b/drivers/staging/greybus/timesync.c
@@ -0,0 +1,1357 @@
1/*
2 * TimeSync API driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9#include <linux/debugfs.h>
10#include <linux/hrtimer.h>
11#include "greybus.h"
12#include "timesync.h"
13#include "greybus_trace.h"
14
15/*
16 * Minimum inter-strobe value of one millisecond is chosen because it
17 * just-about fits the common definition of a jiffy.
18 *
19 * Maximum value OTOH is constrained by the number of bits the SVC can fit
20 * into a 16 bit up-counter. The SVC configures the timer in microseconds
21 * so the maximum allowable value is 65535 microseconds. We clip that value
22 * to 10000 microseconds for the sake of using nice round base 10 numbers
23 * and since right-now there's no imaginable use-case requiring anything
24 * other than a one millisecond inter-strobe time, let alone something
25 * higher than ten milliseconds.
26 */
/* Inter-strobe gap in microseconds; see the range discussion above */
#define GB_TIMESYNC_STROBE_DELAY_US 1000
#define GB_TIMESYNC_DEFAULT_OFFSET_US 1000

/* Work queue timers long, short and SVC strobe timeout */
#define GB_TIMESYNC_DELAYED_WORK_LONG msecs_to_jiffies(10)
#define GB_TIMESYNC_DELAYED_WORK_SHORT msecs_to_jiffies(1)
#define GB_TIMESYNC_MAX_WAIT_SVC msecs_to_jiffies(5000)
#define GB_TIMESYNC_KTIME_UPDATE msecs_to_jiffies(1000)
/* Largest FrameTime<->ktime conversion window, in seconds */
#define GB_TIMESYNC_MAX_KTIME_CONVERSION 15

/* Maximum number of times we'll retry a failed synchronous sync */
#define GB_TIMESYNC_MAX_RETRIES 5

/* Reported nanoseconds/femtoseconds per clock */
static u64 gb_timesync_ns_per_clock;
static u64 gb_timesync_fs_per_clock;

/* Maximum difference we will accept converting FrameTime to ktime */
static u32 gb_timesync_max_ktime_diff;

/* Reported clock rate */
static unsigned long gb_timesync_clock_rate;

/* Workqueue */
static void gb_timesync_worker(struct work_struct *work);

/* List of SVCs with one FrameTime per SVC */
static LIST_HEAD(gb_timesync_svc_list);

/* Synchronize parallel contexts accessing a valid timesync_svc pointer */
static DEFINE_MUTEX(gb_timesync_svc_list_mutex);
58
/* Structure to convert from FrameTime to timespec/ktime */
struct gb_timesync_frame_time_data {
	u64 frame_time;		/* local FrameTime at capture */
	struct timespec ts;	/* wall-clock time captured at the same point */
};

/* Per-SVC TimeSync context; one entry on gb_timesync_svc_list */
struct gb_timesync_svc {
	struct list_head list;
	struct list_head interface_list;	/* of gb_timesync_interface */
	struct gb_svc *svc;
	struct gb_timesync_host_device *timesync_hd;

	spinlock_t spinlock;	/* Per SVC spinlock to sync with ISR */
	struct mutex mutex;	/* Per SVC mutex for regular synchronization */

	/* presumably debugfs entries for ping output — TODO confirm */
	struct dentry *frame_time_dentry;
	struct dentry *frame_ktime_dentry;
	struct workqueue_struct *work_queue;
	wait_queue_head_t wait_queue;	/* woken on terminal state changes */
	struct delayed_work delayed_work;	/* runs gb_timesync_worker() */
	struct timer_list ktime_timer;

	/* The current local FrameTime */
	u64 frame_time_offset;	/* |SVC FrameTime - AP FrameTime| */
	struct gb_timesync_frame_time_data strobe_data[GB_TIMESYNC_MAX_STROBES];
	struct gb_timesync_frame_time_data ktime_data;	/* conversion reference */

	/* The SVC FrameTime and relative AP FrameTime @ last TIMESYNC_PING */
	u64 svc_ping_frame_time;
	u64 ap_ping_frame_time;

	/* Transitory settings */
	u32 strobe_mask;	/* wake pins passed to pins_acquire */
	bool offset_down;	/* subtract (vs add) frame_time_offset */
	bool print_ping;	/* log the result of the next ping */
	bool capture_ping;	/* set around gb_svc_timesync_ping() */
	int strobe;		/* strobe progress counter */

	/* Current state */
	int state;		/* enum gb_timesync_state */
};

/* Per-host-device (APB/GPB bridge) TimeSync data */
struct gb_timesync_host_device {
	struct list_head list;
	struct gb_host_device *hd;
	u64 ping_frame_time;	/* FrameTime reported at the last ping */
};

/* Per-interface TimeSync data, linked off gb_timesync_svc->interface_list */
struct gb_timesync_interface {
	struct list_head list;
	struct gb_interface *interface;
	u64 ping_frame_time;	/* FrameTime reported at the last ping */
};

/* State machine; legal transitions enforced in gb_timesync_set_state() */
enum gb_timesync_state {
	GB_TIMESYNC_STATE_INVALID = 0,	/* context unusable/torn down */
	GB_TIMESYNC_STATE_INACTIVE = 1,	/* no synchronization in progress */
	GB_TIMESYNC_STATE_INIT = 2,	/* enable operation scheduled */
	GB_TIMESYNC_STATE_WAIT_SVC = 3,	/* waiting for SVC strobes */
	GB_TIMESYNC_STATE_AUTHORITATIVE = 4, /* SVC frame-times ready */
	GB_TIMESYNC_STATE_PING = 5,	/* ping operation in progress */
	GB_TIMESYNC_STATE_ACTIVE = 6,	/* synchronized */
};
122
123static void gb_timesync_ktime_timer_fn(unsigned long data);
124
125static u64 gb_timesync_adjust_count(struct gb_timesync_svc *timesync_svc,
126 u64 counts)
127{
128 if (timesync_svc->offset_down)
129 return counts - timesync_svc->frame_time_offset;
130 else
131 return counts + timesync_svc->frame_time_offset;
132}
133
134/*
135 * This function provides the authoritative FrameTime to a calling function. It
136 * is designed to be lockless and should remain that way the caller is assumed
137 * to be state-aware.
138 */
139static u64 __gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
140{
141 u64 clocks = gb_timesync_platform_get_counter();
142
143 return gb_timesync_adjust_count(timesync_svc, clocks);
144}
145
146static void gb_timesync_schedule_svc_timeout(struct gb_timesync_svc
147 *timesync_svc)
148{
149 queue_delayed_work(timesync_svc->work_queue,
150 &timesync_svc->delayed_work,
151 GB_TIMESYNC_MAX_WAIT_SVC);
152}
153
/*
 * Drive the TimeSync state machine.  Only the legal transitions encoded
 * below actually update ->state; an illegal request leaves the state
 * unchanged and trips the WARN_ON at the bottom.  Callers must hold
 * timesync_svc->spinlock (or use gb_timesync_set_state_atomic()).
 */
static void gb_timesync_set_state(struct gb_timesync_svc *timesync_svc,
				  int state)
{
	switch (state) {
	case GB_TIMESYNC_STATE_INVALID:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INACTIVE:
		timesync_svc->state = state;
		wake_up(&timesync_svc->wait_queue);
		break;
	case GB_TIMESYNC_STATE_INIT:
		/* An INVALID (dead) context may not be re-initialized */
		if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
			timesync_svc->strobe = 0;
			timesync_svc->frame_time_offset = 0;
			timesync_svc->state = state;
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_LONG);
		}
		break;
	case GB_TIMESYNC_STATE_WAIT_SVC:
		if (timesync_svc->state == GB_TIMESYNC_STATE_INIT)
			timesync_svc->state = state;
		break;
	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_WAIT_SVC) {
			timesync_svc->state = state;
			/* Process the authoritative frame-time immediately */
			cancel_delayed_work(&timesync_svc->delayed_work);
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work, 0);
		}
		break;
	case GB_TIMESYNC_STATE_PING:
		if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE) {
			timesync_svc->state = state;
			queue_delayed_work(timesync_svc->work_queue,
					   &timesync_svc->delayed_work,
					   GB_TIMESYNC_DELAYED_WORK_SHORT);
		}
		break;
	case GB_TIMESYNC_STATE_ACTIVE:
		if (timesync_svc->state == GB_TIMESYNC_STATE_AUTHORITATIVE ||
		    timesync_svc->state == GB_TIMESYNC_STATE_PING) {
			timesync_svc->state = state;
			wake_up(&timesync_svc->wait_queue);
		}
		break;
	}

	if (WARN_ON(timesync_svc->state != state)) {
		pr_err("Invalid state transition %d=>%d\n",
		       timesync_svc->state, state);
	}
}
211
212static void gb_timesync_set_state_atomic(struct gb_timesync_svc *timesync_svc,
213 int state)
214{
215 unsigned long flags;
216
217 spin_lock_irqsave(&timesync_svc->spinlock, flags);
218 gb_timesync_set_state(timesync_svc, state);
219 spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
220}
221
222static u64 gb_timesync_diff(u64 x, u64 y)
223{
224 if (x > y)
225 return x - y;
226 else
227 return y - x;
228}
229
230static void gb_timesync_adjust_to_svc(struct gb_timesync_svc *svc,
231 u64 svc_frame_time, u64 ap_frame_time)
232{
233 if (svc_frame_time > ap_frame_time) {
234 svc->frame_time_offset = svc_frame_time - ap_frame_time;
235 svc->offset_down = false;
236 } else {
237 svc->frame_time_offset = ap_frame_time - svc_frame_time;
238 svc->offset_down = true;
239 }
240}
241
242/*
243 * Associate a FrameTime with a ktime timestamp represented as struct timespec
244 * Requires the calling context to hold timesync_svc->mutex
245 */
246static void gb_timesync_store_ktime(struct gb_timesync_svc *timesync_svc,
247 struct timespec ts, u64 frame_time)
248{
249 timesync_svc->ktime_data.ts = ts;
250 timesync_svc->ktime_data.frame_time = frame_time;
251}
252
253/*
254 * Find the two pulses that best-match our expected inter-strobe gap and
255 * then calculate the difference between the SVC time at the second pulse
256 * to the local time at the second pulse.
257 */
static void gb_timesync_collate_frame_time(struct gb_timesync_svc *timesync_svc,
					   u64 *frame_time)
{
	int i = 0;
	u64 delta, ap_frame_time;
	u64 strobe_delay_ns = GB_TIMESYNC_STROBE_DELAY_US * NSEC_PER_USEC;
	u64 least = 0;

	/*
	 * Scan consecutive strobe pairs; the pair whose locally-measured
	 * gap deviates least from the nominal inter-strobe delay is taken
	 * as the most accurately timestamped and wins the adjustment.
	 */
	for (i = 1; i < GB_TIMESYNC_MAX_STROBES; i++) {
		/* Local (AP) gap between strobes i-1 and i, in nanoseconds */
		delta = timesync_svc->strobe_data[i].frame_time -
			timesync_svc->strobe_data[i - 1].frame_time;
		delta *= gb_timesync_ns_per_clock;
		delta = gb_timesync_diff(delta, strobe_delay_ns);

		if (!least || delta < least) {
			least = delta;
			/* Offset local FrameTime to the SVC's at strobe i */
			gb_timesync_adjust_to_svc(timesync_svc, frame_time[i],
						  timesync_svc->strobe_data[i].frame_time);

			ap_frame_time = timesync_svc->strobe_data[i].frame_time;
			ap_frame_time = gb_timesync_adjust_count(timesync_svc,
								 ap_frame_time);
			/* New FrameTime<->ktime reference for conversions */
			gb_timesync_store_ktime(timesync_svc,
						timesync_svc->strobe_data[i].ts,
						ap_frame_time);

			pr_debug("adjust %s local %llu svc %llu delta %llu\n",
				 timesync_svc->offset_down ? "down" : "up",
				 timesync_svc->strobe_data[i].frame_time,
				 frame_time[i], delta);
		}
	}
}
291
/*
 * Disable TimeSync on every participant (interfaces, the host device and
 * the SVC), release the wake pins and the platform bus lock, then drop
 * to the INACTIVE state.  Per-participant errors are logged, not
 * propagated - teardown always runs to completion.
 */
static void gb_timesync_teardown(struct gb_timesync_svc *timesync_svc)
{
	struct gb_timesync_interface *timesync_interface;
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_interface *interface;
	struct gb_host_device *hd;
	int ret;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_disable(interface);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_disable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_disable(hd);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_disable %d\n",
			ret);
	}

	gb_svc_timesync_wake_pins_release(svc);
	gb_svc_timesync_disable(svc);
	gb_timesync_platform_unlock_bus();

	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
}
323
324static void gb_timesync_platform_lock_bus_fail(struct gb_timesync_svc
325 *timesync_svc, int ret)
326{
327 if (ret == -EAGAIN) {
328 gb_timesync_set_state(timesync_svc, timesync_svc->state);
329 } else {
330 pr_err("Failed to lock timesync bus %d\n", ret);
331 gb_timesync_set_state(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
332 }
333}
334
/*
 * Kick off a TimeSync synchronization round: lock the bus, acquire the
 * wake pins, send timesync-enable to every interface, the host device
 * and finally the SVC, then arm a timeout waiting for the SVC strobes.
 * On SVC failure everything is torn down; per-interface/host errors are
 * only logged.
 */
static void gb_timesync_enable(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 init_frame_time;
	unsigned long clock_rate = gb_timesync_clock_rate;
	int ret;

	/*
	 * Get access to the wake pins in the AP and SVC
	 * Release these pins either in gb_timesync_teardown() or in
	 * gb_timesync_authoritative()
	 */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Choose an initial time in the future */
	init_frame_time = __gb_timesync_get_frame_time(timesync_svc) + 100000UL;

	/* Send enable command to all relevant participants */
	list_for_each_entry(timesync_interface, &timesync_svc->interface_list,
			    list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_enable(interface,
						   GB_TIMESYNC_MAX_STROBES,
						   init_frame_time,
						   GB_TIMESYNC_STROBE_DELAY_US,
						   clock_rate);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_enable %d\n", ret);
		}
	}

	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_enable(hd, GB_TIMESYNC_MAX_STROBES,
					  init_frame_time,
					  GB_TIMESYNC_STROBE_DELAY_US,
					  clock_rate);
	if (ret < 0) {
		dev_err(&hd->dev, "host timesync_enable %d\n",
			ret);
	}

	/* Move to WAIT_SVC before the SVC can start strobing back */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_WAIT_SVC);
	ret = gb_svc_timesync_enable(svc, GB_TIMESYNC_MAX_STROBES,
				     init_frame_time,
				     GB_TIMESYNC_STROBE_DELAY_US,
				     clock_rate);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_enable %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Schedule a timeout waiting for SVC to complete strobing */
	gb_timesync_schedule_svc_timeout(timesync_svc);
}
406
/*
 * Collect the SVC's authoritative per-strobe FrameTimes, adjust the
 * local clock offset to match, distribute the authoritative times to
 * the host device and interfaces, release the wake pins and finish in
 * the ACTIVE state with a verification ping scheduled.
 */
static void gb_timesync_authoritative(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	u64 svc_frame_time[GB_TIMESYNC_MAX_STROBES];
	int ret;

	/* Get authoritative time from SVC and adjust local clock */
	ret = gb_svc_timesync_authoritative(svc, svc_frame_time);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_authoritative %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}
	gb_timesync_collate_frame_time(timesync_svc, svc_frame_time);

	/* Transmit authoritative time to downstream slaves */
	hd = timesync_svc->timesync_hd->hd;
	ret = hd->driver->timesync_authoritative(hd, svc_frame_time);
	if (ret < 0)
		dev_err(&hd->dev, "host timesync_authoritative %d\n", ret);

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		ret = gb_interface_timesync_authoritative(
						interface,
						svc_frame_time);
		if (ret) {
			dev_err(&interface->dev,
				"interface timesync_authoritative %d\n", ret);
		}
	}

	/* Release wake pins */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();

	/* Transition to state ACTIVE */
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);

	/* Schedule a ping to verify the synchronized system time */
	timesync_svc->print_ping = true;
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_PING);
}
455
456static int __gb_timesync_get_status(struct gb_timesync_svc *timesync_svc)
457{
458 int ret = -EINVAL;
459
460 switch (timesync_svc->state) {
461 case GB_TIMESYNC_STATE_INVALID:
462 case GB_TIMESYNC_STATE_INACTIVE:
463 ret = -ENODEV;
464 break;
465 case GB_TIMESYNC_STATE_INIT:
466 case GB_TIMESYNC_STATE_WAIT_SVC:
467 case GB_TIMESYNC_STATE_AUTHORITATIVE:
468 ret = -EAGAIN;
469 break;
470 case GB_TIMESYNC_STATE_PING:
471 case GB_TIMESYNC_STATE_ACTIVE:
472 ret = 0;
473 break;
474 }
475 return ret;
476}
477
478/*
479 * This routine takes a FrameTime and derives the difference with-respect
480 * to a reference FrameTime/ktime pair. It then returns the calculated
481 * ktime based on the difference between the supplied FrameTime and
482 * the reference FrameTime.
483 *
484 * The time difference is calculated to six decimal places. Taking 19.2MHz
485 * as an example this means we have 52.083333~ nanoseconds per clock or
486 * 52083333~ femtoseconds per clock.
487 *
488 * Naively taking the count difference and converting to
489 * seconds/nanoseconds would quickly see the 0.0833 component produce
490 * noticeable errors. For example a time difference of one second would
491 * loose 19200000 * 0.08333x nanoseconds or 1.59 seconds.
492 *
493 * In contrast calculating in femtoseconds the same example of 19200000 *
494 * 0.000000083333x nanoseconds per count of error is just 1.59 nanoseconds!
495 *
496 * Continuing the example of 19.2 MHz we cap the maximum error difference
497 * at a worst-case 0.3 microseconds over a potential calculation window of
498 * abount 15 seconds, meaning you can convert a FrameTime that is <= 15
499 * seconds older/younger than the reference time with a maximum error of
500 * 0.2385 useconds. Note 19.2MHz is an example frequency not a requirement.
501 */
502static int gb_timesync_to_timespec(struct gb_timesync_svc *timesync_svc,
503 u64 frame_time, struct timespec *ts)
504{
505 unsigned long flags;
506 u64 delta_fs, counts, sec, nsec;
507 bool add;
508 int ret = 0;
509
510 memset(ts, 0x00, sizeof(*ts));
511 mutex_lock(&timesync_svc->mutex);
512 spin_lock_irqsave(&timesync_svc->spinlock, flags);
513
514 ret = __gb_timesync_get_status(timesync_svc);
515 if (ret)
516 goto done;
517
518 /* Support calculating ktime upwards or downwards from the reference */
519 if (frame_time < timesync_svc->ktime_data.frame_time) {
520 add = false;
521 counts = timesync_svc->ktime_data.frame_time - frame_time;
522 } else {
523 add = true;
524 counts = frame_time - timesync_svc->ktime_data.frame_time;
525 }
526
527 /* Enforce the .23 of a usecond boundary @ 19.2MHz */
528 if (counts > gb_timesync_max_ktime_diff) {
529 ret = -EINVAL;
530 goto done;
531 }
532
533 /* Determine the time difference in femtoseconds */
534 delta_fs = counts * gb_timesync_fs_per_clock;
535
536 /* Convert to seconds */
537 sec = delta_fs;
538 do_div(sec, NSEC_PER_SEC);
539 do_div(sec, 1000000UL);
540
541 /* Get the nanosecond remainder */
542 nsec = do_div(delta_fs, sec);
543 do_div(nsec, 1000000UL);
544
545 if (add) {
546 /* Add the calculated offset - overflow nanoseconds upwards */
547 ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec + sec;
548 ts->tv_nsec = timesync_svc->ktime_data.ts.tv_nsec + nsec;
549 if (ts->tv_nsec >= NSEC_PER_SEC) {
550 ts->tv_sec++;
551 ts->tv_nsec -= NSEC_PER_SEC;
552 }
553 } else {
554 /* Subtract the difference over/underflow as necessary */
555 if (nsec > timesync_svc->ktime_data.ts.tv_nsec) {
556 sec++;
557 nsec = nsec + timesync_svc->ktime_data.ts.tv_nsec;
558 nsec = do_div(nsec, NSEC_PER_SEC);
559 } else {
560 nsec = timesync_svc->ktime_data.ts.tv_nsec - nsec;
561 }
562 /* Cannot return a negative second value */
563 if (sec > timesync_svc->ktime_data.ts.tv_sec) {
564 ret = -EINVAL;
565 goto done;
566 }
567 ts->tv_sec = timesync_svc->ktime_data.ts.tv_sec - sec;
568 ts->tv_nsec = nsec;
569 }
570done:
571 spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
572 mutex_unlock(&timesync_svc->mutex);
573 return ret;
574}
575
/*
 * Format the last-ping FrameTimes of every participant into @buf.
 * Returns the number of characters snprintf() would have written; the
 * "len < buflen" tests exploit unsigned wrap-around (off > buflen makes
 * len huge) to stop writing once the buffer is exhausted.
 */
static size_t gb_timesync_log_frame_time(struct gb_timesync_svc *timesync_svc,
					 char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	unsigned int len;
	size_t off;

	/* AP/SVC */
	off = snprintf(buf, buflen, "%s frametime: ap=%llu %s=%llu ",
		       greybus_bus_type.name,
		       timesync_svc->ap_ping_frame_time, dev_name(&svc->dev),
		       timesync_svc->svc_ping_frame_time);
	len = buflen - off;

	/* APB/GPB */
	if (len < buflen) {
		hd = timesync_svc->timesync_hd->hd;
		off += snprintf(&buf[off], len, "%s=%llu ", dev_name(&hd->dev),
				timesync_svc->timesync_hd->ping_frame_time);
		len = buflen - off;
	}

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		if (len < buflen) {
			interface = timesync_interface->interface;
			off += snprintf(&buf[off], len, "%s=%llu ",
					dev_name(&interface->dev),
					timesync_interface->ping_frame_time);
			len = buflen - off;
		}
	}
	if (len < buflen)
		off += snprintf(&buf[off], len, "\n");
	return off;
}
615
/*
 * Like gb_timesync_log_frame_time() but converts each FrameTime to a
 * seconds.nanoseconds timestamp first.  Same unsigned wrap-around
 * convention for the "len >= buflen" overflow checks.
 */
static size_t gb_timesync_log_frame_ktime(struct gb_timesync_svc *timesync_svc,
					  char *buf, size_t buflen)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_interface *interface;
	struct timespec ts;
	unsigned int len;
	size_t off;

	/* AP */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->ap_ping_frame_time,
				&ts);
	off = snprintf(buf, buflen, "%s frametime: ap=%lu.%lu ",
		       greybus_bus_type.name, ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* SVC */
	gb_timesync_to_timespec(timesync_svc, timesync_svc->svc_ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ", dev_name(&svc->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	/* APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	gb_timesync_to_timespec(timesync_svc,
				timesync_svc->timesync_hd->ping_frame_time,
				&ts);
	off += snprintf(&buf[off], len, "%s=%lu.%lu ",
			dev_name(&hd->dev),
			ts.tv_sec, ts.tv_nsec);
	len = buflen - off;
	if (len >= buflen)
		goto done;

	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		interface = timesync_interface->interface;
		gb_timesync_to_timespec(timesync_svc,
					timesync_interface->ping_frame_time,
					&ts);
		off += snprintf(&buf[off], len, "%s=%lu.%lu ",
				dev_name(&interface->dev),
				ts.tv_sec, ts.tv_nsec);
		len = buflen - off;
		if (len >= buflen)
			goto done;
	}
	off += snprintf(&buf[off], len, "\n");
done:
	return off;
}
674
675/*
676 * Send an SVC initiated wake 'ping' to each TimeSync participant.
677 * Get the FrameTime from each participant associated with the wake
678 * ping.
679 */
static void gb_timesync_ping(struct gb_timesync_svc *timesync_svc)
{
	struct gb_svc *svc = timesync_svc->svc;
	struct gb_host_device *hd;
	struct gb_timesync_interface *timesync_interface;
	struct gb_control *control;
	u64 *ping_frame_time;
	int ret;

	/* Get access to the wake pins in the AP and SVC */
	ret = gb_timesync_platform_lock_bus(timesync_svc);
	if (ret < 0) {
		gb_timesync_platform_lock_bus_fail(timesync_svc, ret);
		return;
	}
	ret = gb_svc_timesync_wake_pins_acquire(svc, timesync_svc->strobe_mask);
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_wake_pins_acquire %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/*
	 * Have SVC generate a timesync ping; capture_ping is raised so
	 * the wake event is recorded while the operation is in flight.
	 */
	timesync_svc->capture_ping = true;
	timesync_svc->svc_ping_frame_time = 0;
	ret = gb_svc_timesync_ping(svc, &timesync_svc->svc_ping_frame_time);
	timesync_svc->capture_ping = false;
	if (ret) {
		dev_err(&svc->dev,
			"gb_svc_timesync_ping %d\n", ret);
		gb_timesync_teardown(timesync_svc);
		return;
	}

	/* Get the ping FrameTime from each APB/GPB */
	hd = timesync_svc->timesync_hd->hd;
	timesync_svc->timesync_hd->ping_frame_time = 0;
	ret = hd->driver->timesync_get_last_event(hd,
		&timesync_svc->timesync_hd->ping_frame_time);
	if (ret)
		dev_err(&hd->dev, "host timesync_get_last_event %d\n", ret);

	/* ...and from every interface; failures are logged, not fatal */
	list_for_each_entry(timesync_interface,
			    &timesync_svc->interface_list, list) {
		control = timesync_interface->interface->control;
		timesync_interface->ping_frame_time = 0;
		ping_frame_time = &timesync_interface->ping_frame_time;
		ret = gb_control_timesync_get_last_event(control,
							 ping_frame_time);
		if (ret) {
			dev_err(&timesync_interface->interface->dev,
				"gb_control_timesync_get_last_event %d\n", ret);
		}
	}

	/* Ping success - move to timesync active */
	gb_svc_timesync_wake_pins_release(svc);
	gb_timesync_platform_unlock_bus();
	gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_ACTIVE);
}
741
742static void gb_timesync_log_ping_time(struct gb_timesync_svc *timesync_svc)
743{
744 char *buf;
745
746 if (!timesync_svc->print_ping)
747 return;
748
749 buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
750 if (buf) {
751 gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
752 dev_dbg(&timesync_svc->svc->dev, "%s", buf);
753 kfree(buf);
754 }
755}
756
757/*
758 * Perform the actual work of scheduled TimeSync logic.
759 */
/*
 * Dispatch the action corresponding to the current state, under the
 * per-SVC mutex.  WAIT_SVC reaching the worker means the strobe timeout
 * armed by gb_timesync_schedule_svc_timeout() fired.
 */
static void gb_timesync_worker(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gb_timesync_svc *timesync_svc =
		container_of(delayed_work, struct gb_timesync_svc, delayed_work);

	mutex_lock(&timesync_svc->mutex);

	switch (timesync_svc->state) {
	case GB_TIMESYNC_STATE_INIT:
		gb_timesync_enable(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_WAIT_SVC:
		/* SVC strobe timeout expired */
		dev_err(&timesync_svc->svc->dev,
			"timeout SVC strobe completion %d/%d\n",
			timesync_svc->strobe, GB_TIMESYNC_MAX_STROBES);
		gb_timesync_teardown(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_AUTHORITATIVE:
		gb_timesync_authoritative(timesync_svc);
		break;

	case GB_TIMESYNC_STATE_PING:
		gb_timesync_ping(timesync_svc);
		gb_timesync_log_ping_time(timesync_svc);
		break;

	default:
		pr_err("Invalid state %d for delayed work\n",
		       timesync_svc->state);
		break;
	}

	mutex_unlock(&timesync_svc->mutex);
}
797
798/*
799 * Schedule a new TimeSync INIT or PING operation serialized w/r to
800 * gb_timesync_worker().
801 */
802static int gb_timesync_schedule(struct gb_timesync_svc *timesync_svc, int state)
803{
804 int ret = 0;
805
806 if (state != GB_TIMESYNC_STATE_INIT && state != GB_TIMESYNC_STATE_PING)
807 return -EINVAL;
808
809 mutex_lock(&timesync_svc->mutex);
810 if (timesync_svc->state != GB_TIMESYNC_STATE_INVALID) {
811 gb_timesync_set_state_atomic(timesync_svc, state);
812 } else {
813 ret = -ENODEV;
814 }
815 mutex_unlock(&timesync_svc->mutex);
816 return ret;
817}
818
/*
 * Schedule an INIT or PING and block (interruptibly) until the state
 * machine reaches a terminal state (ACTIVE, INACTIVE or INVALID).
 * Returns the resulting status from __gb_timesync_get_status(), or
 * -ERESTARTSYS if the wait was interrupted.
 */
static int __gb_timesync_schedule_synchronous(
	struct gb_timesync_svc *timesync_svc, int state)
{
	unsigned long flags;
	int ret;

	ret = gb_timesync_schedule(timesync_svc, state);
	if (ret)
		return ret;

	ret = wait_event_interruptible(timesync_svc->wait_queue,
			(timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INACTIVE ||
			 timesync_svc->state == GB_TIMESYNC_STATE_INVALID));
	if (ret)
		return ret;

	/* Take both locks so the final state read is stable */
	mutex_lock(&timesync_svc->mutex);
	spin_lock_irqsave(&timesync_svc->spinlock, flags);

	ret = __gb_timesync_get_status(timesync_svc);

	spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
	mutex_unlock(&timesync_svc->mutex);

	return ret;
}
846
847static struct gb_timesync_svc *gb_timesync_find_timesync_svc(
848 struct gb_host_device *hd)
849{
850 struct gb_timesync_svc *timesync_svc;
851
852 list_for_each_entry(timesync_svc, &gb_timesync_svc_list, list) {
853 if (timesync_svc->svc == hd->svc)
854 return timesync_svc;
855 }
856 return NULL;
857}
858
859static struct gb_timesync_interface *gb_timesync_find_timesync_interface(
860 struct gb_timesync_svc *timesync_svc,
861 struct gb_interface *interface)
862{
863 struct gb_timesync_interface *timesync_interface;
864
865 list_for_each_entry(timesync_interface, &timesync_svc->interface_list, list) {
866 if (timesync_interface->interface == interface)
867 return timesync_interface;
868 }
869 return NULL;
870}
871
872int gb_timesync_schedule_synchronous(struct gb_interface *interface)
873{
874 int ret;
875 struct gb_timesync_svc *timesync_svc;
876 int retries;
877
878 if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
879 return 0;
880
881 mutex_lock(&gb_timesync_svc_list_mutex);
882 for (retries = 0; retries < GB_TIMESYNC_MAX_RETRIES; retries++) {
883 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
884 if (!timesync_svc) {
885 ret = -ENODEV;
886 goto done;
887 }
888
889 ret = __gb_timesync_schedule_synchronous(timesync_svc,
890 GB_TIMESYNC_STATE_INIT);
891 if (!ret)
892 break;
893 }
894 if (ret && retries == GB_TIMESYNC_MAX_RETRIES)
895 ret = -ETIMEDOUT;
896done:
897 mutex_unlock(&gb_timesync_svc_list_mutex);
898 return ret;
899}
900EXPORT_SYMBOL_GPL(gb_timesync_schedule_synchronous);
901
902void gb_timesync_schedule_asynchronous(struct gb_interface *interface)
903{
904 struct gb_timesync_svc *timesync_svc;
905
906 if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
907 return;
908
909 mutex_lock(&gb_timesync_svc_list_mutex);
910 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
911 if (!timesync_svc)
912 goto done;
913
914 gb_timesync_schedule(timesync_svc, GB_TIMESYNC_STATE_INIT);
915done:
916 mutex_unlock(&gb_timesync_svc_list_mutex);
917 return;
918}
919EXPORT_SYMBOL_GPL(gb_timesync_schedule_asynchronous);
920
/*
 * Common debugfs read path for the "frame-time" and "frame-ktime" files:
 * run a synchronous PING exchange and format the captured timestamps
 * into @ubuf. @ktime selects ktime formatting over raw FrameTime.
 *
 * Holds gb_timesync_svc_list_mutex for the whole operation so the
 * timesync_svc (stashed in i_private at file creation) cannot be freed
 * by gb_timesync_svc_remove() while the ping is in flight.
 */
static ssize_t gb_timesync_ping_read(struct file *file, char __user *ubuf,
                                     size_t len, loff_t *offset, bool ktime)
{
        struct gb_timesync_svc *timesync_svc = file->f_inode->i_private;
        char *buf;
        ssize_t ret = 0;

        mutex_lock(&gb_timesync_svc_list_mutex);
        mutex_lock(&timesync_svc->mutex);
        /* A ping is meaningless with no interfaces attached. */
        if (list_empty(&timesync_svc->interface_list))
                ret = -ENODEV;
        timesync_svc->print_ping = false;
        mutex_unlock(&timesync_svc->mutex);
        if (ret)
                goto done;

        ret = __gb_timesync_schedule_synchronous(timesync_svc,
                                                 GB_TIMESYNC_STATE_PING);
        if (ret)
                goto done;

        buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Log helpers return the number of characters written (> 0). */
        if (ktime)
                ret = gb_timesync_log_frame_ktime(timesync_svc, buf, PAGE_SIZE);
        else
                ret = gb_timesync_log_frame_time(timesync_svc, buf, PAGE_SIZE);
        if (ret > 0)
                ret = simple_read_from_buffer(ubuf, len, offset, buf, ret);
        kfree(buf);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
        return ret;
}
959
/* debugfs "frame-time" read op: report the ping in raw FrameTime units. */
static ssize_t gb_timesync_ping_read_frame_time(struct file *file,
                                                char __user *buf,
                                                size_t len, loff_t *offset)
{
        return gb_timesync_ping_read(file, buf, len, offset, false);
}
966
/* debugfs "frame-ktime" read op: report the ping converted to ktime. */
static ssize_t gb_timesync_ping_read_frame_ktime(struct file *file,
                                                 char __user *buf,
                                                 size_t len, loff_t *offset)
{
        return gb_timesync_ping_read(file, buf, len, offset, true);
}
973
/* Read-only fops for the per-SVC debugfs "frame-time" entry. */
static const struct file_operations gb_timesync_debugfs_frame_time_ops = {
        .read = gb_timesync_ping_read_frame_time,
};
977
/* Read-only fops for the per-SVC debugfs "frame-ktime" entry. */
static const struct file_operations gb_timesync_debugfs_frame_ktime_ops = {
        .read = gb_timesync_ping_read_frame_ktime,
};
981
982static int gb_timesync_hd_add(struct gb_timesync_svc *timesync_svc,
983 struct gb_host_device *hd)
984{
985 struct gb_timesync_host_device *timesync_hd;
986
987 timesync_hd = kzalloc(sizeof(*timesync_hd), GFP_KERNEL);
988 if (!timesync_hd)
989 return -ENOMEM;
990
991 WARN_ON(timesync_svc->timesync_hd);
992 timesync_hd->hd = hd;
993 timesync_svc->timesync_hd = timesync_hd;
994
995 return 0;
996}
997
998static void gb_timesync_hd_remove(struct gb_timesync_svc *timesync_svc,
999 struct gb_host_device *hd)
1000{
1001 if (timesync_svc->timesync_hd->hd == hd) {
1002 kfree(timesync_svc->timesync_hd);
1003 timesync_svc->timesync_hd = NULL;
1004 return;
1005 }
1006 WARN_ON(1);
1007}
1008
1009int gb_timesync_svc_add(struct gb_svc *svc)
1010{
1011 struct gb_timesync_svc *timesync_svc;
1012 int ret;
1013
1014 timesync_svc = kzalloc(sizeof(*timesync_svc), GFP_KERNEL);
1015 if (!timesync_svc)
1016 return -ENOMEM;
1017
1018 timesync_svc->work_queue =
1019 create_singlethread_workqueue("gb-timesync-work_queue");
1020
1021 if (!timesync_svc->work_queue) {
1022 kfree(timesync_svc);
1023 return -ENOMEM;
1024 }
1025
1026 mutex_lock(&gb_timesync_svc_list_mutex);
1027 INIT_LIST_HEAD(&timesync_svc->interface_list);
1028 INIT_DELAYED_WORK(&timesync_svc->delayed_work, gb_timesync_worker);
1029 mutex_init(&timesync_svc->mutex);
1030 spin_lock_init(&timesync_svc->spinlock);
1031 init_waitqueue_head(&timesync_svc->wait_queue);
1032
1033 timesync_svc->svc = svc;
1034 timesync_svc->frame_time_offset = 0;
1035 timesync_svc->capture_ping = false;
1036 gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INACTIVE);
1037
1038 timesync_svc->frame_time_dentry =
1039 debugfs_create_file("frame-time", S_IRUGO, svc->debugfs_dentry,
1040 timesync_svc,
1041 &gb_timesync_debugfs_frame_time_ops);
1042 timesync_svc->frame_ktime_dentry =
1043 debugfs_create_file("frame-ktime", S_IRUGO, svc->debugfs_dentry,
1044 timesync_svc,
1045 &gb_timesync_debugfs_frame_ktime_ops);
1046
1047 list_add(&timesync_svc->list, &gb_timesync_svc_list);
1048 ret = gb_timesync_hd_add(timesync_svc, svc->hd);
1049 if (ret) {
1050 list_del(&timesync_svc->list);
1051 debugfs_remove(timesync_svc->frame_ktime_dentry);
1052 debugfs_remove(timesync_svc->frame_time_dentry);
1053 destroy_workqueue(timesync_svc->work_queue);
1054 kfree(timesync_svc);
1055 goto done;
1056 }
1057
1058 init_timer(&timesync_svc->ktime_timer);
1059 timesync_svc->ktime_timer.function = gb_timesync_ktime_timer_fn;
1060 timesync_svc->ktime_timer.expires = jiffies + GB_TIMESYNC_KTIME_UPDATE;
1061 timesync_svc->ktime_timer.data = (unsigned long)timesync_svc;
1062 add_timer(&timesync_svc->ktime_timer);
1063done:
1064 mutex_unlock(&gb_timesync_svc_list_mutex);
1065 return ret;
1066}
1067EXPORT_SYMBOL_GPL(gb_timesync_svc_add);
1068
/*
 * Tear down and free the timesync context bound to @svc's host device.
 * Safe to call when no context exists (the lookup simply fails).
 *
 * Lock order: gb_timesync_svc_list_mutex, then timesync_svc->mutex.
 * Pending work is cancelled and the state forced to INVALID — one of
 * the terminal states __gb_timesync_schedule_synchronous() waits for —
 * before any resources are released.
 */
void gb_timesync_svc_remove(struct gb_svc *svc)
{
        struct gb_timesync_svc *timesync_svc;
        struct gb_timesync_interface *timesync_interface;
        struct gb_timesync_interface *next;

        mutex_lock(&gb_timesync_svc_list_mutex);
        timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
        if (!timesync_svc)
                goto done;

        /* Ensure no synchronization run is queued or executing. */
        cancel_delayed_work_sync(&timesync_svc->delayed_work);

        mutex_lock(&timesync_svc->mutex);

        gb_timesync_set_state_atomic(timesync_svc, GB_TIMESYNC_STATE_INVALID);
        /* Stop the periodic ktime-anchor timer before freeing its data. */
        del_timer_sync(&timesync_svc->ktime_timer);
        gb_timesync_teardown(timesync_svc);

        gb_timesync_hd_remove(timesync_svc, svc->hd);
        /* Release every interface still tracked by this SVC. */
        list_for_each_entry_safe(timesync_interface, next,
                                 &timesync_svc->interface_list, list) {
                list_del(&timesync_interface->list);
                kfree(timesync_interface);
        }
        debugfs_remove(timesync_svc->frame_ktime_dentry);
        debugfs_remove(timesync_svc->frame_time_dentry);
        destroy_workqueue(timesync_svc->work_queue);
        list_del(&timesync_svc->list);

        mutex_unlock(&timesync_svc->mutex);

        kfree(timesync_svc);
done:
        mutex_unlock(&gb_timesync_svc_list_mutex);
}
EXPORT_SYMBOL_GPL(gb_timesync_svc_remove);
1106
1107/*
1108 * Add a Greybus Interface to the set of TimeSync Interfaces.
1109 */
1110int gb_timesync_interface_add(struct gb_interface *interface)
1111{
1112 struct gb_timesync_svc *timesync_svc;
1113 struct gb_timesync_interface *timesync_interface;
1114 int ret = 0;
1115
1116 if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
1117 return 0;
1118
1119 mutex_lock(&gb_timesync_svc_list_mutex);
1120 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1121 if (!timesync_svc) {
1122 ret = -ENODEV;
1123 goto done;
1124 }
1125
1126 timesync_interface = kzalloc(sizeof(*timesync_interface), GFP_KERNEL);
1127 if (!timesync_interface) {
1128 ret = -ENOMEM;
1129 goto done;
1130 }
1131
1132 mutex_lock(&timesync_svc->mutex);
1133 timesync_interface->interface = interface;
1134 list_add(&timesync_interface->list, &timesync_svc->interface_list);
1135 timesync_svc->strobe_mask |= 1 << interface->interface_id;
1136 mutex_unlock(&timesync_svc->mutex);
1137
1138done:
1139 mutex_unlock(&gb_timesync_svc_list_mutex);
1140 return ret;
1141}
1142EXPORT_SYMBOL_GPL(gb_timesync_interface_add);
1143
1144/*
1145 * Remove a Greybus Interface from the set of TimeSync Interfaces.
1146 */
1147void gb_timesync_interface_remove(struct gb_interface *interface)
1148{
1149 struct gb_timesync_svc *timesync_svc;
1150 struct gb_timesync_interface *timesync_interface;
1151
1152 if (!(interface->features & GREYBUS_INTERFACE_FEATURE_TIMESYNC))
1153 return;
1154
1155 mutex_lock(&gb_timesync_svc_list_mutex);
1156 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1157 if (!timesync_svc)
1158 goto done;
1159
1160 timesync_interface = gb_timesync_find_timesync_interface(timesync_svc,
1161 interface);
1162 if (!timesync_interface)
1163 goto done;
1164
1165 mutex_lock(&timesync_svc->mutex);
1166 timesync_svc->strobe_mask &= ~(1 << interface->interface_id);
1167 list_del(&timesync_interface->list);
1168 kfree(timesync_interface);
1169 mutex_unlock(&timesync_svc->mutex);
1170done:
1171 mutex_unlock(&gb_timesync_svc_list_mutex);
1172}
1173EXPORT_SYMBOL_GPL(gb_timesync_interface_remove);
1174
1175/*
1176 * Give the authoritative FrameTime to the calling function. Returns zero if we
1177 * are not in GB_TIMESYNC_STATE_ACTIVE.
1178 */
1179static u64 gb_timesync_get_frame_time(struct gb_timesync_svc *timesync_svc)
1180{
1181 unsigned long flags;
1182 u64 ret;
1183
1184 spin_lock_irqsave(&timesync_svc->spinlock, flags);
1185 if (timesync_svc->state == GB_TIMESYNC_STATE_ACTIVE)
1186 ret = __gb_timesync_get_frame_time(timesync_svc);
1187 else
1188 ret = 0;
1189 spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
1190 return ret;
1191}
1192
1193u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface)
1194{
1195 struct gb_timesync_svc *timesync_svc;
1196 u64 ret = 0;
1197
1198 mutex_lock(&gb_timesync_svc_list_mutex);
1199 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1200 if (!timesync_svc)
1201 goto done;
1202
1203 ret = gb_timesync_get_frame_time(timesync_svc);
1204done:
1205 mutex_unlock(&gb_timesync_svc_list_mutex);
1206 return ret;
1207}
1208EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_interface);
1209
1210u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc)
1211{
1212 struct gb_timesync_svc *timesync_svc;
1213 u64 ret = 0;
1214
1215 mutex_lock(&gb_timesync_svc_list_mutex);
1216 timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
1217 if (!timesync_svc)
1218 goto done;
1219
1220 ret = gb_timesync_get_frame_time(timesync_svc);
1221done:
1222 mutex_unlock(&gb_timesync_svc_list_mutex);
1223 return ret;
1224}
1225EXPORT_SYMBOL_GPL(gb_timesync_get_frame_time_by_svc);
1226
/* Incrementally updates the conversion base from FrameTime to ktime */
static void gb_timesync_ktime_timer_fn(unsigned long data)
{
        /* @data carries the context pointer set when the timer was armed. */
        struct gb_timesync_svc *timesync_svc =
                (struct gb_timesync_svc *)data;
        unsigned long flags;
        u64 frame_time;
        struct timespec ts;

        spin_lock_irqsave(&timesync_svc->spinlock, flags);

        /* Only refresh the anchor while synchronization is active. */
        if (timesync_svc->state != GB_TIMESYNC_STATE_ACTIVE)
                goto done;

        /* Capture a matched (ktime, FrameTime) sample pair. */
        ktime_get_ts(&ts);
        frame_time = __gb_timesync_get_frame_time(timesync_svc);
        gb_timesync_store_ktime(timesync_svc, ts, frame_time);

done:
        spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
        /* Self re-arm: the timer keeps ticking even while inactive. */
        mod_timer(&timesync_svc->ktime_timer,
                  jiffies + GB_TIMESYNC_KTIME_UPDATE);
}
1250
1251int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
1252 struct timespec *ts)
1253{
1254 struct gb_timesync_svc *timesync_svc;
1255 int ret = 0;
1256
1257 mutex_lock(&gb_timesync_svc_list_mutex);
1258 timesync_svc = gb_timesync_find_timesync_svc(svc->hd);
1259 if (!timesync_svc) {
1260 ret = -ENODEV;
1261 goto done;
1262 }
1263 ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
1264done:
1265 mutex_unlock(&gb_timesync_svc_list_mutex);
1266 return ret;
1267}
1268EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_svc);
1269
1270int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
1271 u64 frame_time, struct timespec *ts)
1272{
1273 struct gb_timesync_svc *timesync_svc;
1274 int ret = 0;
1275
1276 mutex_lock(&gb_timesync_svc_list_mutex);
1277 timesync_svc = gb_timesync_find_timesync_svc(interface->hd);
1278 if (!timesync_svc) {
1279 ret = -ENODEV;
1280 goto done;
1281 }
1282
1283 ret = gb_timesync_to_timespec(timesync_svc, frame_time, ts);
1284done:
1285 mutex_unlock(&gb_timesync_svc_list_mutex);
1286 return ret;
1287}
1288EXPORT_SYMBOL_GPL(gb_timesync_to_timespec_by_interface);
1289
1290void gb_timesync_irq(struct gb_timesync_svc *timesync_svc)
1291{
1292 unsigned long flags;
1293 u64 strobe_time;
1294 bool strobe_is_ping = true;
1295 struct timespec ts;
1296
1297 ktime_get_ts(&ts);
1298 strobe_time = __gb_timesync_get_frame_time(timesync_svc);
1299
1300 spin_lock_irqsave(&timesync_svc->spinlock, flags);
1301
1302 if (timesync_svc->state == GB_TIMESYNC_STATE_PING) {
1303 if (!timesync_svc->capture_ping)
1304 goto done_nolog;
1305 timesync_svc->ap_ping_frame_time = strobe_time;
1306 goto done_log;
1307 } else if (timesync_svc->state != GB_TIMESYNC_STATE_WAIT_SVC) {
1308 goto done_nolog;
1309 }
1310
1311 timesync_svc->strobe_data[timesync_svc->strobe].frame_time = strobe_time;
1312 timesync_svc->strobe_data[timesync_svc->strobe].ts = ts;
1313
1314 if (++timesync_svc->strobe == GB_TIMESYNC_MAX_STROBES) {
1315 gb_timesync_set_state(timesync_svc,
1316 GB_TIMESYNC_STATE_AUTHORITATIVE);
1317 }
1318 strobe_is_ping = false;
1319done_log:
1320 trace_gb_timesync_irq(strobe_is_ping, timesync_svc->strobe,
1321 GB_TIMESYNC_MAX_STROBES, strobe_time);
1322done_nolog:
1323 spin_unlock_irqrestore(&timesync_svc->spinlock, flags);
1324}
1325EXPORT_SYMBOL(gb_timesync_irq);
1326
1327int __init gb_timesync_init(void)
1328{
1329 int ret = 0;
1330
1331 ret = gb_timesync_platform_init();
1332 if (ret) {
1333 pr_err("timesync platform init fail!\n");
1334 return ret;
1335 }
1336
1337 gb_timesync_clock_rate = gb_timesync_platform_get_clock_rate();
1338
1339 /* Calculate nanoseconds and femtoseconds per clock */
1340 gb_timesync_fs_per_clock = FSEC_PER_SEC;
1341 do_div(gb_timesync_fs_per_clock, gb_timesync_clock_rate);
1342 gb_timesync_ns_per_clock = NSEC_PER_SEC;
1343 do_div(gb_timesync_ns_per_clock, gb_timesync_clock_rate);
1344
1345 /* Calculate the maximum number of clocks we will convert to ktime */
1346 gb_timesync_max_ktime_diff =
1347 GB_TIMESYNC_MAX_KTIME_CONVERSION * gb_timesync_clock_rate;
1348
1349 pr_info("Time-Sync @ %lu Hz max ktime conversion +/- %d seconds\n",
1350 gb_timesync_clock_rate, GB_TIMESYNC_MAX_KTIME_CONVERSION);
1351 return 0;
1352}
1353
/* Subsystem-exit counterpart of gb_timesync_init(). */
void gb_timesync_exit(void)
{
        gb_timesync_platform_exit();
}
diff --git a/drivers/staging/greybus/timesync.h b/drivers/staging/greybus/timesync.h
new file mode 100644
index 000000000000..72fc9a35a002
--- /dev/null
+++ b/drivers/staging/greybus/timesync.h
@@ -0,0 +1,45 @@
1/*
2 * TimeSync API driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#ifndef __TIMESYNC_H
11#define __TIMESYNC_H
12
13struct gb_svc;
14struct gb_interface;
15struct gb_timesync_svc;
16
17/* Platform */
18u64 gb_timesync_platform_get_counter(void);
19u32 gb_timesync_platform_get_clock_rate(void);
20int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata);
21void gb_timesync_platform_unlock_bus(void);
22
23int gb_timesync_platform_init(void);
24void gb_timesync_platform_exit(void);
25
26/* Core API */
27int gb_timesync_interface_add(struct gb_interface *interface);
28void gb_timesync_interface_remove(struct gb_interface *interface);
29int gb_timesync_svc_add(struct gb_svc *svc);
30void gb_timesync_svc_remove(struct gb_svc *svc);
31
32u64 gb_timesync_get_frame_time_by_interface(struct gb_interface *interface);
33u64 gb_timesync_get_frame_time_by_svc(struct gb_svc *svc);
34int gb_timesync_to_timespec_by_svc(struct gb_svc *svc, u64 frame_time,
35 struct timespec *ts);
36int gb_timesync_to_timespec_by_interface(struct gb_interface *interface,
37 u64 frame_time, struct timespec *ts);
38
39int gb_timesync_schedule_synchronous(struct gb_interface *intf);
40void gb_timesync_schedule_asynchronous(struct gb_interface *intf);
41void gb_timesync_irq(struct gb_timesync_svc *timesync_svc);
42int gb_timesync_init(void);
43void gb_timesync_exit(void);
44
45#endif /* __TIMESYNC_H */
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
new file mode 100644
index 000000000000..50e8883f932f
--- /dev/null
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -0,0 +1,77 @@
1/*
2 * TimeSync API driver.
3 *
4 * Copyright 2016 Google Inc.
5 * Copyright 2016 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 *
9 * This code reads directly from an ARMv7 memory-mapped timer that lives in
10 * MMIO space. Since this counter lives inside of MMIO space its shared between
11 * cores and that means we don't have to worry about issues like TSC on x86
12 * where each time-stamp-counter (TSC) is local to a particular core.
13 *
14 * Register-level access code is based on
15 * drivers/clocksource/arm_arch_timer.c
16 */
17#include <linux/cpufreq.h>
18#include <linux/of_platform.h>
19
20#include "greybus.h"
21#include "arche_platform.h"
22
/* DT-provided counter frequency; stays 0 when no matching node is found. */
static u32 gb_timesync_clock_frequency;
/*
 * Hook filled in by the arche-platform driver to switch the bus state.
 * NOTE(review): nothing here guarantees it is non-NULL before
 * gb_timesync_platform_lock_bus()/unlock_bus() run — confirm callers.
 */
int (*arche_platform_change_state_cb)(enum arche_platform_state state,
                                      struct gb_timesync_svc *pdata);
EXPORT_SYMBOL_GPL(arche_platform_change_state_cb);
27
28u64 gb_timesync_platform_get_counter(void)
29{
30 return (u64)get_cycles();
31}
32
33u32 gb_timesync_platform_get_clock_rate(void)
34{
35 if (unlikely(!gb_timesync_clock_frequency))
36 return cpufreq_get(0);
37
38 return gb_timesync_clock_frequency;
39}
40
41int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
42{
43 return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
44 pdata);
45}
46
47void gb_timesync_platform_unlock_bus(void)
48{
49 arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
50}
51
/* DT match table for the memory-mapped frame-time counter node. */
static const struct of_device_id arch_timer_of_match[] = {
        { .compatible = "google,greybus-frame-time-counter", },
        {},
};
56
57int __init gb_timesync_platform_init(void)
58{
59 struct device_node *np;
60
61 np = of_find_matching_node(NULL, arch_timer_of_match);
62 if (!np) {
63 /* Tolerate not finding to allow BBB etc to continue */
64 pr_warn("Unable to find a compatible ARMv7 timer\n");
65 return 0;
66 }
67
68 if (of_property_read_u32(np, "clock-frequency",
69 &gb_timesync_clock_frequency)) {
70 pr_err("Unable to find timer clock-frequency\n");
71 return -ENODEV;
72 }
73
74 return 0;
75}
76
77void gb_timesync_platform_exit(void) {}
diff --git a/drivers/staging/greybus/tools/.gitignore b/drivers/staging/greybus/tools/.gitignore
new file mode 100644
index 000000000000..023654c83068
--- /dev/null
+++ b/drivers/staging/greybus/tools/.gitignore
@@ -0,0 +1 @@
loopback_test
diff --git a/drivers/staging/greybus/tools/Android.mk b/drivers/staging/greybus/tools/Android.mk
new file mode 100644
index 000000000000..fdadbf611757
--- /dev/null
+++ b/drivers/staging/greybus/tools/Android.mk
@@ -0,0 +1,10 @@
1LOCAL_PATH:= $(call my-dir)
2
3include $(CLEAR_VARS)
4
5LOCAL_SRC_FILES:= loopback_test.c
6LOCAL_MODULE_TAGS := optional
7LOCAL_MODULE := gb_loopback_test
8
9include $(BUILD_EXECUTABLE)
10
diff --git a/drivers/staging/greybus/tools/Makefile b/drivers/staging/greybus/tools/Makefile
new file mode 100644
index 000000000000..852b12b71149
--- /dev/null
+++ b/drivers/staging/greybus/tools/Makefile
@@ -0,0 +1,31 @@
1ifeq ($(strip $(V)), 1)
2 Q =
3else
4 Q = @
5endif
6
7CFLAGS += -std=gnu99 -Wall -Wextra -g \
8 -D_GNU_SOURCE \
9 -Wno-unused-parameter \
10 -Wmaybe-uninitialized \
11 -Wredundant-decls \
12 -Wcast-align \
13 -Wsign-compare \
14 -Wno-missing-field-initializers
15
16CC := $(CROSS_COMPILE)gcc
17
18TOOLS = loopback_test
19
20all: $(TOOLS)
21
22%.o: %.c ../greybus_protocols.h
23 @echo ' TARGET_CC $@'
24 $(Q)$(CC) $(CFLAGS) -c $< -o $@
25
26loopback_%: loopback_%.o
27 @echo ' TARGET_LD $@'
28 $(Q)$(CC) $(CFLAGS) $(LDFLAGS) $^ -o $@
29
30clean::
31 rm -f *.o $(TOOLS)
diff --git a/drivers/staging/greybus/tools/README.loopback b/drivers/staging/greybus/tools/README.loopback
new file mode 100644
index 000000000000..845b08dc4696
--- /dev/null
+++ b/drivers/staging/greybus/tools/README.loopback
@@ -0,0 +1,198 @@
1
2
3 1 - LOOPBACK DRIVER
4
5The driver implements the main logic of the loopback test and provides
6sysfs files to configure the test and retrieve the results.
7A user could run a test without the need of the test application given
8that he understands the sysfs interface of the loopback driver.
9
10The loopback kernel driver needs to be loaded and at least one module
11with the loopback feature enabled must be present for the sysfs files to be
12created and for the loopback test application to be able to run.
13
14To load the module:
15# modprobe gb-loopback
16
17
18When the module is probed, new files become available in the sysfs
19directory of the detected loopback device
20(typically under "/sys/bus/greybus/devices").
21
22Here is a short summary of the sysfs interface files that should be visible:
23
24* Loopback Configuration Files:
25 async - Use asynchronous operations.
26 iteration_max - Number of tests iterations to perform.
27 size - payload size of the transfer.
28 timeout - The number of microseconds to give an individual
29 asynchronous request before timing out.
30 us_wait - Time to wait between 2 messages
31 type - By writing the test type to this file, the test starts.
32 Valid tests are:
33 0 - stop the test
34 2 - ping
35 3 - transfer
36 4 - sink
37
38* Loopback feedback files:
39 error - number of errors that have occurred.
40 iteration_count - Number of iterations performed.
41 requests_completed - Number of requests successfully completed.
42 requests_timedout - Number of requests that have timed out.
43 timeout_max - Max allowed timeout
44 timeout_min - Min allowed timeout.
45
46* Loopback result files:
47 apbridge_unipro_latency_avg
48 apbridge_unipro_latency_max
49 apbridge_unipro_latency_min
50 gpbridge_firmware_latency_avg
51 gpbridge_firmware_latency_max
52 gpbridge_firmware_latency_min
53 requests_per_second_avg
54 requests_per_second_max
55 requests_per_second_min
56 latency_avg
57 latency_max
58 latency_min
59 throughput_avg
60 throughput_max
61 throughput_min
62
63
64
65 2 - LOOPBACK TEST APPLICATION
66
67The loopback test application manages and formats the results provided by
68the loopback kernel module. The purpose of this application
69is to:
70 - Start and manage multiple loopback device tests concurrently.
71 - Calculate the aggregate results for multiple devices.
72 - Gather and format test results (csv or human readable).
73
74The best way to get up to date usage information for the application is
75usually to pass the "-h" parameter.
76Here is the summary of the available options:
77
78 Mandatory arguments
79 -t must be one of the test names - sink, transfer or ping
80 -i iteration count - the number of iterations to run the test over
81 Optional arguments
82 -S sysfs location - location for greybus 'endo' entries default /sys/bus/greybus/devices/
83 -D debugfs location - location for loopback debugfs entries default /sys/kernel/debug/gb_loopback/
84 -s size of data packet to send during test - defaults to zero
85 -m mask - a bit mask of connections to include example: -m 8 = 4th connection -m 9 = 1st and 4th connection etc
86 default is zero which means broadcast to all connections
87 -v verbose output
88 -d debug output
89 -r raw data output - when specified the full list of latency values are included in the output CSV
90 -p porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file
91 -a aggregate - show aggregation of all enabled devices
92 -l list found loopback devices and exit.
93 -x Async - Enable async transfers.
94 -o Timeout - Timeout in microseconds for async operations.
95
96
97
98 3 - REAL WORLD EXAMPLE USAGES
99
100 3.1 - Using the driver sysfs files to run a test on a single device:
101
102* Run a 1000 transfers of a 100 byte packet. Each transfer is started only
103after the previous one finished successfully:
104 echo 0 > /sys/bus/greybus/devices/1-2.17/type
105 echo 0 > /sys/bus/greybus/devices/1-2.17/async
106 echo 2000 > /sys/bus/greybus/devices/1-2.17/us_wait
107 echo 100 > /sys/bus/greybus/devices/1-2.17/size
108 echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
109 echo 0 > /sys/bus/greybus/devices/1-2.17/mask
110 echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
111 echo 3 > /sys/bus/greybus/devices/1-2.17/type
112
113* Run a 1000 transfers of a 100 byte packet. Transfers are started without
114waiting for the previous one to finish:
115 echo 0 > /sys/bus/greybus/devices/1-2.17/type
116 echo 3 > /sys/bus/greybus/devices/1-2.17/async
117 echo 0 > /sys/bus/greybus/devices/1-2.17/us_wait
118 echo 100 > /sys/bus/greybus/devices/1-2.17/size
119 echo 1000 > /sys/bus/greybus/devices/1-2.17/iteration_max
120 echo 0 > /sys/bus/greybus/devices/1-2.17/mask
121 echo 200000 > /sys/bus/greybus/devices/1-2.17/timeout
122 echo 3 > /sys/bus/greybus/devices/1-2.17/type
123
124* Read the results from sysfs:
125 cat /sys/bus/greybus/devices/1-2.17/requests_per_second_min
126 cat /sys/bus/greybus/devices/1-2.17/requests_per_second_max
127 cat /sys/bus/greybus/devices/1-2.17/requests_per_second_avg
128
129 cat /sys/bus/greybus/devices/1-2.17/latency_min
130 cat /sys/bus/greybus/devices/1-2.17/latency_max
131 cat /sys/bus/greybus/devices/1-2.17/latency_avg
132
133 cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_min
134 cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_max
135 cat /sys/bus/greybus/devices/1-2.17/apbridge_unipro_latency_avg
136
137 cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_min
138 cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_max
139 cat /sys/bus/greybus/devices/1-2.17/gpbridge_firmware_latency_avg
140
141 cat /sys/bus/greybus/devices/1-2.17/error
142 cat /sys/bus/greybus/devices/1-2.17/requests_completed
143 cat /sys/bus/greybus/devices/1-2.17/requests_timedout
144
145
1463.2 - using the test application:
147
148* Run a transfer test 10 iterations of size 100 bytes on all available devices
149 #/loopback_test -t transfer -i 10 -s 100
150 1970-1-1 0:10:7,transfer,1-4.17,100,10,0,443,509,471.700012,66,1963,2256,2124.600098,293,102776,118088,109318.898438,15312,1620,1998,1894.099976,378,56,57,56.799999,1
151 1970-1-1 0:10:7,transfer,1-5.17,100,10,0,399,542,463.399994,143,1845,2505,2175.800049,660,92568,125744,107393.296875,33176,1469,2305,1806.500000,836,56,57,56.799999,1
152
153
154* Show the aggregate results of both devices. ("-a")
155 #/loopback_test -t transfer -i 10 -s 100 -a
156 1970-1-1 0:10:35,transfer,1-4.17,100,10,0,448,580,494.100006,132,1722,2230,2039.400024,508,103936,134560,114515.703125,30624,1513,1980,1806.900024,467,56,57,57.299999,1
157 1970-1-1 0:10:35,transfer,1-5.17,100,10,0,383,558,478.600006,175,1791,2606,2115.199951,815,88856,129456,110919.703125,40600,1457,2246,1773.599976,789,56,57,57.099998,1
158 1970-1-1 0:10:35,transfer,aggregate,100,10,0,383,580,486.000000,197,1722,2606,2077.000000,884,88856,134560,112717.000000,45704,1457,2246,1789.000000,789,56,57,57.000000,1
159
160* Example usage of the mask option to select which devices will
161 run the test (1st, 2nd, or both devices):
162 # /loopback_test -t transfer -i 10 -s 100 -m 1
163 1970-1-1 0:11:56,transfer,1-4.17,100,10,0,514,558,544.900024,44,1791,1943,1836.599976,152,119248,129456,126301.296875,10208,1600,1001609,101613.601562,1000009,56,57,56.900002,1
164 # /loopback_test -t transfer -i 10 -s 100 -m 2
165 1970-1-1 0:12:0,transfer,1-5.17,100,10,0,468,554,539.000000,86,1804,2134,1859.500000,330,108576,128528,124932.500000,19952,1606,1626,1619.300049,20,56,57,57.400002,1
166 # /loopback_test -t transfer -i 10 -s 100 -m 3
167 1970-1-1 0:12:3,transfer,1-4.17,100,10,0,432,510,469.399994,78,1959,2313,2135.800049,354,100224,118320,108785.296875,18096,1610,2024,1893.500000,414,56,57,57.200001,1
168 1970-1-1 0:12:3,transfer,1-5.17,100,10,0,404,542,468.799988,138,1843,2472,2152.500000,629,93728,125744,108646.101562,32016,1504,2247,1853.099976,743,56,57,57.099998,1
169
170* Show output in human readable format ("-p")
171 # /loopback_test -t transfer -i 10 -s 100 -m 3 -p
172
173 1970-1-1 0:12:37
174 test: transfer
175 path: 1-4.17
176 size: 100
177 iterations: 10
178 errors: 0
179 async: Disabled
180 requests per-sec: min=390, max=547, average=469.299988, jitter=157
181 ap-throughput B/s: min=90480 max=126904 average=108762.101562 jitter=36424
182 ap-latency usec: min=1826 max=2560 average=2146.000000 jitter=734
183 apbridge-latency usec: min=1620 max=1982 average=1882.099976 jitter=362
184 gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1
185
186
187 1970-1-1 0:12:37
188 test: transfer
189 path: 1-5.17
190 size: 100
191 iterations: 10
192 errors: 0
193 async: Disabled
194 requests per-sec: min=397, max=538, average=461.700012, jitter=141
195 ap-throughput B/s: min=92104 max=124816 average=106998.898438 jitter=32712
196 ap-latency usec: min=1856 max=2514 average=2185.699951 jitter=658
197 apbridge-latency usec: min=1460 max=2296 average=1828.599976 jitter=836
198 gpbridge-latency usec: min=56 max=57 average=57.099998 jitter=1
diff --git a/drivers/staging/greybus/tools/lbtest b/drivers/staging/greybus/tools/lbtest
new file mode 100755
index 000000000000..d7353f1a2a6f
--- /dev/null
+++ b/drivers/staging/greybus/tools/lbtest
@@ -0,0 +1,168 @@
1#!/usr/bin/env python
2
3# Copyright (c) 2015 Google, Inc.
4# Copyright (c) 2015 Linaro, Ltd.
5# All rights reserved.
6#
7# Redistribution and use in source and binary forms, with or without
8# modification, are permitted provided that the following conditions are met:
9# 1. Redistributions of source code must retain the above copyright notice,
10# this list of conditions and the following disclaimer.
11# 2. Redistributions in binary form must reproduce the above copyright notice,
12# this list of conditions and the following disclaimer in the documentation
13# and/or other materials provided with the distribution.
14# 3. Neither the name of the copyright holder nor the names of its
15# contributors may be used to endorse or promote products derived from this
16# software without specific prior written permission.
17#
18# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
25# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
26# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
27# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
28# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30from __future__ import print_function
31import csv
32import datetime
33import sys
34import time
35
# Map test-name -> the numeric loopback "type" value written to sysfs.
# NOTE(review): this shadows the built-in dict(); renaming would touch
# loopback_run() and main(), so it is only flagged here.
dict = {'ping': '2', 'transfer': '3', 'sink': '4'}
# Non-zero => print per-iteration progress while a test is running.
verbose = 1
38
def abort():
    """Terminate the script with a non-zero (failure) exit status."""
    raise SystemExit(1)
41
def usage():
    """Print the command-line help to stderr and exit with failure.

    NOTE(review): the help string contains no '{0}' placeholder, so the
    trailing .format(sys.argv[0]) call is a no-op and the examples keep
    the hard-coded name 'looptest'.
    """
    print('Usage: looptest TEST SIZE ITERATIONS PATH\n\n'
    '  Run TEST for a number of ITERATIONS with operation data SIZE bytes\n'
    '  TEST may be \'ping\' \'transfer\' or \'sink\'\n'
    '  SIZE indicates the size of transfer <= greybus max payload bytes\n'
    '  ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n'
    '             Note if ITERATIONS is set to zero then this utility will\n'
    '             initiate an infinite (non terminating) test and exit\n'
    '             without logging any metrics data\n'
    '  PATH indicates the sysfs path for the loopback greybus entries e.g.\n'
    '       /sys/bus/greybus/devices/endo0:1:1:1:1/\n'
    'Examples:\n'
    '  looptest transfer 128 10000\n'
    '  looptest ping 0 128\n'
    '  looptest sink 2030 32768\n'
    .format(sys.argv[0]), file=sys.stderr)

    abort()
60
def read_sysfs_int(path):
    """Read a sysfs attribute and return its contents as an int.

    On I/O failure the error is logged and None is returned (callers in
    this script treat a stalled/None iteration count as a test error).
    """
    try:
        # 'with' guarantees the descriptor is closed even if int() raises;
        # the original used bare open/close with stray semicolons.
        with open(path, "r") as f:
            return int(f.read())
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        print("Invalid path %s" % path)
70
def write_sysfs_val(path, val):
    """Write val to an existing sysfs attribute, logging I/O failures."""
    try:
        # "r+" requires the node to already exist, matching sysfs semantics.
        with open(path, "r+") as f:
            f.write(val)
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        print("Invalid path %s" % path)
79
def log_csv(test_name, size, iteration_max, sys_pfx):
    """Read the finished test's metrics from sysfs and append one CSV row.

    The file is named <test_name>_<size>_<iteration_max>.csv, so repeated
    runs with identical parameters accumulate datestamped rows in the
    same file.
    """
    # file name will test_name_size_iteration_max.csv
    # every time the same test with the same parameters is run we will then
    # append to the same CSV with datestamp - representing each test dataset
    fname = test_name + '_' + size + '_' + str(iteration_max) + '.csv'

    try:
        # gather data set
        date = str(datetime.datetime.now())
        error = read_sysfs_int(sys_pfx + 'error')
        request_min = read_sysfs_int(sys_pfx + 'requests_per_second_min')
        request_max = read_sysfs_int(sys_pfx + 'requests_per_second_max')
        request_avg = read_sysfs_int(sys_pfx + 'requests_per_second_avg')
        latency_min = read_sysfs_int(sys_pfx + 'latency_min')
        latency_max = read_sysfs_int(sys_pfx + 'latency_max')
        latency_avg = read_sysfs_int(sys_pfx + 'latency_avg')
        throughput_min = read_sysfs_int(sys_pfx + 'throughput_min')
        throughput_max = read_sysfs_int(sys_pfx + 'throughput_max')
        throughput_avg = read_sysfs_int(sys_pfx + 'throughput_avg')

        # derive jitter
        request_jitter = request_max - request_min
        latency_jitter = latency_max - latency_min
        throughput_jitter = throughput_max - throughput_min

        # append data set to file
        with open(fname, 'a') as csvf:
            row = csv.writer(csvf, delimiter=",", quotechar="'",
                            quoting=csv.QUOTE_MINIMAL)
            row.writerow([date, test_name, size, iteration_max, error,
                          request_min, request_max, request_avg, request_jitter,
                          latency_min, latency_max, latency_avg, latency_jitter,
                          throughput_min, throughput_max, throughput_avg, throughput_jitter])
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
115
def loopback_run(test_name, size, iteration_max, sys_pfx):
    """Configure, start and monitor one loopback test, then log it to CSV.

    A poll loop samples iteration_count once per second; if the count
    stops advancing before reaching iteration_max the test is reported
    as failed.
    """
    test_id = dict[test_name]
    try:
        # Terminate any currently running test
        write_sysfs_val(sys_pfx + 'type', '0')
        # Set parameter for no wait between messages
        write_sysfs_val(sys_pfx + 'ms_wait', '0')
        # Set operation size
        write_sysfs_val(sys_pfx + 'size', size)
        # Set iterations
        write_sysfs_val(sys_pfx + 'iteration_max', str(iteration_max))
        # Initiate by setting loopback operation type
        write_sysfs_val(sys_pfx + 'type', test_id)
        time.sleep(1)

        if iteration_max == 0:
            print("Infinite test initiated CSV won't be logged\n")
            return

        previous = 0
        err = 0
        while True:
            # get current count; bail out if it hasn't changed
            iteration_count = read_sysfs_int(sys_pfx + 'iteration_count')
            if previous == iteration_count:
                err = 1
                break
            elif iteration_count == iteration_max:
                break
            previous = iteration_count
            if verbose:
                print('%02d%% complete %d of %d ' %
                      (100 * iteration_count / iteration_max,
                       iteration_count, iteration_max))
            time.sleep(1)
        if err:
            print('\nError executing test\n')
        else:
            log_csv(test_name, size, iteration_max, sys_pfx)
    except ValueError as ve:
        # Bug fix: the handler referenced an undefined name 'e' and
        # ValueError has no .strerror attribute, so any ValueError was
        # masked by a NameError. Report the exception itself instead.
        print("Error: %s " % str(ve), file=sys.stderr)
        abort()
158
def main():
    """Validate argv and dispatch to loopback_run(); otherwise show usage."""
    if len(sys.argv) < 5:
        usage()

    test = sys.argv[1]
    if test in dict:
        loopback_run(test, sys.argv[2], int(sys.argv[3]), sys.argv[4])
    else:
        usage()
if __name__ == '__main__':
    main()
diff --git a/drivers/staging/greybus/tools/loopback_test.c b/drivers/staging/greybus/tools/loopback_test.c
new file mode 100644
index 000000000000..f7f4cd6fb55b
--- /dev/null
+++ b/drivers/staging/greybus/tools/loopback_test.c
@@ -0,0 +1,1000 @@
1/*
2 * Loopback test application
3 *
4 * Copyright 2015 Google Inc.
5 * Copyright 2015 Linaro Ltd.
6 *
7 * Provided under the three clause BSD license found in the LICENSE file.
8 */
9#include <errno.h>
10#include <fcntl.h>
11#include <stdio.h>
12#include <string.h>
13#include <stdlib.h>
14#include <stdint.h>
15#include <poll.h>
16#include <sys/types.h>
17#include <time.h>
18#include <unistd.h>
19#include <dirent.h>
20#include <signal.h>
21
22#define MAX_NUM_DEVICES 10
23#define MAX_SYSFS_PATH 0x200
24#define CSV_MAX_LINE 0x1000
25#define SYSFS_MAX_INT 0x20
26#define MAX_STR_LEN 255
27#define DEFAULT_ASYNC_TIMEOUT 200000
28
/*
 * Maps a user-supplied test name ("-t") to the numeric loopback "type"
 * value that is written to the device's sysfs "type" attribute.
 */
struct dict {
	char *name;
	int type;
};

static struct dict dict[] = {
	{"ping", 2},
	{"transfer", 3},
	{"sink", 4},
	{NULL,}		/* list termination */
};
40
/*
 * Metrics collected from one loopback device after a test run.
 * min/max/avg values are read verbatim from sysfs; each *_jitter field
 * is derived in get_results() as max - min.
 */
struct loopback_results {
	float latency_avg;
	uint32_t latency_max;
	uint32_t latency_min;
	uint32_t latency_jitter;

	float request_avg;
	uint32_t request_max;
	uint32_t request_min;
	uint32_t request_jitter;

	float throughput_avg;
	uint32_t throughput_max;
	uint32_t throughput_min;
	uint32_t throughput_jitter;

	float apbridge_unipro_latency_avg;
	uint32_t apbridge_unipro_latency_max;
	uint32_t apbridge_unipro_latency_min;
	uint32_t apbridge_unipro_latency_jitter;

	float gbphy_firmware_latency_avg;
	uint32_t gbphy_firmware_latency_max;
	uint32_t gbphy_firmware_latency_min;
	uint32_t gbphy_firmware_latency_jitter;

	uint32_t error;
};
69
/* One discovered gb_loopback device and where its attributes live. */
struct loopback_device {
	char name[MAX_SYSFS_PATH];		/* e.g. "gb_loopback0" */
	char sysfs_entry[MAX_SYSFS_PATH];	/* sysfs dir, trailing '/' */
	char debugfs_entry[MAX_SYSFS_PATH];	/* raw_latency_<name> file */
	struct loopback_results results;
};
76
/*
 * Whole-program state for one invocation: parsed command-line options
 * (flag letters below refer to main()'s getopt switch), the discovered
 * devices, their poll descriptors and the optional aggregate results.
 */
struct loopback_test {
	int verbose;			/* -v */
	int debug;			/* -d */
	int raw_data_dump;		/* -r */
	int porcelain;			/* -p: human-readable output */
	int mask;			/* -m: device bitmask, 0 = all */
	int size;			/* -s: operation payload size */
	int iteration_max;		/* -i */
	int aggregate_output;		/* -a */
	int test_id;			/* numeric type from test_name */
	int device_count;		/* devices found under sysfs_prefix */
	int list_devices;		/* -l */
	int use_async;			/* -x */
	int async_timeout;		/* -o */
	int async_outstanding_operations;	/* -c */
	int us_wait;			/* -w */
	int file_output;		/* -z */
	int stop_all;			/* -f */
	int poll_count;			/* populated entries in fds[] */
	char test_name[MAX_STR_LEN];		/* -t */
	char sysfs_prefix[MAX_SYSFS_PATH];	/* -S */
	char debugfs_prefix[MAX_SYSFS_PATH];	/* -D */
	struct timespec poll_timeout;		/* -O */
	struct loopback_device devices[MAX_NUM_DEVICES];
	struct loopback_results aggregate_results;
	struct pollfd fds[MAX_NUM_DEVICES];
};

/* Single global test context, zeroed and populated by main(). */
struct loopback_test t;
106
/* Helper macros to calculate the aggregate results for all devices */
static inline int device_enabled(struct loopback_test *t, int dev_idx);

/*
 * Each GET_* macro expands to a get_<field>_aggregate() function that
 * folds results.<field> across all enabled devices into one max, min
 * or mean value for the "aggregate" output row.
 *
 * NOTE(review): the accumulators and return type are integral, so when
 * instantiated for the float *_avg fields each value is truncated to an
 * integer — confirm this precision loss is acceptable for the aggregate
 * averages.
 */
#define GET_MAX(field)							\
static int get_##field##_aggregate(struct loopback_test *t)		\
{									\
	uint32_t max = 0;						\
	int i;								\
	for (i = 0; i < t->device_count; i++) {				\
		if (!device_enabled(t, i))				\
			continue;					\
		if (t->devices[i].results.field > max)			\
			max = t->devices[i].results.field;		\
	}								\
	return max;							\
}									\

#define GET_MIN(field)							\
static int get_##field##_aggregate(struct loopback_test *t)		\
{									\
	uint32_t min = ~0;	/* start at UINT32_MAX */		\
	int i;								\
	for (i = 0; i < t->device_count; i++) {				\
		if (!device_enabled(t, i))				\
			continue;					\
		if (t->devices[i].results.field < min)			\
			min = t->devices[i].results.field;		\
	}								\
	return min;							\
}									\

#define GET_AVG(field)							\
static int get_##field##_aggregate(struct loopback_test *t)		\
{									\
	uint32_t val = 0;						\
	uint32_t count = 0;						\
	int i;								\
	for (i = 0; i < t->device_count; i++) {				\
		if (!device_enabled(t, i))				\
			continue;					\
		count++;						\
		val += t->devices[i].results.field;			\
	}								\
	if (count)							\
		val /= count;						\
	return val;							\
}									\

GET_MAX(throughput_max);
GET_MAX(request_max);
GET_MAX(latency_max);
GET_MAX(apbridge_unipro_latency_max);
GET_MAX(gbphy_firmware_latency_max);
GET_MIN(throughput_min);
GET_MIN(request_min);
GET_MIN(latency_min);
GET_MIN(apbridge_unipro_latency_min);
GET_MIN(gbphy_firmware_latency_min);
GET_AVG(throughput_avg);
GET_AVG(request_avg);
GET_AVG(latency_avg);
GET_AVG(apbridge_unipro_latency_avg);
GET_AVG(gbphy_firmware_latency_avg);
170
/*
 * Exit immediately with a failure status; every fatal error path in this
 * tool funnels through here.
 *
 * NOTE(review): this shadows the C library's abort(3), replacing the
 * SIGABRT/core-dump semantics with a plain _exit(1). _exit() also skips
 * atexit handlers and stdio flushing — confirm this is intended.
 */
void abort()
{
	_exit(1);
}
175
/* Print the full command-line reference to stderr, then exit(1). */
void usage(void)
{
	fprintf(stderr, "Usage: loopback_test TEST [SIZE] ITERATIONS [SYSPATH] [DBGPATH]\n\n"
	"  Run TEST for a number of ITERATIONS with operation data SIZE bytes\n"
	"  TEST may be \'ping\' \'transfer\' or \'sink\'\n"
	"  SIZE indicates the size of transfer <= greybus max payload bytes\n"
	"  ITERATIONS indicates the number of times to execute TEST at SIZE bytes\n"
	"             Note if ITERATIONS is set to zero then this utility will\n"
	"             initiate an infinite (non terminating) test and exit\n"
	"             without logging any metrics data\n"
	"  SYSPATH indicates the sysfs path for the loopback greybus entries e.g.\n"
	"          /sys/bus/greybus/devices\n"
	"  DBGPATH indicates the debugfs path for the loopback greybus entries e.g.\n"
	"          /sys/kernel/debug/gb_loopback/\n"
	" Mandatory arguments\n"
	"   -t     must be one of the test names - sink, transfer or ping\n"
	"   -i     iteration count - the number of iterations to run the test over\n"
	" Optional arguments\n"
	"   -S     sysfs location - location for greybus 'endo' entires default /sys/bus/greybus/devices/\n"
	"   -D     debugfs location - location for loopback debugfs entries default /sys/kernel/debug/gb_loopback/\n"
	"   -s     size of data packet to send during test - defaults to zero\n"
	"   -m     mask - a bit mask of connections to include example: -m 8 = 4th connection -m 9 = 1st and 4th connection etc\n"
	"                 default is zero which means broadcast to all connections\n"
	"   -v     verbose output\n"
	"   -d     debug output\n"
	"   -r     raw data output - when specified the full list of latency values are included in the output CSV\n"
	"   -p     porcelain - when specified printout is in a user-friendly non-CSV format. This option suppresses writing to CSV file\n"
	"   -a     aggregate - show aggregation of all enabled devices\n"
	"   -l     list found loopback devices and exit\n"
	"   -x     Async - Enable async transfers\n"
	"   -o     Async Timeout - Timeout in uSec for async operations\n"
	"   -O     Poll loop time out in seconds(max time a test is expected to last, default: 30sec)\n"
	"   -c     Max number of outstanding operations for async operations\n"
	"   -w     Wait in uSec between operations\n"
	"   -z     Enable output to a CSV file (incompatible with -p)\n"
	"   -f     When starting new loopback test, stop currently running tests on all devices\n"
	"Examples:\n"
	"  Send 10000 transfers with a packet size of 128 bytes to all active connections\n"
	"  loopback_test -t transfer -s 128 -i 10000 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
	"  loopback_test -t transfer -s 128 -i 10000 -m 0\n"
	"  Send 10000 transfers with a packet size of 128 bytes to connection 1 and 4\n"
	"  loopback_test -t transfer -s 128 -i 10000 -m 9\n"
	"  loopback_test -t ping -s 0 128 -i -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n"
	"  loopback_test -t sink -s 2030 -i 32768 -S /sys/bus/greybus/devices/ -D /sys/kernel/debug/gb_loopback/\n");
	abort();
}
222
223static inline int device_enabled(struct loopback_test *t, int dev_idx)
224{
225 if (!t->mask || (t->mask & (1 << dev_idx)))
226 return 1;
227
228 return 0;
229}
230
231static void show_loopback_devices(struct loopback_test *t)
232{
233 int i;
234
235 if (t->device_count == 0) {
236 printf("No loopback devices.\n");
237 return;
238 }
239
240 for (i = 0; i < t->device_count; i++)
241 printf("device[%d] = %s\n", i, t->devices[i].name);
242
243}
244
245int open_sysfs(const char *sys_pfx, const char *node, int flags)
246{
247 int fd;
248 char path[MAX_SYSFS_PATH];
249
250 snprintf(path, sizeof(path), "%s%s", sys_pfx, node);
251 fd = open(path, flags);
252 if (fd < 0) {
253 fprintf(stderr, "unable to open %s\n", path);
254 abort();
255 }
256 return fd;
257}
258
259int read_sysfs_int_fd(int fd, const char *sys_pfx, const char *node)
260{
261 char buf[SYSFS_MAX_INT];
262
263 if (read(fd, buf, sizeof(buf)) < 0) {
264 fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
265 strerror(errno));
266 close(fd);
267 abort();
268 }
269 return atoi(buf);
270}
271
272float read_sysfs_float_fd(int fd, const char *sys_pfx, const char *node)
273{
274 char buf[SYSFS_MAX_INT];
275
276 if (read(fd, buf, sizeof(buf)) < 0) {
277
278 fprintf(stderr, "unable to read from %s%s %s\n", sys_pfx, node,
279 strerror(errno));
280 close(fd);
281 abort();
282 }
283 return atof(buf);
284}
285
/* Open <sys_pfx><node>, read it as an int and close it again. */
int read_sysfs_int(const char *sys_pfx, const char *node)
{
	int fd = open_sysfs(sys_pfx, node, O_RDONLY);
	int val = read_sysfs_int_fd(fd, sys_pfx, node);

	close(fd);
	return val;
}
295
/* Open <sys_pfx><node>, read it as a float and close it again. */
float read_sysfs_float(const char *sys_pfx, const char *node)
{
	int fd = open_sysfs(sys_pfx, node, O_RDONLY);
	float val = read_sysfs_float_fd(fd, sys_pfx, node);

	close(fd);
	return val;
}
306
307void write_sysfs_val(const char *sys_pfx, const char *node, int val)
308{
309 int fd, len;
310 char buf[SYSFS_MAX_INT];
311
312 fd = open_sysfs(sys_pfx, node, O_RDWR);
313 len = snprintf(buf, sizeof(buf), "%d", val);
314 if (write(fd, buf, len) < 0) {
315 fprintf(stderr, "unable to write to %s%s %s\n", sys_pfx, node,
316 strerror(errno));
317 close(fd);
318 abort();
319 }
320 close(fd);
321}
322
/*
 * Pull the per-device metrics out of sysfs for every enabled device,
 * derive the jitter values, and (with -a) compute the aggregate row.
 * Always returns 0; individual read failures abort the program inside
 * the read_sysfs_* helpers.
 */
static int get_results(struct loopback_test *t)
{
	struct loopback_device *d;
	struct loopback_results *r;
	int i;

	for (i = 0; i < t->device_count; i++) {
		if (!device_enabled(t, i))
			continue;

		d = &t->devices[i];
		r = &d->results;

		r->error = read_sysfs_int(d->sysfs_entry, "error");
		r->request_min = read_sysfs_int(d->sysfs_entry, "requests_per_second_min");
		r->request_max = read_sysfs_int(d->sysfs_entry, "requests_per_second_max");
		r->request_avg = read_sysfs_float(d->sysfs_entry, "requests_per_second_avg");

		r->latency_min = read_sysfs_int(d->sysfs_entry, "latency_min");
		r->latency_max = read_sysfs_int(d->sysfs_entry, "latency_max");
		r->latency_avg = read_sysfs_float(d->sysfs_entry, "latency_avg");

		r->throughput_min = read_sysfs_int(d->sysfs_entry, "throughput_min");
		r->throughput_max = read_sysfs_int(d->sysfs_entry, "throughput_max");
		r->throughput_avg = read_sysfs_float(d->sysfs_entry, "throughput_avg");

		r->apbridge_unipro_latency_min =
			read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_min");
		r->apbridge_unipro_latency_max =
			read_sysfs_int(d->sysfs_entry, "apbridge_unipro_latency_max");
		r->apbridge_unipro_latency_avg =
			read_sysfs_float(d->sysfs_entry, "apbridge_unipro_latency_avg");

		r->gbphy_firmware_latency_min =
			read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_min");
		r->gbphy_firmware_latency_max =
			read_sysfs_int(d->sysfs_entry, "gbphy_firmware_latency_max");
		r->gbphy_firmware_latency_avg =
			read_sysfs_float(d->sysfs_entry, "gbphy_firmware_latency_avg");

		/* jitter = max - min for each metric */
		r->request_jitter = r->request_max - r->request_min;
		r->latency_jitter = r->latency_max - r->latency_min;
		r->throughput_jitter = r->throughput_max - r->throughput_min;
		r->apbridge_unipro_latency_jitter =
			r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
		r->gbphy_firmware_latency_jitter =
			r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;

	}

	/* calculate the aggregate results of all enabled devices */
	if (t->aggregate_output) {
		r = &t->aggregate_results;

		r->request_min = get_request_min_aggregate(t);
		r->request_max = get_request_max_aggregate(t);
		r->request_avg = get_request_avg_aggregate(t);

		r->latency_min = get_latency_min_aggregate(t);
		r->latency_max = get_latency_max_aggregate(t);
		r->latency_avg = get_latency_avg_aggregate(t);

		r->throughput_min = get_throughput_min_aggregate(t);
		r->throughput_max = get_throughput_max_aggregate(t);
		r->throughput_avg = get_throughput_avg_aggregate(t);

		r->apbridge_unipro_latency_min =
			get_apbridge_unipro_latency_min_aggregate(t);
		r->apbridge_unipro_latency_max =
			get_apbridge_unipro_latency_max_aggregate(t);
		r->apbridge_unipro_latency_avg =
			get_apbridge_unipro_latency_avg_aggregate(t);

		r->gbphy_firmware_latency_min =
			get_gbphy_firmware_latency_min_aggregate(t);
		r->gbphy_firmware_latency_max =
			get_gbphy_firmware_latency_max_aggregate(t);
		r->gbphy_firmware_latency_avg =
			get_gbphy_firmware_latency_avg_aggregate(t);

		r->request_jitter = r->request_max - r->request_min;
		r->latency_jitter = r->latency_max - r->latency_min;
		r->throughput_jitter = r->throughput_max - r->throughput_min;
		r->apbridge_unipro_latency_jitter =
			r->apbridge_unipro_latency_max - r->apbridge_unipro_latency_min;
		r->gbphy_firmware_latency_jitter =
			r->gbphy_firmware_latency_max - r->gbphy_firmware_latency_min;

	}

	return 0;
}
415
/* Report a failed or short write to the CSV output file. */
void log_csv_error(int len, int err)
{
	fprintf(stderr, "unable to write %d bytes to csv %s\n",
		len, strerror(err));
}
421
/*
 * Render one result row into buf: a timestamp followed either by the
 * indented human-readable report (-p / porcelain) or by the CSV columns.
 * The rendered row is also echoed to stdout. Returns the number of
 * characters written into buf.
 */
int format_output(struct loopback_test *t,
		  struct loopback_results *r,
		  const char *dev_name,
		  char *buf, int buf_len,
		  struct tm *tm)
{
	int len = 0;

	memset(buf, 0x00, buf_len);
	len = snprintf(buf, buf_len, "%u-%u-%u %u:%u:%u",
		       tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
		       tm->tm_hour, tm->tm_min, tm->tm_sec);

	if (t->porcelain) {
		len += snprintf(&buf[len], buf_len - len,
			"\n test:\t\t\t%s\n path:\t\t\t%s\n size:\t\t\t%u\n iterations:\t\t%u\n errors:\t\t%u\n async:\t\t\t%s\n",
			t->test_name,
			dev_name,
			t->size,
			t->iteration_max,
			r->error,
			t->use_async ? "Enabled" : "Disabled");

		len += snprintf(&buf[len], buf_len - len,
			" requests per-sec:\tmin=%u, max=%u, average=%f, jitter=%u\n",
			r->request_min,
			r->request_max,
			r->request_avg,
			r->request_jitter);

		len += snprintf(&buf[len], buf_len - len,
			" ap-throughput B/s:\tmin=%u max=%u average=%f jitter=%u\n",
			r->throughput_min,
			r->throughput_max,
			r->throughput_avg,
			r->throughput_jitter);
		len += snprintf(&buf[len], buf_len - len,
			" ap-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
			r->latency_min,
			r->latency_max,
			r->latency_avg,
			r->latency_jitter);
		len += snprintf(&buf[len], buf_len - len,
			" apbridge-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
			r->apbridge_unipro_latency_min,
			r->apbridge_unipro_latency_max,
			r->apbridge_unipro_latency_avg,
			r->apbridge_unipro_latency_jitter);

		len += snprintf(&buf[len], buf_len - len,
			" gbphy-latency usec:\tmin=%u max=%u average=%f jitter=%u\n",
			r->gbphy_firmware_latency_min,
			r->gbphy_firmware_latency_max,
			r->gbphy_firmware_latency_avg,
			r->gbphy_firmware_latency_jitter);

	} else {
		/* CSV layout matches the header documented in the README */
		len += snprintf(&buf[len], buf_len - len, ",%s,%s,%u,%u,%u",
			t->test_name, dev_name, t->size, t->iteration_max,
			r->error);

		len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
			r->request_min,
			r->request_max,
			r->request_avg,
			r->request_jitter);

		len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
			r->latency_min,
			r->latency_max,
			r->latency_avg,
			r->latency_jitter);

		len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
			r->throughput_min,
			r->throughput_max,
			r->throughput_avg,
			r->throughput_jitter);

		len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
			r->apbridge_unipro_latency_min,
			r->apbridge_unipro_latency_max,
			r->apbridge_unipro_latency_avg,
			r->apbridge_unipro_latency_jitter);

		len += snprintf(&buf[len], buf_len - len, ",%u,%u,%f,%u",
			r->gbphy_firmware_latency_min,
			r->gbphy_firmware_latency_max,
			r->gbphy_firmware_latency_avg,
			r->gbphy_firmware_latency_jitter);
	}

	printf("\n%s\n", buf);

	return len;
}
518
519static int log_results(struct loopback_test *t)
520{
521 int fd, i, len, ret;
522 struct tm tm;
523 time_t local_time;
524 mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
525 char file_name[MAX_SYSFS_PATH];
526 char data[CSV_MAX_LINE];
527
528 local_time = time(NULL);
529 tm = *localtime(&local_time);
530
531 /*
532 * file name will test_name_size_iteration_max.csv
533 * every time the same test with the same parameters is run we will then
534 * append to the same CSV with datestamp - representing each test
535 * dataset.
536 */
537 if (t->file_output && !t->porcelain) {
538 snprintf(file_name, sizeof(file_name), "%s_%d_%d.csv",
539 t->test_name, t->size, t->iteration_max);
540
541 fd = open(file_name, O_WRONLY | O_CREAT | O_APPEND, mode);
542 if (fd < 0) {
543 fprintf(stderr, "unable to open %s for appendation\n", file_name);
544 abort();
545 }
546
547 }
548 for (i = 0; i < t->device_count; i++) {
549 if (!device_enabled(t, i))
550 continue;
551
552 len = format_output(t, &t->devices[i].results,
553 t->devices[i].name,
554 data, sizeof(data), &tm);
555 if (t->file_output && !t->porcelain) {
556 ret = write(fd, data, len);
557 if (ret == -1)
558 fprintf(stderr, "unable to write %d bytes to csv.\n", len);
559 }
560
561 }
562
563
564 if (t->aggregate_output) {
565 len = format_output(t, &t->aggregate_results, "aggregate",
566 data, sizeof(data), &tm);
567 if (t->file_output && !t->porcelain) {
568 ret = write(fd, data, len);
569 if (ret == -1)
570 fprintf(stderr, "unable to write %d bytes to csv.\n", len);
571 }
572 }
573
574 if (t->file_output && !t->porcelain)
575 close(fd);
576
577 return 0;
578}
579
580int is_loopback_device(const char *path, const char *node)
581{
582 char file[MAX_SYSFS_PATH];
583
584 snprintf(file, MAX_SYSFS_PATH, "%s%s/iteration_count", path, node);
585 if (access(file, F_OK) == 0)
586 return 1;
587 return 0;
588}
589
590int find_loopback_devices(struct loopback_test *t)
591{
592 struct dirent **namelist;
593 int i, n, ret;
594 unsigned int dev_id;
595 struct loopback_device *d;
596
597 n = scandir(t->sysfs_prefix, &namelist, NULL, alphasort);
598 if (n < 0) {
599 perror("scandir");
600 ret = -ENODEV;
601 goto baddir;
602 }
603
604 /* Don't include '.' and '..' */
605 if (n <= 2) {
606 ret = -ENOMEM;
607 goto done;
608 }
609
610 for (i = 0; i < n; i++) {
611 ret = sscanf(namelist[i]->d_name, "gb_loopback%u", &dev_id);
612 if (ret != 1)
613 continue;
614
615 if (!is_loopback_device(t->sysfs_prefix, namelist[i]->d_name))
616 continue;
617
618 if (t->device_count == MAX_NUM_DEVICES) {
619 fprintf(stderr, "max number of devices reached!\n");
620 break;
621 }
622
623 d = &t->devices[t->device_count++];
624 snprintf(d->name, MAX_STR_LEN, "gb_loopback%u", dev_id);
625
626 snprintf(d->sysfs_entry, MAX_SYSFS_PATH, "%s%s/",
627 t->sysfs_prefix, d->name);
628
629 snprintf(d->debugfs_entry, MAX_SYSFS_PATH, "%sraw_latency_%s",
630 t->debugfs_prefix, d->name);
631
632 if (t->debug)
633 printf("add %s %s\n", d->sysfs_entry,
634 d->debugfs_entry);
635 }
636
637 ret = 0;
638done:
639 for (i = 0; i < n; i++)
640 free(namelist[n]);
641 free(namelist);
642baddir:
643 return ret;
644}
645
646static int open_poll_files(struct loopback_test *t)
647{
648 struct loopback_device *dev;
649 char buf[MAX_STR_LEN];
650 char dummy;
651 int fds_idx = 0;
652 int i;
653
654 for (i = 0; i < t->device_count; i++) {
655 dev = &t->devices[i];
656
657 if (!device_enabled(t, i))
658 continue;
659
660 snprintf(buf, sizeof(buf), "%s%s", dev->sysfs_entry, "iteration_count");
661 t->fds[fds_idx].fd = open(buf, O_RDONLY);
662 if (t->fds[fds_idx].fd < 0) {
663 fprintf(stderr, "Error opening poll file!\n");
664 goto err;
665 }
666 read(t->fds[fds_idx].fd, &dummy, 1);
667 t->fds[fds_idx].events = POLLERR|POLLPRI;
668 t->fds[fds_idx].revents = 0;
669 fds_idx++;
670 }
671
672 t->poll_count = fds_idx;
673
674 return 0;
675
676err:
677 for (i = 0; i < fds_idx; i++)
678 close(t->fds[fds_idx].fd);
679
680 return -1;
681}
682
683static int close_poll_files(struct loopback_test *t)
684{
685 int i;
686 for (i = 0; i < t->poll_count; i++)
687 close(t->fds[i].fd);
688
689 return 0;
690}
691static int is_complete(struct loopback_test *t)
692{
693 int iteration_count;
694 int i;
695
696 for (i = 0; i < t->device_count; i++) {
697 if (!device_enabled(t, i))
698 continue;
699
700 iteration_count = read_sysfs_int(t->devices[i].sysfs_entry,
701 "iteration_count");
702
703 /* at least one device did not finish yet */
704 if (iteration_count != t->iteration_max)
705 return 0;
706 }
707
708 return 1;
709}
710
711static void stop_tests(struct loopback_test *t)
712{
713 int i;
714
715 for (i = 0; i < t->device_count; i++) {
716 if (!device_enabled(t, i))
717 continue;
718 write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
719 }
720}
721
/*
 * SIGINT handler: deliberately empty. Its only purpose is to make the
 * blocked ppoll() in wait_for_complete() return with EINTR so the tests
 * can be stopped cleanly instead of the process being killed mid-run.
 */
static void handler(int sig) { /* do nothing */ }
723
/*
 * Block until every polled iteration_count file signals completion
 * (POLLPRI), the optional -O timeout expires, or SIGINT arrives.
 *
 * SIGINT is blocked everywhere except inside ppoll() (via mask_old), so
 * an interrupt can only be delivered at the ppoll() call; the empty
 * handler() turns it into an EINTR return, after which the tests are
 * stopped. Returns 0 on success, -1 on timeout/interrupt/incomplete run.
 */
static int wait_for_complete(struct loopback_test *t)
{
	int number_of_events = 0;
	char dummy;
	int ret;
	int i;
	struct timespec *ts = NULL;
	struct sigaction sa;
	sigset_t mask_old, mask;

	/* Block SIGINT; ppoll() atomically unblocks it via mask_old. */
	sigemptyset(&mask);
	sigemptyset(&mask_old);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_BLOCK, &mask, &mask_old);

	sa.sa_handler = handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGINT, &sa, NULL) == -1) {
		fprintf(stderr, "sigaction error\n");
		return -1;
	}

	/* A zero -O means wait forever (NULL timeout). */
	if (t->poll_timeout.tv_sec != 0)
		ts = &t->poll_timeout;

	while (1) {

		ret = ppoll(t->fds, t->poll_count, ts, &mask_old);
		if (ret <= 0) {
			stop_tests(t);
			fprintf(stderr, "Poll exit with errno %d\n", errno);
			return -1;
		}

		for (i = 0; i < t->poll_count; i++) {
			if (t->fds[i].revents & POLLPRI) {
				/* Dummy read to clear the event */
				read(t->fds[i].fd, &dummy, 1);
				number_of_events++;
			}
		}

		if (number_of_events == t->poll_count)
			break;
	}

	if (!is_complete(t)) {
		fprintf(stderr, "Iteration count did not finish!\n");
		return -1;
	}

	return 0;
}
778
779static void prepare_devices(struct loopback_test *t)
780{
781 int i;
782
783 /* Cancel any running tests on enabled devices. If
784 * stop_all option is given, stop test on all devices.
785 */
786 for (i = 0; i < t->device_count; i++)
787 if (t->stop_all || device_enabled(t, i))
788 write_sysfs_val(t->devices[i].sysfs_entry, "type", 0);
789
790
791 for (i = 0; i < t->device_count; i++) {
792 if (!device_enabled(t, i))
793 continue;
794
795 write_sysfs_val(t->devices[i].sysfs_entry, "us_wait",
796 t->us_wait);
797
798 /* Set operation size */
799 write_sysfs_val(t->devices[i].sysfs_entry, "size", t->size);
800
801 /* Set iterations */
802 write_sysfs_val(t->devices[i].sysfs_entry, "iteration_max",
803 t->iteration_max);
804
805 if (t->use_async) {
806 write_sysfs_val(t->devices[i].sysfs_entry,
807 "async", 1);
808 write_sysfs_val(t->devices[i].sysfs_entry,
809 "timeout", t->async_timeout);
810 write_sysfs_val(t->devices[i].sysfs_entry,
811 "outstanding_operations_max",
812 t->async_outstanding_operations);
813 } else
814 write_sysfs_val(t->devices[i].sysfs_entry,
815 "async", 0);
816 }
817}
818
819static int start(struct loopback_test *t)
820{
821 int i;
822
823 /* the test starts by writing test_id to the type file. */
824 for (i = 0; i < t->device_count; i++) {
825 if (!device_enabled(t, i))
826 continue;
827
828 write_sysfs_val(t->devices[i].sysfs_entry, "type", t->test_id);
829 }
830
831 return 0;
832}
833
834
/*
 * Top-level test driver: resolve the test name to its numeric id,
 * program all enabled devices, start the test, wait for completion,
 * then collect and log the results.
 *
 * NOTE(review): strstr() means t->test_name only needs to be a
 * substring of a table entry (e.g. "sin" matches "sink"); when several
 * entries match, the last one wins. Confirm exact matching is not
 * required here.
 */
void loopback_run(struct loopback_test *t)
{
	int i;
	int ret;

	for (i = 0; dict[i].name != NULL; i++) {
		if (strstr(dict[i].name, t->test_name))
			t->test_id = dict[i].type;
	}
	if (!t->test_id) {
		fprintf(stderr, "invalid test %s\n", t->test_name);
		usage();
		return;
	}

	prepare_devices(t);

	ret = open_poll_files(t);
	if (ret)
		goto err;

	start(t);

	ret = wait_for_complete(t);
	close_poll_files(t);
	if (ret)
		goto err;


	get_results(t);

	log_results(t);

	return;

err:
	printf("Error running test\n");
	return;
}
874
875static int sanity_check(struct loopback_test *t)
876{
877 int i;
878
879 if (t->device_count == 0) {
880 fprintf(stderr, "No loopback devices found\n");
881 return -1;
882 }
883
884 for (i = 0; i < MAX_NUM_DEVICES; i++) {
885 if (!device_enabled(t, i))
886 continue;
887
888 if (t->mask && !strcmp(t->devices[i].name, "")) {
889 fprintf(stderr, "Bad device mask %x\n", (1 << i));
890 return -1;
891 }
892
893 }
894
895
896 return 0;
897}
898
/*
 * Parse the command line into the global test context t, discover the
 * loopback devices, then either list them (-l) or run the test.
 * Options marked "::" in the getopt string are boolean flags; the rest
 * take an argument.
 */
int main(int argc, char *argv[])
{
	int o, ret;
	/* Fallback locations when -S / -D are not given. */
	char *sysfs_prefix = "/sys/class/gb_loopback/";
	char *debugfs_prefix = "/sys/kernel/debug/gb_loopback/";

	memset(&t, 0, sizeof(t));

	while ((o = getopt(argc, argv,
			   "t:s:i:S:D:m:v::d::r::p::a::l::x::o:O:c:w:z::f::")) != -1) {
		switch (o) {
		case 't':
			snprintf(t.test_name, MAX_STR_LEN, "%s", optarg);
			break;
		case 's':
			t.size = atoi(optarg);
			break;
		case 'i':
			t.iteration_max = atoi(optarg);
			break;
		case 'S':
			snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
			break;
		case 'D':
			snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
			break;
		case 'm':
			t.mask = atol(optarg);
			break;
		case 'v':
			t.verbose = 1;
			break;
		case 'd':
			t.debug = 1;
			break;
		case 'r':
			t.raw_data_dump = 1;
			break;
		case 'p':
			t.porcelain = 1;
			break;
		case 'a':
			t.aggregate_output = 1;
			break;
		case 'l':
			t.list_devices = 1;
			break;
		case 'x':
			t.use_async = 1;
			break;
		case 'o':
			t.async_timeout = atoi(optarg);
			break;
		case 'O':
			t.poll_timeout.tv_sec = atoi(optarg);
			break;
		case 'c':
			t.async_outstanding_operations = atoi(optarg);
			break;
		case 'w':
			t.us_wait = atoi(optarg);
			break;
		case 'z':
			t.file_output = 1;
			break;
		case 'f':
			t.stop_all = 1;
			break;
		default:
			usage();
			return -EINVAL;
		}
	}

	/* Apply defaults for anything the user did not override. */
	if (!strcmp(t.sysfs_prefix, ""))
		snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);

	if (!strcmp(t.debugfs_prefix, ""))
		snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);

	ret = find_loopback_devices(&t);
	if (ret)
		return ret;
	ret = sanity_check(&t);
	if (ret)
		return ret;

	if (t.list_devices) {
		show_loopback_devices(&t);
		return 0;
	}

	/* -t and a non-zero -i are mandatory; usage() exits. */
	if (t.test_name[0] == '\0' || t.iteration_max == 0)
		usage();

	if (t.async_timeout == 0)
		t.async_timeout = DEFAULT_ASYNC_TIMEOUT;

	loopback_run(&t);

	return 0;
}
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
new file mode 100644
index 000000000000..01aeed1b3a9c
--- /dev/null
+++ b/drivers/staging/greybus/uart.c
@@ -0,0 +1,1075 @@
1/*
2 * UART driver for the Greybus "generic" UART module.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 *
9 * Heavily based on drivers/usb/class/cdc-acm.c and
10 * drivers/usb/serial/usb-serial.c.
11 */
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include <linux/kernel.h>
15#include <linux/errno.h>
16#include <linux/module.h>
17#include <linux/sched.h>
18#include <linux/wait.h>
19#include <linux/slab.h>
20#include <linux/uaccess.h>
21#include <linux/mutex.h>
22#include <linux/tty.h>
23#include <linux/serial.h>
24#include <linux/tty_driver.h>
25#include <linux/tty_flip.h>
26#include <linux/serial.h>
27#include <linux/idr.h>
28#include <linux/fs.h>
29#include <linux/kdev_t.h>
30#include <linux/kfifo.h>
31#include <linux/workqueue.h>
32#include <linux/completion.h>
33
34#include "greybus.h"
35#include "gbphy.h"
36
#define GB_NUM_MINORS	16	/* 16 is more than enough */
#define GB_NAME		"ttyGB"

#define GB_UART_WRITE_FIFO_SIZE		PAGE_SIZE
#define GB_UART_WRITE_ROOM_MARGIN	1	/* leave some space in fifo */
#define GB_UART_FIRMWARE_CREDITS	4096
#define GB_UART_CREDIT_WAIT_TIMEOUT_MSEC	10000
44
/*
 * Cached UART line settings; copied verbatim into a
 * gb_uart_set_line_coding_request by send_line_coding(), so the layout
 * matches the wire format (rate is little-endian).
 */
struct gb_tty_line_coding {
	__le32	rate;
	__u8	format;
	__u8	parity;
	__u8	data_bits;
	__u8	flow_control;
};
52
/* Per-port driver state for one Greybus UART connection. */
struct gb_tty {
	struct gbphy_device *gbphy_dev;
	struct tty_port port;
	void *buffer;			/* scratch buffer for send-data requests */
	size_t buffer_payload_max;	/* max data bytes per send request */
	struct gb_connection *connection;
	u16 cport_id;
	unsigned int minor;		/* index into the tty_minors idr */
	unsigned char clocal;
	bool disconnected;		/* set once on remove, under mutex */
	spinlock_t read_lock;		/* protects iocount/oldcount */
	spinlock_t write_lock;		/* protects write_fifo and credits */
	struct async_icount iocount;
	struct async_icount oldcount;
	wait_queue_head_t wioctl;	/* TIOCMIWAIT sleepers */
	struct mutex mutex;
	u8 ctrlin;	/* input control lines */
	u8 ctrlout;	/* output control lines */
	struct gb_tty_line_coding line_coding;
	struct work_struct tx_work;	/* drains write_fifo to the module */
	struct kfifo write_fifo;
	bool close_pending;		/* true while port shutdown runs */
	unsigned int credits;		/* tx credits granted by firmware */
	struct completion credits_complete; /* fires when all credits return */
};
78
/* Single tty driver instance shared by all Greybus UART ports. */
static struct tty_driver *gb_tty_driver;
/* Minor number -> gb_tty lookup; both protected by table_lock. */
static DEFINE_IDR(tty_minors);
static DEFINE_MUTEX(table_lock);
82
/*
 * Handle an unsolicited receive-data request from the module: validate
 * the payload framing and push the received bytes into the tty flip
 * buffer.
 *
 * Returns 0 on success or -EINVAL on a short, malformed, or empty
 * request.
 */
static int gb_uart_receive_data_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_tty *gb_tty = gb_connection_get_data(connection);
	struct tty_port *port = &gb_tty->port;
	struct gb_message *request = op->request;
	struct gb_uart_recv_data_request *receive_data;
	u16 recv_data_size;
	int count;
	unsigned long tty_flags = TTY_NORMAL;

	/* The fixed header must be complete before the size field is trusted. */
	if (request->payload_size < sizeof(*receive_data)) {
		dev_err(&gb_tty->gbphy_dev->dev,
			"short receive-data request received (%zu < %zu)\n",
			request->payload_size, sizeof(*receive_data));
		return -EINVAL;
	}

	receive_data = op->request->payload;
	recv_data_size = le16_to_cpu(receive_data->size);

	/* Declared size must match the actual trailing data length. */
	if (recv_data_size != request->payload_size - sizeof(*receive_data)) {
		dev_err(&gb_tty->gbphy_dev->dev,
			"malformed receive-data request received (%u != %zu)\n",
			recv_data_size,
			request->payload_size - sizeof(*receive_data));
		return -EINVAL;
	}

	if (!recv_data_size)
		return -EINVAL;

	/*
	 * Only one per-string tty flag can be reported; break takes
	 * precedence over parity, which takes precedence over framing.
	 */
	if (receive_data->flags) {
		if (receive_data->flags & GB_UART_RECV_FLAG_BREAK)
			tty_flags = TTY_BREAK;
		else if (receive_data->flags & GB_UART_RECV_FLAG_PARITY)
			tty_flags = TTY_PARITY;
		else if (receive_data->flags & GB_UART_RECV_FLAG_FRAMING)
			tty_flags = TTY_FRAME;

		/* overrun is special, not associated with a char */
		if (receive_data->flags & GB_UART_RECV_FLAG_OVERRUN)
			tty_insert_flip_char(port, 0, TTY_OVERRUN);
	}
	count = tty_insert_flip_string_fixed_flag(port, receive_data->data,
						  tty_flags, recv_data_size);
	if (count != recv_data_size) {
		/* Flip buffer full; the remainder of this request is dropped. */
		dev_err(&gb_tty->gbphy_dev->dev,
			"UART: RX 0x%08x bytes only wrote 0x%08x\n",
			recv_data_size, count);
	}
	if (count)
		tty_flip_buffer_push(port);
	return 0;
}
138
139static int gb_uart_serial_state_handler(struct gb_operation *op)
140{
141 struct gb_connection *connection = op->connection;
142 struct gb_tty *gb_tty = gb_connection_get_data(connection);
143 struct gb_message *request = op->request;
144 struct gb_uart_serial_state_request *serial_state;
145
146 if (request->payload_size < sizeof(*serial_state)) {
147 dev_err(&gb_tty->gbphy_dev->dev,
148 "short serial-state event received (%zu < %zu)\n",
149 request->payload_size, sizeof(*serial_state));
150 return -EINVAL;
151 }
152
153 serial_state = request->payload;
154 gb_tty->ctrlin = serial_state->control;
155
156 return 0;
157}
158
/*
 * Handle a receive-credits request from the module: add the granted tx
 * credits (rejecting grants that would exceed the allowance), restart
 * transmission, and wake any writers blocked on credits.
 */
static int gb_uart_receive_credits_handler(struct gb_operation *op)
{
	struct gb_connection *connection = op->connection;
	struct gb_tty *gb_tty = gb_connection_get_data(connection);
	struct gb_message *request = op->request;
	struct gb_uart_receive_credits_request *credit_request;
	unsigned long flags;
	unsigned int incoming_credits;
	int ret = 0;

	if (request->payload_size < sizeof(*credit_request)) {
		dev_err(&gb_tty->gbphy_dev->dev,
			"short receive_credits event received (%zu < %zu)\n",
			request->payload_size,
			sizeof(*credit_request));
		return -EINVAL;
	}

	credit_request = request->payload;
	incoming_credits = le16_to_cpu(credit_request->count);

	/* Add then roll back if the module granted more than it may. */
	spin_lock_irqsave(&gb_tty->write_lock, flags);
	gb_tty->credits += incoming_credits;
	if (gb_tty->credits > GB_UART_FIRMWARE_CREDITS) {
		gb_tty->credits -= incoming_credits;
		ret = -EINVAL;
	}
	spin_unlock_irqrestore(&gb_tty->write_lock, flags);

	if (ret) {
		dev_err(&gb_tty->gbphy_dev->dev,
			"invalid number of incoming credits: %d\n",
			incoming_credits);
		return ret;
	}

	if (!gb_tty->close_pending)
		schedule_work(&gb_tty->tx_work);

	/*
	 * The tty layer may be waiting for credits; wake it up now that
	 * more are available.
	 */
	tty_port_tty_wakeup(&gb_tty->port);

	/* Port shutdown waits for the full allowance to come back. */
	if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
		complete(&gb_tty->credits_complete);

	return ret;
}
208
209static int gb_uart_request_handler(struct gb_operation *op)
210{
211 struct gb_connection *connection = op->connection;
212 struct gb_tty *gb_tty = gb_connection_get_data(connection);
213 int type = op->type;
214 int ret;
215
216 switch (type) {
217 case GB_UART_TYPE_RECEIVE_DATA:
218 ret = gb_uart_receive_data_handler(op);
219 break;
220 case GB_UART_TYPE_SERIAL_STATE:
221 ret = gb_uart_serial_state_handler(op);
222 break;
223 case GB_UART_TYPE_RECEIVE_CREDITS:
224 ret = gb_uart_receive_credits_handler(op);
225 break;
226 default:
227 dev_err(&gb_tty->gbphy_dev->dev,
228 "unsupported unsolicited request: 0x%02x\n", type);
229 ret = -EINVAL;
230 }
231
232 return ret;
233}
234
/*
 * Workqueue handler that drains write_fifo to the module.
 *
 * Flow-control credits bound the in-flight data: each transmitted byte
 * consumes one credit, returned later via a receive-credits request.
 * Data is peeked (not popped) before transmission so a send failure
 * leaves the bytes queued for retry.
 */
static void gb_uart_tx_write_work(struct work_struct *work)
{
	struct gb_uart_send_data_request *request;
	struct gb_tty *gb_tty;
	unsigned long flags;
	unsigned int send_size;
	int ret;

	gb_tty = container_of(work, struct gb_tty, tx_work);
	request = gb_tty->buffer;

	while (1) {
		if (gb_tty->close_pending)
			break;

		/* Cap the chunk by both the request buffer and our credits. */
		spin_lock_irqsave(&gb_tty->write_lock, flags);
		send_size = gb_tty->buffer_payload_max;
		if (send_size > gb_tty->credits)
			send_size = gb_tty->credits;

		send_size = kfifo_out_peek(&gb_tty->write_fifo,
					   &request->data[0],
					   send_size);
		if (!send_size) {
			spin_unlock_irqrestore(&gb_tty->write_lock, flags);
			break;
		}

		/* Reserve the credits before dropping the lock. */
		gb_tty->credits -= send_size;
		spin_unlock_irqrestore(&gb_tty->write_lock, flags);

		request->size = cpu_to_le16(send_size);
		ret = gb_operation_sync(gb_tty->connection,
					GB_UART_TYPE_SEND_DATA,
					request, sizeof(*request) + send_size,
					NULL, 0);
		if (ret) {
			dev_err(&gb_tty->gbphy_dev->dev,
				"send data error: %d\n", ret);
			/* Return the reserved credits and retry later. */
			spin_lock_irqsave(&gb_tty->write_lock, flags);
			gb_tty->credits += send_size;
			spin_unlock_irqrestore(&gb_tty->write_lock, flags);
			if (!gb_tty->close_pending)
				schedule_work(work);
			return;
		}

		/* Success: now actually consume the peeked bytes. */
		spin_lock_irqsave(&gb_tty->write_lock, flags);
		ret = kfifo_out(&gb_tty->write_fifo, &request->data[0],
				send_size);
		spin_unlock_irqrestore(&gb_tty->write_lock, flags);

		tty_port_tty_wakeup(&gb_tty->port);
	}
}
290
291static int send_line_coding(struct gb_tty *tty)
292{
293 struct gb_uart_set_line_coding_request request;
294
295 memcpy(&request, &tty->line_coding,
296 sizeof(tty->line_coding));
297 return gb_operation_sync(tty->connection, GB_UART_TYPE_SET_LINE_CODING,
298 &request, sizeof(request), NULL, 0);
299}
300
301static int send_control(struct gb_tty *gb_tty, u8 control)
302{
303 struct gb_uart_set_control_line_state_request request;
304
305 request.control = control;
306 return gb_operation_sync(gb_tty->connection,
307 GB_UART_TYPE_SET_CONTROL_LINE_STATE,
308 &request, sizeof(request), NULL, 0);
309}
310
311static int send_break(struct gb_tty *gb_tty, u8 state)
312{
313 struct gb_uart_set_break_request request;
314
315 if ((state != 0) && (state != 1)) {
316 dev_err(&gb_tty->gbphy_dev->dev,
317 "invalid break state of %d\n", state);
318 return -EINVAL;
319 }
320
321 request.state = state;
322 return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_SEND_BREAK,
323 &request, sizeof(request), NULL, 0);
324}
325
326static int gb_uart_wait_for_all_credits(struct gb_tty *gb_tty)
327{
328 int ret;
329
330 if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
331 return 0;
332
333 ret = wait_for_completion_timeout(&gb_tty->credits_complete,
334 msecs_to_jiffies(GB_UART_CREDIT_WAIT_TIMEOUT_MSEC));
335 if (!ret) {
336 dev_err(&gb_tty->gbphy_dev->dev,
337 "time out waiting for credits\n");
338 return -ETIMEDOUT;
339 }
340
341 return 0;
342}
343
344static int gb_uart_flush(struct gb_tty *gb_tty, u8 flags)
345{
346 struct gb_uart_serial_flush_request request;
347
348 request.flags = flags;
349 return gb_operation_sync(gb_tty->connection, GB_UART_TYPE_FLUSH_FIFOS,
350 &request, sizeof(request), NULL, 0);
351}
352
/*
 * Look up the gb_tty registered under @minor.
 *
 * On success a tty_port reference is taken; the caller must drop it with
 * tty_port_put(). Returns NULL when the minor is unknown or the device
 * has already been disconnected.
 */
static struct gb_tty *get_gb_by_minor(unsigned minor)
{
	struct gb_tty *gb_tty;

	mutex_lock(&table_lock);
	gb_tty = idr_find(&tty_minors, minor);
	if (gb_tty) {
		mutex_lock(&gb_tty->mutex);
		if (gb_tty->disconnected) {
			mutex_unlock(&gb_tty->mutex);
			gb_tty = NULL;
		} else {
			tty_port_get(&gb_tty->port);
			mutex_unlock(&gb_tty->mutex);
		}
	}
	mutex_unlock(&table_lock);
	return gb_tty;
}
372
373static int alloc_minor(struct gb_tty *gb_tty)
374{
375 int minor;
376
377 mutex_lock(&table_lock);
378 minor = idr_alloc(&tty_minors, gb_tty, 0, GB_NUM_MINORS, GFP_KERNEL);
379 mutex_unlock(&table_lock);
380 if (minor >= 0)
381 gb_tty->minor = minor;
382 return minor;
383}
384
385static void release_minor(struct gb_tty *gb_tty)
386{
387 int minor = gb_tty->minor;
388
389 gb_tty->minor = 0; /* Maybe should use an invalid value instead */
390 mutex_lock(&table_lock);
391 idr_remove(&tty_minors, minor);
392 mutex_unlock(&table_lock);
393}
394
395static int gb_tty_install(struct tty_driver *driver, struct tty_struct *tty)
396{
397 struct gb_tty *gb_tty;
398 int retval;
399
400 gb_tty = get_gb_by_minor(tty->index);
401 if (!gb_tty)
402 return -ENODEV;
403
404 retval = tty_standard_install(driver, tty);
405 if (retval)
406 goto error;
407
408 tty->driver_data = gb_tty;
409 return 0;
410error:
411 tty_port_put(&gb_tty->port);
412 return retval;
413}
414
415static int gb_tty_open(struct tty_struct *tty, struct file *file)
416{
417 struct gb_tty *gb_tty = tty->driver_data;
418
419 return tty_port_open(&gb_tty->port, tty, file);
420}
421
422static void gb_tty_close(struct tty_struct *tty, struct file *file)
423{
424 struct gb_tty *gb_tty = tty->driver_data;
425
426 tty_port_close(&gb_tty->port, tty, file);
427}
428
429static void gb_tty_cleanup(struct tty_struct *tty)
430{
431 struct gb_tty *gb_tty = tty->driver_data;
432
433 tty_port_put(&gb_tty->port);
434}
435
436static void gb_tty_hangup(struct tty_struct *tty)
437{
438 struct gb_tty *gb_tty = tty->driver_data;
439
440 tty_port_hangup(&gb_tty->port);
441}
442
443static int gb_tty_write(struct tty_struct *tty, const unsigned char *buf,
444 int count)
445{
446 struct gb_tty *gb_tty = tty->driver_data;
447
448 count = kfifo_in_spinlocked(&gb_tty->write_fifo, buf, count,
449 &gb_tty->write_lock);
450 if (count && !gb_tty->close_pending)
451 schedule_work(&gb_tty->tx_work);
452
453 return count;
454}
455
456static int gb_tty_write_room(struct tty_struct *tty)
457{
458 struct gb_tty *gb_tty = tty->driver_data;
459 unsigned long flags;
460 int room;
461
462 spin_lock_irqsave(&gb_tty->write_lock, flags);
463 room = kfifo_avail(&gb_tty->write_fifo);
464 spin_unlock_irqrestore(&gb_tty->write_lock, flags);
465
466 room -= GB_UART_WRITE_ROOM_MARGIN;
467 if (room < 0)
468 return 0;
469
470 return room;
471}
472
473static int gb_tty_chars_in_buffer(struct tty_struct *tty)
474{
475 struct gb_tty *gb_tty = tty->driver_data;
476 unsigned long flags;
477 int chars;
478
479 spin_lock_irqsave(&gb_tty->write_lock, flags);
480 chars = kfifo_len(&gb_tty->write_fifo);
481 if (gb_tty->credits < GB_UART_FIRMWARE_CREDITS)
482 chars += GB_UART_FIRMWARE_CREDITS - gb_tty->credits;
483 spin_unlock_irqrestore(&gb_tty->write_lock, flags);
484
485 return chars;
486}
487
488static int gb_tty_break_ctl(struct tty_struct *tty, int state)
489{
490 struct gb_tty *gb_tty = tty->driver_data;
491
492 return send_break(gb_tty, state ? 1 : 0);
493}
494
495static void gb_tty_set_termios(struct tty_struct *tty,
496 struct ktermios *termios_old)
497{
498 struct gb_tty *gb_tty = tty->driver_data;
499 struct ktermios *termios = &tty->termios;
500 struct gb_tty_line_coding newline;
501 u8 newctrl = gb_tty->ctrlout;
502
503 newline.rate = cpu_to_le32(tty_get_baud_rate(tty));
504 newline.format = termios->c_cflag & CSTOPB ?
505 GB_SERIAL_2_STOP_BITS : GB_SERIAL_1_STOP_BITS;
506 newline.parity = termios->c_cflag & PARENB ?
507 (termios->c_cflag & PARODD ? 1 : 2) +
508 (termios->c_cflag & CMSPAR ? 2 : 0) : 0;
509
510 switch (termios->c_cflag & CSIZE) {
511 case CS5:
512 newline.data_bits = 5;
513 break;
514 case CS6:
515 newline.data_bits = 6;
516 break;
517 case CS7:
518 newline.data_bits = 7;
519 break;
520 case CS8:
521 default:
522 newline.data_bits = 8;
523 break;
524 }
525
526 /* FIXME: needs to clear unsupported bits in the termios */
527 gb_tty->clocal = ((termios->c_cflag & CLOCAL) != 0);
528
529 if (C_BAUD(tty) == B0) {
530 newline.rate = gb_tty->line_coding.rate;
531 newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
532 } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
533 newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
534 }
535
536 if (newctrl != gb_tty->ctrlout) {
537 gb_tty->ctrlout = newctrl;
538 send_control(gb_tty, newctrl);
539 }
540
541 if (C_CRTSCTS(tty) && C_BAUD(tty) != B0)
542 newline.flow_control |= GB_SERIAL_AUTO_RTSCTS_EN;
543 else
544 newline.flow_control &= ~GB_SERIAL_AUTO_RTSCTS_EN;
545
546 if (memcmp(&gb_tty->line_coding, &newline, sizeof(newline))) {
547 memcpy(&gb_tty->line_coding, &newline, sizeof(newline));
548 send_line_coding(gb_tty);
549 }
550}
551
552static int gb_tty_tiocmget(struct tty_struct *tty)
553{
554 struct gb_tty *gb_tty = tty->driver_data;
555
556 return (gb_tty->ctrlout & GB_UART_CTRL_DTR ? TIOCM_DTR : 0) |
557 (gb_tty->ctrlout & GB_UART_CTRL_RTS ? TIOCM_RTS : 0) |
558 (gb_tty->ctrlin & GB_UART_CTRL_DSR ? TIOCM_DSR : 0) |
559 (gb_tty->ctrlin & GB_UART_CTRL_RI ? TIOCM_RI : 0) |
560 (gb_tty->ctrlin & GB_UART_CTRL_DCD ? TIOCM_CD : 0) |
561 TIOCM_CTS;
562}
563
564static int gb_tty_tiocmset(struct tty_struct *tty, unsigned int set,
565 unsigned int clear)
566{
567 struct gb_tty *gb_tty = tty->driver_data;
568 u8 newctrl = gb_tty->ctrlout;
569
570 set = (set & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
571 (set & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
572 clear = (clear & TIOCM_DTR ? GB_UART_CTRL_DTR : 0) |
573 (clear & TIOCM_RTS ? GB_UART_CTRL_RTS : 0);
574
575 newctrl = (newctrl & ~clear) | set;
576 if (gb_tty->ctrlout == newctrl)
577 return 0;
578
579 gb_tty->ctrlout = newctrl;
580 return send_control(gb_tty, newctrl);
581}
582
583static void gb_tty_throttle(struct tty_struct *tty)
584{
585 struct gb_tty *gb_tty = tty->driver_data;
586 unsigned char stop_char;
587 int retval;
588
589 if (I_IXOFF(tty)) {
590 stop_char = STOP_CHAR(tty);
591 retval = gb_tty_write(tty, &stop_char, 1);
592 if (retval <= 0)
593 return;
594 }
595
596 if (tty->termios.c_cflag & CRTSCTS) {
597 gb_tty->ctrlout &= ~GB_UART_CTRL_RTS;
598 retval = send_control(gb_tty, gb_tty->ctrlout);
599 }
600}
601
602static void gb_tty_unthrottle(struct tty_struct *tty)
603{
604 struct gb_tty *gb_tty = tty->driver_data;
605 unsigned char start_char;
606 int retval;
607
608 if (I_IXOFF(tty)) {
609 start_char = START_CHAR(tty);
610 retval = gb_tty_write(tty, &start_char, 1);
611 if (retval <= 0)
612 return;
613 }
614
615 if (tty->termios.c_cflag & CRTSCTS) {
616 gb_tty->ctrlout |= GB_UART_CTRL_RTS;
617 retval = send_control(gb_tty, gb_tty->ctrlout);
618 }
619}
620
/*
 * TIOCGSERIAL: report a mostly-synthetic serial_struct (fixed port type,
 * fifo size and baud base) plus the real close_delay/closing_wait.
 */
static int get_serial_info(struct gb_tty *gb_tty,
			   struct serial_struct __user *info)
{
	struct serial_struct tmp;

	if (!info)
		return -EINVAL;

	memset(&tmp, 0, sizeof(tmp));
	tmp.flags = ASYNC_LOW_LATENCY | ASYNC_SKIP_TEST;
	tmp.type = PORT_16550A;
	tmp.line = gb_tty->minor;
	tmp.xmit_fifo_size = 16;
	tmp.baud_base = 9600;
	/*
	 * NOTE(review): the /10 here (and *10 in set_serial_info) presumes
	 * jiffies at HZ=100 convert to the centisecond units userspace
	 * expects — confirm against tty_port conventions.
	 */
	tmp.close_delay = gb_tty->port.close_delay / 10;
	tmp.closing_wait = gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
		ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;

	if (copy_to_user(info, &tmp, sizeof(tmp)))
		return -EFAULT;
	return 0;
}
643
/*
 * TIOCSSERIAL: let a privileged caller update close_delay and
 * closing_wait; all other serial_struct fields are ignored.
 */
static int set_serial_info(struct gb_tty *gb_tty,
			   struct serial_struct __user *newinfo)
{
	struct serial_struct new_serial;
	unsigned int closing_wait;
	unsigned int close_delay;
	int retval = 0;

	if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
		return -EFAULT;

	/* Inverse of the /10 scaling done in get_serial_info(). */
	close_delay = new_serial.close_delay * 10;
	closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
			ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;

	mutex_lock(&gb_tty->port.mutex);
	if (!capable(CAP_SYS_ADMIN)) {
		/* Unprivileged callers may not change the values at all. */
		if ((close_delay != gb_tty->port.close_delay) ||
		    (closing_wait != gb_tty->port.closing_wait))
			retval = -EPERM;
		else
			retval = -EOPNOTSUPP;
	} else {
		gb_tty->port.close_delay = close_delay;
		gb_tty->port.closing_wait = closing_wait;
	}
	mutex_unlock(&gb_tty->port.mutex);
	return retval;
}
673
/*
 * TIOCMIWAIT: block until one of the requested modem-status lines (DSR,
 * RI or CD) changes state, the device disappears, or a signal arrives.
 *
 * Returns 0 on a matching change (or on disconnect when waiting for CD),
 * -ENODEV on disconnect, -ERESTARTSYS on signal, -EINVAL for a mask
 * containing none of the supported lines.
 */
static int wait_serial_change(struct gb_tty *gb_tty, unsigned long arg)
{
	int retval = 0;
	DECLARE_WAITQUEUE(wait, current);
	struct async_icount old;
	struct async_icount new;

	if (!(arg & (TIOCM_DSR | TIOCM_RI | TIOCM_CD)))
		return -EINVAL;

	do {
		/* Atomically snapshot and consume the counter delta. */
		spin_lock_irq(&gb_tty->read_lock);
		old = gb_tty->oldcount;
		new = gb_tty->iocount;
		gb_tty->oldcount = new;
		spin_unlock_irq(&gb_tty->read_lock);

		if ((arg & TIOCM_DSR) && (old.dsr != new.dsr))
			break;
		if ((arg & TIOCM_CD) && (old.dcd != new.dcd))
			break;
		if ((arg & TIOCM_RI) && (old.rng != new.rng))
			break;

		add_wait_queue(&gb_tty->wioctl, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		remove_wait_queue(&gb_tty->wioctl, &wait);
		if (gb_tty->disconnected) {
			/* A CD waiter treats disconnect as carrier loss. */
			if (arg & TIOCM_CD)
				break;
			retval = -ENODEV;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
		}
	} while (!retval);

	return retval;
}
713
714static int get_serial_usage(struct gb_tty *gb_tty,
715 struct serial_icounter_struct __user *count)
716{
717 struct serial_icounter_struct icount;
718 int retval = 0;
719
720 memset(&icount, 0, sizeof(icount));
721 icount.dsr = gb_tty->iocount.dsr;
722 icount.rng = gb_tty->iocount.rng;
723 icount.dcd = gb_tty->iocount.dcd;
724 icount.frame = gb_tty->iocount.frame;
725 icount.overrun = gb_tty->iocount.overrun;
726 icount.parity = gb_tty->iocount.parity;
727 icount.brk = gb_tty->iocount.brk;
728
729 if (copy_to_user(count, &icount, sizeof(icount)) > 0)
730 retval = -EFAULT;
731
732 return retval;
733}
734
735static int gb_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
736 unsigned long arg)
737{
738 struct gb_tty *gb_tty = tty->driver_data;
739
740 switch (cmd) {
741 case TIOCGSERIAL:
742 return get_serial_info(gb_tty,
743 (struct serial_struct __user *)arg);
744 case TIOCSSERIAL:
745 return set_serial_info(gb_tty,
746 (struct serial_struct __user *)arg);
747 case TIOCMIWAIT:
748 return wait_serial_change(gb_tty, arg);
749 case TIOCGICOUNT:
750 return get_serial_usage(gb_tty,
751 (struct serial_icounter_struct __user *)arg);
752 }
753
754 return -ENOIOCTLCMD;
755}
756
757static void gb_tty_dtr_rts(struct tty_port *port, int on)
758{
759 struct gb_tty *gb_tty;
760 u8 newctrl;
761
762 gb_tty = container_of(port, struct gb_tty, port);
763 newctrl = gb_tty->ctrlout;
764
765 if (on)
766 newctrl |= (GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
767 else
768 newctrl &= ~(GB_UART_CTRL_DTR | GB_UART_CTRL_RTS);
769
770 gb_tty->ctrlout = newctrl;
771 send_control(gb_tty, newctrl);
772}
773
774static int gb_tty_port_activate(struct tty_port *port,
775 struct tty_struct *tty)
776{
777 struct gb_tty *gb_tty;
778
779 gb_tty = container_of(port, struct gb_tty, port);
780
781 return gbphy_runtime_get_sync(gb_tty->gbphy_dev);
782}
783
/*
 * Last-close hook: stop the tx worker, drop queued data, and if credits
 * are outstanding flush the remote transmitter and wait for them all to
 * return before letting the bundle autosuspend.
 */
static void gb_tty_port_shutdown(struct tty_port *port)
{
	struct gb_tty *gb_tty;
	unsigned long flags;
	int ret;

	gb_tty = container_of(port, struct gb_tty, port);

	/* Stop gb_tty_write()/credit handler from rescheduling tx work. */
	gb_tty->close_pending = true;

	cancel_work_sync(&gb_tty->tx_work);

	spin_lock_irqsave(&gb_tty->write_lock, flags);
	kfifo_reset_out(&gb_tty->write_fifo);
	spin_unlock_irqrestore(&gb_tty->write_lock, flags);

	if (gb_tty->credits == GB_UART_FIRMWARE_CREDITS)
		goto out;

	ret = gb_uart_flush(gb_tty, GB_SERIAL_FLAG_FLUSH_TRANSMITTER);
	if (ret) {
		dev_err(&gb_tty->gbphy_dev->dev,
			"error flushing transmitter: %d\n", ret);
	}

	gb_uart_wait_for_all_credits(gb_tty);

out:
	gb_tty->close_pending = false;

	gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
}
816
/* tty-core operations implemented by this driver. */
static const struct tty_operations gb_ops = {
	.install =		gb_tty_install,
	.open =			gb_tty_open,
	.close =		gb_tty_close,
	.cleanup =		gb_tty_cleanup,
	.hangup =		gb_tty_hangup,
	.write =		gb_tty_write,
	.write_room =		gb_tty_write_room,
	.ioctl =		gb_tty_ioctl,
	.throttle =		gb_tty_throttle,
	.unthrottle =		gb_tty_unthrottle,
	.chars_in_buffer =	gb_tty_chars_in_buffer,
	.break_ctl =		gb_tty_break_ctl,
	.set_termios =		gb_tty_set_termios,
	.tiocmget =		gb_tty_tiocmget,
	.tiocmset =		gb_tty_tiocmset,
};
834
835static struct tty_port_operations gb_port_ops = {
836 .dtr_rts = gb_tty_dtr_rts,
837 .activate = gb_tty_port_activate,
838 .shutdown = gb_tty_port_shutdown,
839};
840
/*
 * Probe one Greybus UART cport: create the connection, allocate the tx
 * buffer and fifo, reserve a minor, initialize the tty port, push the
 * default line settings (9600n81) and register the tty device.
 *
 * Error handling unwinds in strict reverse order of acquisition.
 */
static int gb_uart_probe(struct gbphy_device *gbphy_dev,
			 const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	size_t max_payload;
	struct gb_tty *gb_tty;
	struct device *tty_dev;
	int retval;
	int minor;

	gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
	if (!gb_tty)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  gb_uart_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto exit_tty_free;
	}

	/* The send-data header must fit in a single operation payload. */
	max_payload = gb_operation_get_payload_size_max(connection);
	if (max_payload < sizeof(struct gb_uart_send_data_request)) {
		retval = -EINVAL;
		goto exit_connection_destroy;
	}

	gb_tty->buffer_payload_max = max_payload -
			sizeof(struct gb_uart_send_data_request);

	gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
	if (!gb_tty->buffer) {
		retval = -ENOMEM;
		goto exit_connection_destroy;
	}

	INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);

	retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
			     GFP_KERNEL);
	if (retval)
		goto exit_buf_free;

	/* Start with the full allowance; the firmware grants more later. */
	gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
	init_completion(&gb_tty->credits_complete);

	minor = alloc_minor(gb_tty);
	if (minor < 0) {
		if (minor == -ENOSPC) {
			dev_err(&connection->bundle->dev,
				"no more free minor numbers\n");
			retval = -ENODEV;
		} else {
			retval = minor;
		}
		goto exit_kfifo_free;
	}

	gb_tty->minor = minor;
	spin_lock_init(&gb_tty->write_lock);
	spin_lock_init(&gb_tty->read_lock);
	init_waitqueue_head(&gb_tty->wioctl);
	mutex_init(&gb_tty->mutex);

	tty_port_init(&gb_tty->port);
	gb_tty->port.ops = &gb_port_ops;

	gb_tty->connection = connection;
	gb_tty->gbphy_dev = gbphy_dev;
	gb_connection_set_data(connection, gb_tty);
	gb_gbphy_set_data(gbphy_dev, gb_tty);

	/* Enable tx-only first so the port can be configured before rx. */
	retval = gb_connection_enable_tx(connection);
	if (retval)
		goto exit_release_minor;

	send_control(gb_tty, gb_tty->ctrlout);

	/* initialize the uart to be 9600n81 */
	gb_tty->line_coding.rate = cpu_to_le32(9600);
	gb_tty->line_coding.format = GB_SERIAL_1_STOP_BITS;
	gb_tty->line_coding.parity = GB_SERIAL_NO_PARITY;
	gb_tty->line_coding.data_bits = 8;
	send_line_coding(gb_tty);

	retval = gb_connection_enable(connection);
	if (retval)
		goto exit_connection_disable;

	tty_dev = tty_port_register_device(&gb_tty->port, gb_tty_driver, minor,
					   &gbphy_dev->dev);
	if (IS_ERR(tty_dev)) {
		retval = PTR_ERR(tty_dev);
		goto exit_connection_disable;
	}

	gbphy_runtime_put_autosuspend(gbphy_dev);
	return 0;

	/*
	 * NOTE(review): the tty_port initialized above is not torn down on
	 * these error paths (no tty_port_put/tty_port_destroy) — verify
	 * whether a port reference is leaked here.
	 */
exit_connection_disable:
	gb_connection_disable(connection);
exit_release_minor:
	release_minor(gb_tty);
exit_kfifo_free:
	kfifo_free(&gb_tty->write_fifo);
exit_buf_free:
	kfree(gb_tty->buffer);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_tty_free:
	kfree(gb_tty);

	return retval;
}
956
/*
 * Tear down one UART port: mark it disconnected, hang up any open tty,
 * then release resources in reverse order of probe.
 */
static void gb_uart_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_tty *gb_tty = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = gb_tty->connection;
	struct tty_struct *tty;
	int ret;

	ret = gbphy_runtime_get_sync(gbphy_dev);
	if (ret)
		gbphy_runtime_get_noresume(gbphy_dev);

	/* Block new opens and wake any TIOCMIWAIT sleepers. */
	mutex_lock(&gb_tty->mutex);
	gb_tty->disconnected = true;

	wake_up_all(&gb_tty->wioctl);
	mutex_unlock(&gb_tty->mutex);

	tty = tty_port_tty_get(&gb_tty->port);
	if (tty) {
		tty_vhangup(tty);
		tty_kref_put(tty);
	}

	/* Stop unsolicited requests before unregistering the device. */
	gb_connection_disable_rx(connection);
	tty_unregister_device(gb_tty_driver, gb_tty->minor);

	/* FIXME - free transmit / receive buffers */

	gb_connection_disable(connection);
	tty_port_destroy(&gb_tty->port);
	gb_connection_destroy(connection);
	release_minor(gb_tty);
	kfifo_free(&gb_tty->write_fifo);
	kfree(gb_tty->buffer);
	kfree(gb_tty);
}
993
994static int gb_tty_init(void)
995{
996 int retval = 0;
997
998 gb_tty_driver = tty_alloc_driver(GB_NUM_MINORS, 0);
999 if (IS_ERR(gb_tty_driver)) {
1000 pr_err("Can not allocate tty driver\n");
1001 retval = -ENOMEM;
1002 goto fail_unregister_dev;
1003 }
1004
1005 gb_tty_driver->driver_name = "gb";
1006 gb_tty_driver->name = GB_NAME;
1007 gb_tty_driver->major = 0;
1008 gb_tty_driver->minor_start = 0;
1009 gb_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
1010 gb_tty_driver->subtype = SERIAL_TYPE_NORMAL;
1011 gb_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
1012 gb_tty_driver->init_termios = tty_std_termios;
1013 gb_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
1014 tty_set_operations(gb_tty_driver, &gb_ops);
1015
1016 retval = tty_register_driver(gb_tty_driver);
1017 if (retval) {
1018 pr_err("Can not register tty driver: %d\n", retval);
1019 goto fail_put_gb_tty;
1020 }
1021
1022 return 0;
1023
1024fail_put_gb_tty:
1025 put_tty_driver(gb_tty_driver);
1026fail_unregister_dev:
1027 return retval;
1028}
1029
/* Tear down the shared tty driver and discard the minor-number table. */
static void gb_tty_exit(void)
{
	tty_unregister_driver(gb_tty_driver);
	put_tty_driver(gb_tty_driver);
	idr_destroy(&tty_minors);
}
1036
/* Bind to any gbphy device advertising the UART protocol. */
static const struct gbphy_device_id gb_uart_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_UART) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_uart_id_table);
1042
/* gbphy driver glue: one UART port per matching cport. */
static struct gbphy_driver uart_driver = {
	.name = "uart",
	.probe = gb_uart_probe,
	.remove = gb_uart_remove,
	.id_table = gb_uart_id_table,
};
1049
1050static int gb_uart_driver_init(void)
1051{
1052 int ret;
1053
1054 ret = gb_tty_init();
1055 if (ret)
1056 return ret;
1057
1058 ret = gb_gbphy_register(&uart_driver);
1059 if (ret) {
1060 gb_tty_exit();
1061 return ret;
1062 }
1063
1064 return 0;
1065}
1066module_init(gb_uart_driver_init);
1067
/*
 * Module exit: deregister the gbphy driver first so no new ports can
 * appear, then tear down the tty driver.
 */
static void gb_uart_driver_exit(void)
{
	gb_gbphy_deregister(&uart_driver);
	gb_tty_exit();
}

module_exit(gb_uart_driver_exit);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/usb.c b/drivers/staging/greybus/usb.c
new file mode 100644
index 000000000000..ccadda084b76
--- /dev/null
+++ b/drivers/staging/greybus/usb.c
@@ -0,0 +1,247 @@
1/*
2 * USB host driver for the Greybus "generic" USB module.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 *
9 */
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/usb.h>
14#include <linux/usb/hcd.h>
15
16#include "greybus.h"
17#include "gbphy.h"
18
19/* Greybus USB request types */
20#define GB_USB_TYPE_HCD_START 0x02
21#define GB_USB_TYPE_HCD_STOP 0x03
22#define GB_USB_TYPE_HUB_CONTROL 0x04
23
24struct gb_usb_hub_control_request {
25 __le16 typeReq;
26 __le16 wValue;
27 __le16 wIndex;
28 __le16 wLength;
29};
30
31struct gb_usb_hub_control_response {
32 u8 buf[0];
33};
34
35struct gb_usb_device {
36 struct gb_connection *connection;
37 struct gbphy_device *gbphy_dev;
38};
39
/* hcd_priv is the trailing private area of struct usb_hcd; it holds our
 * per-controller gb_usb_device. */
static inline struct gb_usb_device *to_gb_usb_device(struct usb_hcd *hcd)
{
	return (struct gb_usb_device *)hcd->hcd_priv;
}

/* Inverse of to_gb_usb_device(): recover the usb_hcd that embeds the
 * given private state (dev points at hcd->hcd_priv). */
static inline struct usb_hcd *gb_usb_device_to_hcd(struct gb_usb_device *dev)
{
	return container_of((void *)dev, struct usb_hcd, hcd_priv);
}
49
/*
 * Ask the module to stop its host controller.  Failures are only
 * logged, since the USB core's ->stop() callback returns void.
 */
static void hcd_stop(struct usb_hcd *hcd)
{
	struct gb_usb_device *dev = to_gb_usb_device(hcd);
	int ret;

	ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_STOP,
				NULL, 0, NULL, 0);
	if (ret)
		dev_err(&dev->gbphy_dev->dev, "HCD stop failed '%d'\n", ret);
}
60
/*
 * Start the remote host controller via a Greybus HCD_START operation,
 * then mark the HCD as running and kick the root hub if one already
 * exists.  Returns 0 or a negative errno from the operation.
 */
static int hcd_start(struct usb_hcd *hcd)
{
	struct usb_bus *bus = hcd_to_bus(hcd);
	struct gb_usb_device *dev = to_gb_usb_device(hcd);
	int ret;

	ret = gb_operation_sync(dev->connection, GB_USB_TYPE_HCD_START,
				NULL, 0, NULL, 0);
	if (ret) {
		dev_err(&dev->gbphy_dev->dev, "HCD start failed '%d'\n", ret);
		return ret;
	}

	hcd->state = HC_STATE_RUNNING;
	if (bus->root_hub)
		usb_hcd_resume_root_hub(hcd);
	return 0;
}
79
/* URB submission is not implemented yet (see the FIXME in probe);
 * reject every URB. */
static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	return -ENXIO;
}

/* Nothing was ever enqueued, so there is nothing to dequeue. */
static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	return -ENXIO;
}

/* No frame counter is forwarded by the protocol; always report 0. */
static int get_frame_number(struct usb_hcd *hcd)
{
	return 0;
}

/* Always report "no hub status change" (a zero return means no bits
 * were set in @buf). */
static int hub_status_data(struct usb_hcd *hcd, char *buf)
{
	return 0;
}
99
/*
 * Forward a hub-class control request to the module.
 *
 * The setup-packet fields are packed little-endian into the request;
 * any IN data (wLength bytes) comes back in the response payload and is
 * copied into @buf.  Returns 0 on success or a negative errno.
 */
static int hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
		       char *buf, u16 wLength)
{
	struct gb_usb_device *dev = to_gb_usb_device(hcd);
	struct gb_operation *operation;
	struct gb_usb_hub_control_request *request;
	struct gb_usb_hub_control_response *response;
	size_t response_size;
	int ret;

	/* FIXME: handle unspecified lengths */
	response_size = sizeof(*response) + wLength;

	operation = gb_operation_create(dev->connection,
					GB_USB_TYPE_HUB_CONTROL,
					sizeof(*request),
					response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	request = operation->request->payload;
	request->typeReq = cpu_to_le16(typeReq);
	request->wValue = cpu_to_le16(wValue);
	request->wIndex = cpu_to_le16(wIndex);
	request->wLength = cpu_to_le16(wLength);

	ret = gb_operation_request_send_sync(operation);
	if (ret)
		goto out;

	if (wLength) {
		/* Greybus core has verified response size */
		response = operation->response->payload;
		memcpy(buf, response->buf, wLength);
	}
out:
	gb_operation_put(operation);

	return ret;
}
141
/*
 * Greybus HCD operations.  URB submission/cancellation are stubbed out
 * (-ENXIO) until the USB-core dependencies land; only start/stop and
 * hub control are forwarded to the module.
 */
static struct hc_driver usb_gb_hc_driver = {
	.description = "greybus-hcd",
	.product_desc = "Greybus USB Host Controller",
	.hcd_priv_size = sizeof(struct gb_usb_device),

	.flags = HCD_USB2,

	.start = hcd_start,
	.stop = hcd_stop,

	.urb_enqueue = urb_enqueue,
	.urb_dequeue = urb_dequeue,

	.get_frame_number = get_frame_number,
	.hub_status_data = hub_status_data,
	.hub_control = hub_control,
};
159
/*
 * Probe a USB bridged-PHY device: create an HCD with our per-device
 * private area, set up and enable the Greybus connection, then (once
 * the USB-core dependencies land) register the HCD with the USB core.
 * Errors are unwound in reverse order via the labels at the bottom.
 */
static int gb_usb_probe(struct gbphy_device *gbphy_dev,
			const struct gbphy_device_id *id)
{
	struct gb_connection *connection;
	struct device *dev = &gbphy_dev->dev;
	struct gb_usb_device *gb_usb_dev;
	struct usb_hcd *hcd;
	int retval;

	hcd = usb_create_hcd(&usb_gb_hc_driver, dev, dev_name(dev));
	if (!hcd)
		return -ENOMEM;

	connection = gb_connection_create(gbphy_dev->bundle,
					  le16_to_cpu(gbphy_dev->cport_desc->id),
					  NULL);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto exit_usb_put;
	}

	/* Wire up the cross-references between the HCD private data, the
	 * connection and the gbphy device. */
	gb_usb_dev = to_gb_usb_device(hcd);
	gb_usb_dev->connection = connection;
	gb_connection_set_data(connection, gb_usb_dev);
	gb_usb_dev->gbphy_dev = gbphy_dev;
	gb_gbphy_set_data(gbphy_dev, gb_usb_dev);

	hcd->has_tt = 1;

	retval = gb_connection_enable(connection);
	if (retval)
		goto exit_connection_destroy;

	/*
	 * FIXME: The USB bridged-PHY protocol driver depends on changes to
	 * USB core which are not yet upstream.
	 *
	 * Disable for now.
	 */
	if (1) {
		dev_warn(dev, "USB protocol disabled\n");
		retval = -EPROTONOSUPPORT;
		goto exit_connection_disable;
	}

	/* Unreachable until the disable block above is removed. */
	retval = usb_add_hcd(hcd, 0, 0);
	if (retval)
		goto exit_connection_disable;

	return 0;

exit_connection_disable:
	gb_connection_disable(connection);
exit_connection_destroy:
	gb_connection_destroy(connection);
exit_usb_put:
	usb_put_hcd(hcd);

	return retval;
}
220
/*
 * Unbind: remove the HCD from the USB core, shut down and destroy the
 * Greybus connection, then drop the HCD reference (which frees the
 * embedded gb_usb_device).
 */
static void gb_usb_remove(struct gbphy_device *gbphy_dev)
{
	struct gb_usb_device *gb_usb_dev = gb_gbphy_get_data(gbphy_dev);
	struct gb_connection *connection = gb_usb_dev->connection;
	struct usb_hcd *hcd = gb_usb_device_to_hcd(gb_usb_dev);

	usb_remove_hcd(hcd);
	gb_connection_disable(connection);
	gb_connection_destroy(connection);
	usb_put_hcd(hcd);
}
232
/* Match table: bind to bridged-PHY devices speaking the Greybus USB
 * protocol. */
static const struct gbphy_device_id gb_usb_id_table[] = {
	{ GBPHY_PROTOCOL(GREYBUS_PROTOCOL_USB) },
	{ },
};
MODULE_DEVICE_TABLE(gbphy, gb_usb_id_table);

/* Bridged-PHY driver glue: probe/remove callbacks plus the table above. */
static struct gbphy_driver usb_driver = {
	.name = "usb",
	.probe = gb_usb_probe,
	.remove = gb_usb_remove,
	.id_table = gb_usb_id_table,
};
245
246module_gbphy_driver(usb_driver);
247MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
new file mode 100644
index 000000000000..4ba0e168930f
--- /dev/null
+++ b/drivers/staging/greybus/vibrator.c
@@ -0,0 +1,249 @@
1/*
2 * Greybus Vibrator protocol driver.
3 *
4 * Copyright 2014 Google Inc.
5 * Copyright 2014 Linaro Ltd.
6 *
7 * Released under the GPLv2 only.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/slab.h>
13#include <linux/device.h>
14#include <linux/kdev_t.h>
15#include <linux/idr.h>
16#include <linux/pm_runtime.h>
17
18#include "greybus.h"
19
/* Per-bundle vibrator state. */
struct gb_vibrator_device {
	struct gb_connection *connection;
	struct device *dev;	/* "vibratorN" class device in sysfs */
	int minor; /* vibrator minor number */
	struct delayed_work delayed_work;	/* auto-off after the on-time */
};

/* Greybus Vibrator operation types */
#define GB_VIBRATOR_TYPE_ON 0x02
#define GB_VIBRATOR_TYPE_OFF 0x03
30
/*
 * Send the OFF operation and drop the runtime-PM reference taken by
 * turn_on().
 *
 * NOTE(review): this also runs when userspace writes 0 to "timeout"
 * without a preceding turn_on() (see timeout_store()), in which case the
 * put here has no matching get -- looks like an unbalanced runtime-PM
 * usage count; confirm against the bundle PM implementation.
 */
static int turn_off(struct gb_vibrator_device *vib)
{
	struct gb_bundle *bundle = vib->connection->bundle;
	int ret;

	ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_OFF,
				NULL, 0, NULL, 0);

	gb_pm_runtime_put_autosuspend(bundle);

	return ret;
}
43
/*
 * Switch the vibrator on for @timeout_ms milliseconds.  Takes a
 * runtime-PM reference that is released by turn_off(), either from the
 * delayed work scheduled below or from an explicit off request.
 */
static int turn_on(struct gb_vibrator_device *vib, u16 timeout_ms)
{
	struct gb_bundle *bundle = vib->connection->bundle;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	/* Vibrator was switched ON earlier: turn it off (balancing its PM
	 * reference) before restarting the cycle. */
	if (cancel_delayed_work_sync(&vib->delayed_work))
		turn_off(vib);

	ret = gb_operation_sync(vib->connection, GB_VIBRATOR_TYPE_ON,
				NULL, 0, NULL, 0);
	if (ret) {
		gb_pm_runtime_put_autosuspend(bundle);
		return ret;
	}

	/* Auto-off once the requested on-time elapses. */
	schedule_delayed_work(&vib->delayed_work, msecs_to_jiffies(timeout_ms));

	return 0;
}
68
69static void gb_vibrator_worker(struct work_struct *work)
70{
71 struct delayed_work *delayed_work = to_delayed_work(work);
72 struct gb_vibrator_device *vib =
73 container_of(delayed_work, struct gb_vibrator_device, delayed_work);
74
75 turn_off(vib);
76}
77
78static ssize_t timeout_store(struct device *dev, struct device_attribute *attr,
79 const char *buf, size_t count)
80{
81 struct gb_vibrator_device *vib = dev_get_drvdata(dev);
82 unsigned long val;
83 int retval;
84
85 retval = kstrtoul(buf, 10, &val);
86 if (retval < 0) {
87 dev_err(dev, "could not parse timeout value %d\n", retval);
88 return retval;
89 }
90
91 if (val)
92 retval = turn_on(vib, (u16)val);
93 else
94 retval = turn_off(vib);
95 if (retval)
96 return retval;
97
98 return count;
99}
100static DEVICE_ATTR_WO(timeout);
101
/* sysfs attributes exposed on each vibrator class device. */
static struct attribute *vibrator_attrs[] = {
	&dev_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vibrator);

/* "vibrator" class under /sys/class; its devices get the groups above. */
static struct class vibrator_class = {
	.name = "vibrator",
	.owner = THIS_MODULE,
	.dev_groups = vibrator_groups,
};

/* Allocator for the "vibratorN" device numbering. */
static DEFINE_IDA(minors);
115
116static int gb_vibrator_probe(struct gb_bundle *bundle,
117 const struct greybus_bundle_id *id)
118{
119 struct greybus_descriptor_cport *cport_desc;
120 struct gb_connection *connection;
121 struct gb_vibrator_device *vib;
122 struct device *dev;
123 int retval;
124
125 if (bundle->num_cports != 1)
126 return -ENODEV;
127
128 cport_desc = &bundle->cport_desc[0];
129 if (cport_desc->protocol_id != GREYBUS_PROTOCOL_VIBRATOR)
130 return -ENODEV;
131
132 vib = kzalloc(sizeof(*vib), GFP_KERNEL);
133 if (!vib)
134 return -ENOMEM;
135
136 connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
137 NULL);
138 if (IS_ERR(connection)) {
139 retval = PTR_ERR(connection);
140 goto err_free_vib;
141 }
142 gb_connection_set_data(connection, vib);
143
144 vib->connection = connection;
145
146 greybus_set_drvdata(bundle, vib);
147
148 retval = gb_connection_enable(connection);
149 if (retval)
150 goto err_connection_destroy;
151
152 /*
153 * For now we create a device in sysfs for the vibrator, but odds are
154 * there is a "real" device somewhere in the kernel for this, but I
155 * can't find it at the moment...
156 */
157 vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
158 if (vib->minor < 0) {
159 retval = vib->minor;
160 goto err_connection_disable;
161 }
162 dev = device_create(&vibrator_class, &bundle->dev,
163 MKDEV(0, 0), vib, "vibrator%d", vib->minor);
164 if (IS_ERR(dev)) {
165 retval = -EINVAL;
166 goto err_ida_remove;
167 }
168 vib->dev = dev;
169
170 INIT_DELAYED_WORK(&vib->delayed_work, gb_vibrator_worker);
171
172 gb_pm_runtime_put_autosuspend(bundle);
173
174 return 0;
175
176err_ida_remove:
177 ida_simple_remove(&minors, vib->minor);
178err_connection_disable:
179 gb_connection_disable(connection);
180err_connection_destroy:
181 gb_connection_destroy(connection);
182err_free_vib:
183 kfree(vib);
184
185 return retval;
186}
187
/*
 * Unbind: resume the bundle (or at least keep the usage count balanced
 * if resume fails), cancel any pending auto-off -- turning the motor
 * off if it was running -- then tear down the sysfs device, the minor
 * number and the connection.
 */
static void gb_vibrator_disconnect(struct gb_bundle *bundle)
{
	struct gb_vibrator_device *vib = greybus_get_drvdata(bundle);
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	if (cancel_delayed_work_sync(&vib->delayed_work))
		turn_off(vib);

	device_unregister(vib->dev);
	ida_simple_remove(&minors, vib->minor);
	gb_connection_disable(vib->connection);
	gb_connection_destroy(vib->connection);
	kfree(vib);
}
206
/* Match table: bind to bundles of the vibrator device class. */
static const struct greybus_bundle_id gb_vibrator_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_VIBRATOR) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_vibrator_id_table);

/* Greybus driver glue: probe/disconnect callbacks plus the table above. */
static struct greybus_driver gb_vibrator_driver = {
	.name = "vibrator",
	.probe = gb_vibrator_probe,
	.disconnect = gb_vibrator_disconnect,
	.id_table = gb_vibrator_id_table,
};
219
220static __init int gb_vibrator_init(void)
221{
222 int retval;
223
224 retval = class_register(&vibrator_class);
225 if (retval)
226 return retval;
227
228 retval = greybus_register(&gb_vibrator_driver);
229 if (retval)
230 goto err_class_unregister;
231
232 return 0;
233
234err_class_unregister:
235 class_unregister(&vibrator_class);
236
237 return retval;
238}
239module_init(gb_vibrator_init);
240
/*
 * Module exit: deregister the driver, remove the class, then release
 * the minor-number IDA.
 */
static __exit void gb_vibrator_exit(void)
{
	greybus_deregister(&gb_vibrator_driver);
	class_unregister(&vibrator_class);
	ida_destroy(&minors);
}
module_exit(gb_vibrator_exit);

MODULE_LICENSE("GPL v2");