 Documentation/ABI/testing/sysfs-bus-thunderbolt |   48
 drivers/thunderbolt/Kconfig                     |   12
 drivers/thunderbolt/Makefile                    |    2
 drivers/thunderbolt/ctl.c                       |    2
 drivers/thunderbolt/domain.c                    |  195
 drivers/thunderbolt/icm.c                       | 1058
 drivers/thunderbolt/nhi.c                       |   33
 drivers/thunderbolt/nhi_regs.h                  |    7
 drivers/thunderbolt/switch.c                    |  222
 drivers/thunderbolt/tb.c                        |    7
 drivers/thunderbolt/tb.h                        |   79
 drivers/thunderbolt/tb_msgs.h                   |  152
 12 files changed, 1805 insertions(+), 12 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 29a516f53d2c..05b7f9a6431f 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -1,3 +1,51 @@
+What:		/sys/bus/thunderbolt/devices/.../domainX/security
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute holds current Thunderbolt security level
+		set by the system BIOS. Possible values are:
+
+		none: All devices are automatically authorized
+		user: Devices are only authorized based on writing
+		      appropriate value to the authorized attribute
+		secure: Require devices that support secure connect at
+			minimum. User needs to authorize each device.
+		dponly: Automatically tunnel Display port (and USB). No
+			PCIe tunnels are created.
+
+What:		/sys/bus/thunderbolt/devices/.../authorized
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	This attribute is used to authorize Thunderbolt devices
+		after they have been connected. If the device is not
+		authorized, no devices such as PCIe and Display port are
+		available to the system.
+
+		Contents of this attribute will be 0 when the device is not
+		yet authorized.
+
+		Possible values are supported:
+		1: The device will be authorized and connected
+
+		When the key attribute contains a 32 byte hex string the
+		possible values are:
+		1: The 32 byte hex string is added to the device NVM and
+		   the device is authorized.
+		2: Send a challenge based on the 32 byte hex string. If the
+		   challenge response from device is valid, the device is
+		   authorized. In case of failure errno will be ENOKEY if
+		   the device did not contain a key at all, and
+		   EKEYREJECTED if the challenge response did not match.
+
+What:		/sys/bus/thunderbolt/devices/.../key
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	When a device supports Thunderbolt secure connect it will
+		have this attribute. Writing a 32 byte hex string changes
+		authorization to use the secure connection method instead.
+
 What:		/sys/bus/thunderbolt/devices/.../device
 Date:		Sep 2017
 KernelVersion:	4.13
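
The ABI above can be driven entirely from userspace. As an illustrative
sketch only (not part of the patch; the device path is hypothetical and
depends on the topology), approving an already-connected device comes
down to writing "1" to its authorized attribute:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical device path; enumerate /sys/bus/thunderbolt to find it */
	const char *path = "/sys/bus/thunderbolt/devices/0-1/authorized";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" approves the device; "2" additionally runs the key challenge */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}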
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index d35db16aa43f..a9cc724985ad 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -1,15 +1,15 @@
 menuconfig THUNDERBOLT
-	tristate "Thunderbolt support for Apple devices"
+	tristate "Thunderbolt support"
 	depends on PCI
 	depends on X86 || COMPILE_TEST
 	select APPLE_PROPERTIES if EFI_STUB && X86
 	select CRC32
+	select CRYPTO
+	select CRYPTO_HASH
 	help
-	  Cactus Ridge Thunderbolt Controller driver
-	  This driver is required if you want to hotplug Thunderbolt devices on
-	  Apple hardware.
-
-	  Device chaining is currently not supported.
+	  Thunderbolt Controller driver. This driver is required if you
+	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
+	  with Intel Falcon Ridge or newer.
 
 	  To compile this driver a module, choose M here. The module will be
 	  called thunderbolt.
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 9828e862dd35..4900febc6c8a 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o
+thunderbolt-objs += domain.o dma_port.o icm.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 27c30ff79a84..69c0232a22f8 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -463,6 +463,8 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
463 "RX: checksum mismatch, dropping packet\n"); 463 "RX: checksum mismatch, dropping packet\n");
464 goto rx; 464 goto rx;
465 } 465 }
466 /* Fall through */
467 case TB_CFG_PKG_ICM_EVENT:
466 tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size); 468 tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
467 goto rx; 469 goto rx;
468 470
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 54bc15f9bf6b..f71b63e90016 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -13,11 +13,43 @@
 #include <linux/idr.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/random.h>
+#include <crypto/hash.h>
 
 #include "tb.h"
 
 static DEFINE_IDA(tb_domain_ida);
 
+static const char * const tb_security_names[] = {
+	[TB_SECURITY_NONE] = "none",
+	[TB_SECURITY_USER] = "user",
+	[TB_SECURITY_SECURE] = "secure",
+	[TB_SECURITY_DPONLY] = "dponly",
+};
+
+static ssize_t security_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	return sprintf(buf, "%s\n", tb_security_names[tb->security_level]);
+}
+static DEVICE_ATTR_RO(security);
+
+static struct attribute *domain_attrs[] = {
+	&dev_attr_security.attr,
+	NULL,
+};
+
+static struct attribute_group domain_attr_group = {
+	.attrs = domain_attrs,
+};
+
+static const struct attribute_group *domain_attr_groups[] = {
+	&domain_attr_group,
+	NULL,
+};
+
 struct bus_type tb_bus_type = {
 	.name = "thunderbolt",
 };
@@ -82,6 +114,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
 	tb->dev.parent = &nhi->pdev->dev;
 	tb->dev.bus = &tb_bus_type;
 	tb->dev.type = &tb_domain_type;
+	tb->dev.groups = domain_attr_groups;
 	dev_set_name(&tb->dev, "domain%d", tb->index);
 	device_initialize(&tb->dev);
 
@@ -140,6 +173,12 @@ int tb_domain_add(struct tb *tb)
 	 */
 	tb_ctl_start(tb->ctl);
 
+	if (tb->cm_ops->driver_ready) {
+		ret = tb->cm_ops->driver_ready(tb);
+		if (ret)
+			goto err_ctl_stop;
+	}
+
 	ret = device_add(&tb->dev);
 	if (ret)
 		goto err_ctl_stop;
@@ -231,6 +270,162 @@ int tb_domain_resume_noirq(struct tb *tb)
 	return ret;
 }
 
+int tb_domain_suspend(struct tb *tb)
+{
+	int ret;
+
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->suspend) {
+		ret = tb->cm_ops->suspend(tb);
+		if (ret) {
+			mutex_unlock(&tb->lock);
+			return ret;
+		}
+	}
+	mutex_unlock(&tb->lock);
+	return 0;
+}
+
+void tb_domain_complete(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->complete)
+		tb->cm_ops->complete(tb);
+	mutex_unlock(&tb->lock);
+}
+
+/**
+ * tb_domain_approve_switch() - Approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * This will approve the switch by connection manager specific means. In
+ * case of success the connection manager will create tunnels for all
+ * supported protocols.
+ */
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	if (!tb->cm_ops->approve_switch)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_approve_switch_key() - Approve switch and add key
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function first adds the
+ * key to the switch NVM using connection manager specific means. If
+ * adding the key is successful, the switch is approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	ret = tb->cm_ops->add_switch_key(tb, sw);
+	if (ret)
+		return ret;
+
+	return tb->cm_ops->approve_switch(tb, sw);
+}
+
+/**
+ * tb_domain_challenge_switch_key() - Challenge and approve switch
+ * @tb: Domain the switch belongs to
+ * @sw: Switch to approve
+ *
+ * For switches that support secure connect, this function generates a
+ * random challenge and sends it to the switch. The switch responds to
+ * this and if the response matches our random challenge, the switch is
+ * approved and connected.
+ *
+ * Return: %0 on success and negative errno in case of failure.
+ */
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	u8 challenge[TB_SWITCH_KEY_SIZE];
+	u8 response[TB_SWITCH_KEY_SIZE];
+	u8 hmac[TB_SWITCH_KEY_SIZE];
+	struct tb_switch *parent_sw;
+	struct crypto_shash *tfm;
+	struct shash_desc *shash;
+	int ret;
+
+	if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
+		return -EPERM;
+
+	/* The parent switch must be authorized before this one */
+	parent_sw = tb_to_switch(sw->dev.parent);
+	if (!parent_sw || !parent_sw->authorized)
+		return -EINVAL;
+
+	get_random_bytes(challenge, sizeof(challenge));
+	ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
+	if (ret)
+		return ret;
+
+	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
+	if (IS_ERR(tfm))
+		return PTR_ERR(tfm);
+
+	ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
+	if (ret)
+		goto err_free_tfm;
+
+	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
+			GFP_KERNEL);
+	if (!shash) {
+		ret = -ENOMEM;
+		goto err_free_tfm;
+	}
+
+	shash->tfm = tfm;
+	shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	memset(hmac, 0, sizeof(hmac));
+	ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
+	if (ret)
+		goto err_free_shash;
+
+	/* The returned HMAC must match the one we calculated */
+	if (memcmp(response, hmac, sizeof(hmac))) {
+		ret = -EKEYREJECTED;
+		goto err_free_shash;
+	}
+
+	crypto_free_shash(tfm);
+	kfree(shash);
+
+	return tb->cm_ops->approve_switch(tb, sw);
+
+err_free_shash:
+	kfree(shash);
+err_free_tfm:
+	crypto_free_shash(tfm);
+
+	return ret;
+}
+
 int tb_domain_init(void)
 {
 	return bus_register(&tb_bus_type);
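
The verification above reduces to checking that response ==
HMAC-SHA256(key, challenge). For illustration only (not part of the
patch; the kernel side keeps using the crypto API as shown), the same
relation expressed in userspace C with OpenSSL's HMAC():

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <string.h>

#define TB_SWITCH_KEY_SIZE 32

/* Returns 1 when the device derived the same 32-byte digest we expect */
int verify_response(const unsigned char *key, const unsigned char *challenge,
		    const unsigned char *response)
{
	unsigned char hmac[EVP_MAX_MD_SIZE];
	unsigned int len = 0;

	HMAC(EVP_sha256(), key, TB_SWITCH_KEY_SIZE,
	     challenge, TB_SWITCH_KEY_SIZE, hmac, &len);

	return len == TB_SWITCH_KEY_SIZE &&
	       memcmp(hmac, response, TB_SWITCH_KEY_SIZE) == 0;
}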
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
new file mode 100644
index 000000000000..0ffa4ec249ac
--- /dev/null
+++ b/drivers/thunderbolt/icm.c
@@ -0,0 +1,1058 @@
+/*
+ * Internal Thunderbolt Connection Manager. This is a firmware running on
+ * the Thunderbolt host controller performing most of the low-level
+ * handling.
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "ctl.h"
+#include "nhi_regs.h"
+#include "tb.h"
+
+#define PCIE2CIO_CMD			0x30
+#define PCIE2CIO_CMD_TIMEOUT		BIT(31)
+#define PCIE2CIO_CMD_START		BIT(30)
+#define PCIE2CIO_CMD_WRITE		BIT(21)
+#define PCIE2CIO_CMD_CS_MASK		GENMASK(20, 19)
+#define PCIE2CIO_CMD_CS_SHIFT		19
+#define PCIE2CIO_CMD_PORT_MASK		GENMASK(18, 13)
+#define PCIE2CIO_CMD_PORT_SHIFT		13
+
+#define PCIE2CIO_WRDATA			0x34
+#define PCIE2CIO_RDDATA			0x38
+
+#define PHY_PORT_CS1			0x37
+#define PHY_PORT_CS1_LINK_DISABLE	BIT(14)
+#define PHY_PORT_CS1_LINK_STATE_MASK	GENMASK(29, 26)
+#define PHY_PORT_CS1_LINK_STATE_SHIFT	26
+
+#define ICM_TIMEOUT			5000 /* ms */
+#define ICM_MAX_LINK			4
+#define ICM_MAX_DEPTH			6
+
+/**
+ * struct icm - Internal connection manager private data
+ * @request_lock: Makes sure only one message is sent to the ICM at a time
+ * @rescan_work: Work used to rescan the surviving switches after resume
+ * @upstream_port: Pointer to the PCIe upstream port this host
+ *		   controller is connected to. This is only set for systems
+ *		   where ICM needs to be started manually
+ * @vnd_cap: Vendor defined capability where the PCIe2CIO mailbox resides
+ *	     (only set when @upstream_port is not %NULL)
+ * @is_supported: Checks if we can support ICM on this controller
+ * @get_mode: Read and return the ICM firmware mode (optional)
+ * @get_route: Find a route string for the given switch
+ * @device_connected: Handle device connected ICM message
+ * @device_disconnected: Handle device disconnected ICM message
+ */
+struct icm {
+	struct mutex request_lock;
+	struct delayed_work rescan_work;
+	struct pci_dev *upstream_port;
+	int vnd_cap;
+	bool (*is_supported)(struct tb *tb);
+	int (*get_mode)(struct tb *tb);
+	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
+	void (*device_connected)(struct tb *tb,
+				 const struct icm_pkg_header *hdr);
+	void (*device_disconnected)(struct tb *tb,
+				    const struct icm_pkg_header *hdr);
+};
+
+struct icm_notification {
+	struct work_struct work;
+	struct icm_pkg_header *pkg;
+	struct tb *tb;
+};
+
+static inline struct tb *icm_to_tb(struct icm *icm)
+{
+	return ((void *)icm - sizeof(struct tb));
+}
+
+static inline u8 phy_port_from_route(u64 route, u8 depth)
+{
+	return tb_switch_phy_port_from_link(route >> ((depth - 1) * 8));
+}
+
+static inline u8 dual_link_from_link(u8 link)
+{
+	return link ? ((link - 1) ^ 0x01) + 1 : 0;
+}
+
+static inline u64 get_route(u32 route_hi, u32 route_lo)
+{
+	return (u64)route_hi << 32 | route_lo;
+}
+
+static inline bool is_apple(void)
+{
+	return dmi_match(DMI_BOARD_VENDOR, "Apple Inc.");
+}
+
+static bool icm_match(const struct tb_cfg_request *req,
+		      const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *res_hdr = pkg->buffer;
+	const struct icm_pkg_header *req_hdr = req->request;
+
+	if (pkg->frame.eof != req->response_type)
+		return false;
+	if (res_hdr->code != req_hdr->code)
+		return false;
+
+	return true;
+}
+
+static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
+{
+	const struct icm_pkg_header *hdr = pkg->buffer;
+
+	if (hdr->packet_id < req->npackets) {
+		size_t offset = hdr->packet_id * req->response_size;
+
+		memcpy(req->response + offset, pkg->buffer, req->response_size);
+	}
+
+	return hdr->packet_id == hdr->total_packets - 1;
+}
+
+static int icm_request(struct tb *tb, const void *request, size_t request_size,
+		       void *response, size_t response_size, size_t npackets,
+		       unsigned int timeout_msec)
+{
+	struct icm *icm = tb_priv(tb);
+	int retries = 3;
+
+	do {
+		struct tb_cfg_request *req;
+		struct tb_cfg_result res;
+
+		req = tb_cfg_request_alloc();
+		if (!req)
+			return -ENOMEM;
+
+		req->match = icm_match;
+		req->copy = icm_copy;
+		req->request = request;
+		req->request_size = request_size;
+		req->request_type = TB_CFG_PKG_ICM_CMD;
+		req->response = response;
+		req->npackets = npackets;
+		req->response_size = response_size;
+		req->response_type = TB_CFG_PKG_ICM_RESP;
+
+		mutex_lock(&icm->request_lock);
+		res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
+		mutex_unlock(&icm->request_lock);
+
+		tb_cfg_request_put(req);
+
+		if (res.err != -ETIMEDOUT)
+			return res.err == 1 ? -EIO : res.err;
+
+		usleep_range(20, 50);
+	} while (retries--);
+
+	return -ETIMEDOUT;
+}
+
+static bool icm_fr_is_supported(struct tb *tb)
+{
+	return !is_apple();
+}
+
+static inline int icm_fr_get_switch_index(u32 port)
+{
+	int index;
+
+	if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
+		return 0;
+
+	index = port >> ICM_PORT_INDEX_SHIFT;
+	return index != 0xff ? index : 0;
+}
+
+static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_fr_pkg_get_topology_response *switches, *sw;
+	struct icm_fr_pkg_get_topology request = {
+		.hdr = { .code = ICM_GET_TOPOLOGY },
+	};
+	size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
+	int ret, index;
+	u8 i;
+
+	switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
+	if (!switches)
+		return -ENOMEM;
+
+	ret = icm_request(tb, &request, sizeof(request), switches,
+			  sizeof(*switches), npackets, ICM_TIMEOUT);
+	if (ret)
+		goto err_free;
+
+	sw = &switches[0];
+	index = icm_fr_get_switch_index(sw->ports[link]);
+	if (!index) {
+		ret = -ENODEV;
+		goto err_free;
+	}
+
+	sw = &switches[index];
+	for (i = 1; i < depth; i++) {
+		unsigned int j;
+
+		if (!(sw->first_data & ICM_SWITCH_USED)) {
+			ret = -ENODEV;
+			goto err_free;
+		}
+
+		for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
+			index = icm_fr_get_switch_index(sw->ports[j]);
+			if (index > sw->switch_index) {
+				sw = &switches[index];
+				break;
+			}
+		}
+	}
+
+	*route = get_route(sw->route_hi, sw->route_lo);
+
+err_free:
+	kfree(switches);
+	return ret;
+}
+
+static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_approve_device request;
+	struct icm_fr_pkg_approve_device reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_APPROVE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+
+	memset(&reply, 0, sizeof(reply));
+	/* Use larger timeout as establishing tunnels can take some time */
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, 10000);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "PCIe tunnel creation failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
+{
+	struct icm_fr_pkg_add_device_key request;
+	struct icm_fr_pkg_add_device_key_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_ADD_DEVICE_KEY;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR) {
+		tb_warn(tb, "Adding key to switch failed\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
+				       const u8 *challenge, u8 *response)
+{
+	struct icm_fr_pkg_challenge_device request;
+	struct icm_fr_pkg_challenge_device_response reply;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
+	request.hdr.code = ICM_CHALLENGE_DEVICE;
+	request.connection_id = sw->connection_id;
+	request.connection_key = sw->connection_key;
+	memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EKEYREJECTED;
+	if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
+		return -ENOKEY;
+
+	memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
+
+	return 0;
+}
+
+static void remove_switch(struct tb_switch *sw)
+{
+	struct tb_switch *parent_sw;
+
+	parent_sw = tb_to_switch(sw->dev.parent);
+	tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+	tb_switch_remove(sw);
+}
+
+static void
+icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_connected *pkg =
+		(const struct icm_fr_event_device_connected *)hdr;
+	struct tb_switch *sw, *parent_sw;
+	struct icm *icm = tb_priv(tb);
+	bool authorized = false;
+	u8 link, depth;
+	u64 route;
+	int ret;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+	authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+	ret = icm->get_route(tb, link, depth, &route);
+	if (ret) {
+		tb_err(tb, "failed to find route string for switch at %u.%u\n",
+		       link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
+	if (sw) {
+		u8 phy_port, sw_phy_port;
+
+		parent_sw = tb_to_switch(sw->dev.parent);
+		sw_phy_port = phy_port_from_route(tb_route(sw), sw->depth);
+		phy_port = phy_port_from_route(route, depth);
+
+		/*
+		 * On resume ICM will send us connected events for the
+		 * devices that still are present. However, that
+		 * information might have changed for example by the
+		 * fact that a switch on a dual-link connection might
+		 * have been enumerated using the other link now. Make
+		 * sure our bookkeeping matches that.
+		 */
+		if (sw->depth == depth && sw_phy_port == phy_port &&
+		    !!sw->authorized == authorized) {
+			tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+			tb_port_at(route, parent_sw)->remote =
+				tb_upstream_port(sw);
+			sw->config.route_hi = upper_32_bits(route);
+			sw->config.route_lo = lower_32_bits(route);
+			sw->connection_id = pkg->connection_id;
+			sw->connection_key = pkg->connection_key;
+			sw->link = link;
+			sw->depth = depth;
+			sw->is_unplugged = false;
+			tb_switch_put(sw);
+			return;
+		}
+
+		/*
+		 * User connected the same switch to another physical
+		 * port or to another part of the topology. Remove the
+		 * existing switch now before adding the new one.
+		 */
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	/*
+	 * If the switch was not found by UUID, look for a switch on
+	 * same physical port (taking possible link aggregation into
+	 * account) and depth. If we found one it is definitely a stale
+	 * one so remove it first.
+	 */
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		u8 dual_link;
+
+		dual_link = dual_link_from_link(link);
+		if (dual_link)
+			sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
+	}
+	if (sw) {
+		remove_switch(sw);
+		tb_switch_put(sw);
+	}
+
+	parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
+	if (!parent_sw) {
+		tb_err(tb, "failed to find parent switch for %u.%u\n",
+		       link, depth);
+		return;
+	}
+
+	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
+	if (!sw) {
+		tb_switch_put(parent_sw);
+		return;
+	}
+
+	sw->uuid = kmemdup(&pkg->ep_uuid, sizeof(pkg->ep_uuid), GFP_KERNEL);
+	sw->connection_id = pkg->connection_id;
+	sw->connection_key = pkg->connection_key;
+	sw->link = link;
+	sw->depth = depth;
+	sw->authorized = authorized;
+	sw->security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
+			     ICM_FLAGS_SLEVEL_SHIFT;
+
+	/* Link the two switches now */
+	tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
+	tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
+
+	ret = tb_switch_add(sw);
+	if (ret) {
+		tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
+		tb_switch_put(sw);
+	}
+	tb_switch_put(parent_sw);
+}
+
+static void
+icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_device_disconnected *pkg =
+		(const struct icm_fr_event_device_disconnected *)hdr;
+	struct tb_switch *sw;
+	u8 link, depth;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+
+	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+		return;
+	}
+
+	sw = tb_switch_find_by_link_depth(tb, link, depth);
+	if (!sw) {
+		tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
+			depth);
+		return;
+	}
+
+	remove_switch(sw);
+	tb_switch_put(sw);
+}
+
+static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
+{
+	struct pci_dev *parent;
+
+	parent = pci_upstream_bridge(pdev);
+	while (parent) {
+		if (!pci_is_pcie(parent))
+			return NULL;
+		if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
+			break;
+		parent = pci_upstream_bridge(parent);
+	}
+
+	if (!parent)
+		return NULL;
+
+	switch (parent->device) {
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+		return parent;
+	}
+
+	return NULL;
+}
+
+static bool icm_ar_is_supported(struct tb *tb)
+{
+	struct pci_dev *upstream_port;
+	struct icm *icm = tb_priv(tb);
+
+	/*
+	 * Starting from Alpine Ridge we can use ICM on Apple machines
+	 * as well. We just need to reset and re-enable it first.
+	 */
+	if (!is_apple())
+		return true;
+
+	/*
+	 * Find the upstream PCIe port in case we need to do reset
+	 * through its vendor specific registers.
+	 */
+	upstream_port = get_upstream_port(tb->nhi->pdev);
+	if (upstream_port) {
+		int cap;
+
+		cap = pci_find_ext_capability(upstream_port,
+					      PCI_EXT_CAP_ID_VNDR);
+		if (cap > 0) {
+			icm->upstream_port = upstream_port;
+			icm->vnd_cap = cap;
+
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static int icm_ar_get_mode(struct tb *tb)
+{
+	struct tb_nhi *nhi = tb->nhi;
+	int retries = 5;
+	u32 val;
+
+	do {
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			break;
+		msleep(30);
+	} while (--retries);
+
+	if (!retries) {
+		dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
+		return -ENODEV;
+	}
+
+	return nhi_mailbox_mode(nhi);
+}
+
+static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
+{
+	struct icm_ar_pkg_get_route_response reply;
+	struct icm_ar_pkg_get_route request = {
+		.hdr = { .code = ICM_GET_ROUTE },
+		.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
+	};
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	*route = get_route(reply.route_hi, reply.route_lo);
+	return 0;
+}
+
+static void icm_handle_notification(struct work_struct *work)
+{
+	struct icm_notification *n = container_of(work, typeof(*n), work);
+	struct tb *tb = n->tb;
+	struct icm *icm = tb_priv(tb);
+
+	mutex_lock(&tb->lock);
+
+	switch (n->pkg->code) {
+	case ICM_EVENT_DEVICE_CONNECTED:
+		icm->device_connected(tb, n->pkg);
+		break;
+	case ICM_EVENT_DEVICE_DISCONNECTED:
+		icm->device_disconnected(tb, n->pkg);
+		break;
+	}
+
+	mutex_unlock(&tb->lock);
+
+	kfree(n->pkg);
+	kfree(n);
+}
+
+static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
+			     const void *buf, size_t size)
+{
+	struct icm_notification *n;
+
+	n = kmalloc(sizeof(*n), GFP_KERNEL);
+	if (!n)
+		return;
+
+	INIT_WORK(&n->work, icm_handle_notification);
+	n->pkg = kmemdup(buf, size, GFP_KERNEL);
+	n->tb = tb;
+
+	queue_work(tb->wq, &n->work);
+}
+
+static int
+__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level)
+{
+	struct icm_pkg_driver_ready_response reply;
+	struct icm_pkg_driver_ready request = {
+		.hdr.code = ICM_DRIVER_READY,
+	};
+	unsigned int retries = 10;
+	int ret;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (security_level)
+		*security_level = reply.security_level & 0xf;
+
+	/*
+	 * Hold on here until the switch config space is accessible so
+	 * that we can read root switch config successfully.
+	 */
+	do {
+		struct tb_cfg_result res;
+		u32 tmp;
+
+		res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
+				      0, 1, 100);
+		if (!res.err)
+			return 0;
+
+		msleep(50);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
+{
+	unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
+	u32 cmd;
+
+	do {
+		pci_read_config_dword(icm->upstream_port,
+				      icm->vnd_cap + PCIE2CIO_CMD, &cmd);
+		if (!(cmd & PCIE2CIO_CMD_START)) {
+			if (cmd & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return 0;
+		}
+
+		msleep(50);
+	} while (time_before(jiffies, end));
+
+	return -ETIMEDOUT;
+}
+
+static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
+			 unsigned int port, unsigned int index, u32 *data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int ret, vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	ret = pci2cio_wait_completion(icm, 5000);
+	if (ret)
+		return ret;
+
+	pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
+	return 0;
+}
+
+static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
+			  unsigned int port, unsigned int index, u32 data)
+{
+	struct pci_dev *pdev = icm->upstream_port;
+	int vnd_cap = icm->vnd_cap;
+	u32 cmd;
+
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
+
+	cmd = index;
+	cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+	cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+	cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
+
+	return pci2cio_wait_completion(icm, 5000);
+}
+
+static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 val;
+
+	/* Put ARC to wait for CIO reset event to happen */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_CIO_RESET_REQ;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Re-start ARC */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	val |= REG_FW_STS_ICM_EN_INVERT;
+	val |= REG_FW_STS_ICM_EN_CPU;
+	iowrite32(val, nhi->iobase + REG_FW_STS);
+
+	/* Trigger CIO reset now */
+	return pcie2cio_write(icm, TB_CFG_SWITCH, 0, 0x50, BIT(9));
+}
+
+static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
+{
+	unsigned int retries = 10;
+	int ret;
+	u32 val;
+
+	/* Check if the ICM firmware is already running */
+	val = ioread32(nhi->iobase + REG_FW_STS);
+	if (val & REG_FW_STS_ICM_EN)
+		return 0;
+
+	dev_info(&nhi->pdev->dev, "starting ICM firmware\n");
+
+	ret = icm_firmware_reset(tb, nhi);
+	if (ret)
+		return ret;
+
+	/* Wait until the ICM firmware tells us it is up and running */
+	do {
+		/* Check that the ICM firmware is running */
+		val = ioread32(nhi->iobase + REG_FW_STS);
+		if (val & REG_FW_STS_NVM_AUTH_DONE)
+			return 0;
+
+		msleep(300);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int icm_reset_phy_port(struct tb *tb, int phy_port)
+{
+	struct icm *icm = tb_priv(tb);
+	u32 state0, state1;
+	int port0, port1;
+	u32 val0, val1;
+	int ret;
+
+	if (!icm->upstream_port)
+		return 0;
+
+	if (phy_port) {
+		port0 = 3;
+		port1 = 4;
+	} else {
+		port0 = 1;
+		port1 = 2;
+	}
+
+	/*
+	 * Read link status of both null ports belonging to a single
+	 * physical port.
+	 */
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+	state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
+	state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
+
+	/* If they are both up we need to reset them now */
+	if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
+		return 0;
+
+	val0 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 |= PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+	if (ret)
+		return ret;
+
+	/* Wait a bit and then re-enable both ports */
+	usleep_range(10, 100);
+
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
+	if (ret)
+		return ret;
+	ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
+	if (ret)
+		return ret;
+
+	val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
+	if (ret)
+		return ret;
+
+	val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
+	return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
+}
+
+static int icm_firmware_init(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+	struct tb_nhi *nhi = tb->nhi;
+	int ret;
+
+	ret = icm_firmware_start(tb, nhi);
+	if (ret) {
+		dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
+		return ret;
+	}
+
+	if (icm->get_mode) {
+		ret = icm->get_mode(tb);
+
+		switch (ret) {
+		case NHI_FW_CM_MODE:
+			/* Ask ICM to accept all Thunderbolt devices */
+			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
+			break;
+
+		default:
+			tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
+			return -ENODEV;
+		}
+	}
+
+	/*
+	 * Reset both physical ports if there is anything connected to
+	 * them already.
+	 */
+	ret = icm_reset_phy_port(tb, 0);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
+	ret = icm_reset_phy_port(tb, 1);
+	if (ret)
+		dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
+
+	return 0;
+}
+
+static int icm_driver_ready(struct tb *tb)
+{
+	int ret;
+
+	ret = icm_firmware_init(tb);
+	if (ret)
+		return ret;
+
+	return __icm_driver_ready(tb, &tb->security_level);
+}
+
+static int icm_suspend(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
+}
+
+/*
+ * Mark all switches (except the root switch) below this one unplugged.
+ * The ICM firmware will send us an updated list of switches after we
+ * have sent it the driver ready command. If a switch is not in that
+ * list it will be removed when we perform rescan.
+ */
+static void icm_unplug_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	if (tb_route(sw))
+		sw->is_unplugged = true;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+
+		icm_unplug_children(port->remote->sw);
+	}
+}
+
+static void icm_free_unplugged_children(struct tb_switch *sw)
+{
+	unsigned int i;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (!port->remote)
+			continue;
+
+		if (port->remote->sw->is_unplugged) {
+			tb_switch_remove(port->remote->sw);
+			port->remote = NULL;
+		} else {
+			icm_free_unplugged_children(port->remote->sw);
+		}
+	}
+}
+
+static void icm_rescan_work(struct work_struct *work)
+{
+	struct icm *icm = container_of(work, struct icm, rescan_work.work);
+	struct tb *tb = icm_to_tb(icm);
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch)
+		icm_free_unplugged_children(tb->root_switch);
+	mutex_unlock(&tb->lock);
+}
+
+static void icm_complete(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	if (tb->nhi->going_away)
+		return;
+
+	icm_unplug_children(tb->root_switch);
+
+	/*
+	 * Now all existing children should be resumed, start events
+	 * from ICM to get updated status.
+	 */
+	__icm_driver_ready(tb, NULL);
+
+	/*
+	 * We do not get notifications of devices that have been
+	 * unplugged during suspend so schedule rescan to clean them up
+	 * if any.
+	 */
+	queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
+}
+
+static int icm_start(struct tb *tb)
+{
+	int ret;
+
+	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (!tb->root_switch)
+		return -ENODEV;
+
+	ret = tb_switch_add(tb->root_switch);
+	if (ret)
+		tb_switch_put(tb->root_switch);
+
+	return ret;
+}
+
+static void icm_stop(struct tb *tb)
+{
+	struct icm *icm = tb_priv(tb);
+
+	cancel_delayed_work(&icm->rescan_work);
+	tb_switch_remove(tb->root_switch);
+	tb->root_switch = NULL;
+	nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
+}
+
+/* Falcon Ridge and Alpine Ridge */
+static const struct tb_cm_ops icm_fr_ops = {
+	.driver_ready = icm_driver_ready,
+	.start = icm_start,
+	.stop = icm_stop,
+	.suspend = icm_suspend,
+	.complete = icm_complete,
+	.handle_event = icm_handle_event,
+	.approve_switch = icm_fr_approve_switch,
+	.add_switch_key = icm_fr_add_switch_key,
+	.challenge_switch_key = icm_fr_challenge_switch_key,
+};
+
+struct tb *icm_probe(struct tb_nhi *nhi)
+{
+	struct icm *icm;
+	struct tb *tb;
+
+	tb = tb_domain_alloc(nhi, sizeof(struct icm));
+	if (!tb)
+		return NULL;
+
+	icm = tb_priv(tb);
+	INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
+	mutex_init(&icm->request_lock);
+
+	switch (nhi->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
+		icm->is_supported = icm_fr_is_supported;
+		icm->get_route = icm_fr_get_route;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		tb->cm_ops = &icm_fr_ops;
+		break;
+
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
+		icm->is_supported = icm_ar_is_supported;
+		icm->get_mode = icm_ar_get_mode;
+		icm->get_route = icm_ar_get_route;
+		icm->device_connected = icm_fr_device_connected;
+		icm->device_disconnected = icm_fr_device_disconnected;
+		tb->cm_ops = &icm_fr_ops;
+		break;
+	}
+
+	if (!icm->is_supported || !icm->is_supported(tb)) {
+		dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
+		tb_domain_put(tb);
+		return NULL;
+	}
+
+	return tb;
+}
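
For orientation: the route strings consumed by phy_port_from_route()
above pack one link number per byte, indexed by depth, and tb.h notes
that two links share each physical port. A standalone sketch of that
decoding, illustrative only and assuming the link-to-phy-port pairing
is consecutive (which is what TB_SWITCH_LINKS_PER_PHY_PORT suggests):

#include <stdint.h>
#include <stdio.h>

#define TB_SWITCH_LINKS_PER_PHY_PORT 2

int main(void)
{
	uint64_t route = 0x0301;	/* made-up example: link 1, then link 3 */
	unsigned int depth;

	for (depth = 1; depth <= 2; depth++) {
		/* Byte (depth - 1) selects the link used at that hop */
		unsigned int link = (route >> ((depth - 1) * 8)) & 0xff;
		/* Assumed pairing: links {1,2} -> phy port 0, {3,4} -> 1 */
		unsigned int phy_port = (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;

		printf("depth %u: link %u -> phy port %u\n", depth, link, phy_port);
	}
	return 0;
}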
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 14311535661d..05af126a2435 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -13,7 +13,6 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-#include <linux/dmi.h>
 #include <linux/delay.h>
 
 #include "nhi.h"
@@ -668,6 +667,22 @@ static int nhi_resume_noirq(struct device *dev)
 	return tb_domain_resume_noirq(tb);
 }
 
+static int nhi_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	return tb_domain_suspend(tb);
+}
+
+static void nhi_complete(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct tb *tb = pci_get_drvdata(pdev);
+
+	tb_domain_complete(tb);
+}
+
 static void nhi_shutdown(struct tb_nhi *nhi)
 {
 	int i;
@@ -784,10 +799,16 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	/* magic value - clock related? */
 	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
 
-	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
-	tb = tb_probe(nhi);
-	if (!tb)
+	tb = icm_probe(nhi);
+	if (!tb)
+		tb = tb_probe(nhi);
+	if (!tb) {
+		dev_err(&nhi->pdev->dev,
+			"failed to determine connection manager, aborting\n");
 		return -ENODEV;
+	}
+
+	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
 
 	res = tb_domain_add(tb);
 	if (res) {
@@ -826,6 +847,10 @@ static const struct dev_pm_ops nhi_pm_ops = {
 	 * pci-tunnels stay alive.
 	 */
 	.restore_noirq = nhi_resume_noirq,
+	.suspend = nhi_suspend,
+	.freeze = nhi_suspend,
+	.poweroff = nhi_suspend,
+	.complete = nhi_complete,
 };
 
 static struct pci_device_id nhi_ids[] = {
@@ -886,8 +911,6 @@ static int __init nhi_init(void)
 {
 	int ret;
 
-	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
-		return -ENOSYS;
 	ret = tb_domain_init();
 	if (ret)
 		return ret;
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 322fe1fa3a3c..09ed574e92ff 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -118,4 +118,11 @@ struct ring_desc {
 #define REG_OUTMAIL_CMD_OPMODE_SHIFT	8
 #define REG_OUTMAIL_CMD_OPMODE_MASK	GENMASK(11, 8)
 
+#define REG_FW_STS			0x39944
+#define REG_FW_STS_NVM_AUTH_DONE	BIT(31)
+#define REG_FW_STS_CIO_RESET_REQ	BIT(30)
+#define REG_FW_STS_ICM_EN_CPU		BIT(2)
+#define REG_FW_STS_ICM_EN_INVERT	BIT(1)
+#define REG_FW_STS_ICM_EN		BIT(0)
+
 #endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 4b47e0999cda..1524edf42ee8 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -9,6 +9,9 @@
 
 #include "tb.h"
 
+/* Switch authorization from userspace is serialized by this lock */
+static DEFINE_MUTEX(switch_lock);
+
 /* port utility functions */
 
 static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -310,6 +313,75 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active)
 				       sw->cap_plug_events + 1, 1);
 }
 
+static ssize_t authorized_show(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	return sprintf(buf, "%u\n", sw->authorized);
+}
+
+static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
+{
+	int ret = -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized)
+		goto unlock;
+
+	switch (val) {
+	/* Approve switch */
+	case 1:
+		if (sw->key)
+			ret = tb_domain_approve_switch_key(sw->tb, sw);
+		else
+			ret = tb_domain_approve_switch(sw->tb, sw);
+		break;
+
+	/* Challenge switch */
+	case 2:
+		if (sw->key)
+			ret = tb_domain_challenge_switch_key(sw->tb, sw);
+		break;
+
+	default:
+		break;
+	}
+
+	if (!ret) {
+		sw->authorized = val;
+		/* Notify status change to the userspace */
+		kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
+	}
+
+unlock:
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t authorized_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	unsigned int val;
+	ssize_t ret;
+
+	ret = kstrtouint(buf, 0, &val);
+	if (ret)
+		return ret;
+	if (val > 2)
+		return -EINVAL;
+
+	ret = tb_switch_set_authorized(sw, val);
+
+	return ret ? ret : count;
+}
+static DEVICE_ATTR_RW(authorized);
+
 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -328,6 +400,54 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
 }
 static DEVICE_ATTR_RO(device_name);
 
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	ssize_t ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->key)
+		ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
+	else
+		ret = sprintf(buf, "\n");
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+
+static ssize_t key_store(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u8 key[TB_SWITCH_KEY_SIZE];
+	ssize_t ret = count;
+
+	if (count < 64)
+		return -EINVAL;
+
+	if (hex2bin(key, buf, sizeof(key)))
+		return -EINVAL;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->authorized) {
+		ret = -EBUSY;
+	} else {
+		kfree(sw->key);
+		sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
+		if (!sw->key)
+			ret = -ENOMEM;
+	}
+
+	mutex_unlock(&switch_lock);
+	return ret;
+}
+static DEVICE_ATTR_RW(key);
+
 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -356,15 +476,35 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
 static DEVICE_ATTR_RO(unique_id);
 
 static struct attribute *switch_attrs[] = {
+	&dev_attr_authorized.attr,
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
+	&dev_attr_key.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	&dev_attr_unique_id.attr,
 	NULL,
 };
 
+static umode_t switch_attr_is_visible(struct kobject *kobj,
+				      struct attribute *attr, int n)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct tb_switch *sw = tb_to_switch(dev);
+
+	if (attr == &dev_attr_key.attr) {
+		if (tb_route(sw) &&
+		    sw->tb->security_level == TB_SECURITY_SECURE &&
+		    sw->security_level == TB_SECURITY_SECURE)
+			return attr->mode;
+		return 0;
+	}
+
+	return attr->mode;
+}
+
 static struct attribute_group switch_group = {
+	.is_visible = switch_attr_is_visible,
 	.attrs = switch_attrs,
 };
 
@@ -384,6 +524,7 @@ static void tb_switch_release(struct device *dev)
 	kfree(sw->vendor_name);
 	kfree(sw->ports);
 	kfree(sw->drom);
+	kfree(sw->key);
 	kfree(sw);
 }
 
@@ -490,6 +631,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 	}
 	sw->cap_plug_events = cap;
 
+	/* Root switch is always authorized */
+	if (!route)
+		sw->authorized = true;
+
 	device_initialize(&sw->dev);
 	sw->dev.parent = parent;
 	sw->dev.bus = &tb_bus_type;
@@ -754,3 +899,80 @@ void tb_switch_suspend(struct tb_switch *sw)
 	 * effect?
 	 */
 }
+
+struct tb_sw_lookup {
+	struct tb *tb;
+	u8 link;
+	u8 depth;
+	const uuid_be *uuid;
+};
+
+static int tb_switch_match(struct device *dev, void *data)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	struct tb_sw_lookup *lookup = data;
+
+	if (!sw)
+		return 0;
+	if (sw->tb != lookup->tb)
+		return 0;
+
+	if (lookup->uuid)
+		return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
+
+	/* Root switch is matched only by depth */
+	if (!lookup->depth)
+		return !sw->depth;
+
+	return sw->link == lookup->link && sw->depth == lookup->depth;
+}
+
+/**
+ * tb_switch_find_by_link_depth() - Find switch by link and depth
+ * @tb: Domain the switch belongs to
+ * @link: Link number the switch is connected to
+ * @depth: Depth of the switch in the link
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.link = link;
+	lookup.depth = depth;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_switch_find_by_uuid() - Find switch by UUID
+ * @tb: Domain the switch belongs to
+ * @uuid: UUID to look for
+ *
+ * Returned switch has reference count increased so the caller needs to
+ * call tb_switch_put() when done with the switch.
+ */
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
+{
+	struct tb_sw_lookup lookup;
+	struct device *dev;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.tb = tb;
+	lookup.uuid = uuid;
+
+	dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
+	if (dev)
+		return tb_to_switch(dev);
+
+	return NULL;
+}
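
key_store() above accepts exactly 64 hex characters, which hex2bin()
turns into the 32-byte key. A userspace sketch that generates and
stores such a key (illustrative only; the device path is hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char key[32];
	char hex[65];
	int i, fd;

	/* /dev/urandom is a reasonable source for a one-time key */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd < 0 || read(fd, key, sizeof(key)) != (ssize_t)sizeof(key))
		return 1;
	close(fd);

	for (i = 0; i < 32; i++)
		sprintf(&hex[i * 2], "%02x", key[i]);

	/* Hypothetical device path */
	fd = open("/sys/bus/thunderbolt/devices/0-1/key", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, hex, 64) != 64)
		perror("write");
	close(fd);
	return 0;
}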
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ea9de49b5e10..ad2304bad592 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -7,6 +7,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 
 #include "tb.h"
 #include "tb_regs.h"
@@ -71,6 +72,8 @@ static void tb_scan_port(struct tb_port *port)
 		return;
 	}
 
+	sw->authorized = true;
+
 	if (tb_switch_add(sw)) {
 		tb_switch_put(sw);
 		return;
@@ -443,10 +446,14 @@ struct tb *tb_probe(struct tb_nhi *nhi)
 	struct tb_cm *tcm;
 	struct tb *tb;
 
+	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
+		return NULL;
+
 	tb = tb_domain_alloc(nhi, sizeof(*tcm));
 	if (!tb)
 		return NULL;
 
+	tb->security_level = TB_SECURITY_NONE;
 	tb->cm_ops = &tb_cm_ops;
 
 	tcm = tb_priv(tb);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 31521c531715..a998b3a251d5 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -15,6 +15,24 @@
15#include "dma_port.h" 15#include "dma_port.h"
16 16
17/** 17/**
18 * enum tb_security_level - Thunderbolt security level
19 * @TB_SECURITY_NONE: No security, legacy mode
20 * @TB_SECURITY_USER: User approval required at minimum
21 * @TB_SECURITY_SECURE: One time saved key required at minimum
22 * @TB_SECURITY_DPONLY: Only tunnel Display port (and USB)
23 */
24enum tb_security_level {
25 TB_SECURITY_NONE,
26 TB_SECURITY_USER,
27 TB_SECURITY_SECURE,
28 TB_SECURITY_DPONLY,
29};
30
31#define TB_SWITCH_KEY_SIZE 32
32/* Each physical port contains 2 links on modern controllers */
33#define TB_SWITCH_LINKS_PER_PHY_PORT 2
34
35/**
18 * struct tb_switch - a thunderbolt switch 36 * struct tb_switch - a thunderbolt switch
19 * @dev: Device for the switch 37 * @dev: Device for the switch
20 * @config: Switch configuration 38 * @config: Switch configuration
@@ -33,6 +51,19 @@
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
  * @is_unplugged: The switch is going away
  * @drom: DROM of the switch (%NULL if not found)
+ * @authorized: Whether the switch is authorized by user or policy
+ * @work: Work used to automatically authorize a switch
+ * @security_level: Switch supported security level
+ * @key: Contains the key used to challenge the device or %NULL if not
+ *	 supported. Size of the key is %TB_SWITCH_KEY_SIZE.
+ * @connection_id: Connection ID used with ICM messaging
+ * @connection_key: Connection key used with ICM messaging
+ * @link: Root switch link this switch is connected to (ICM only)
+ * @depth: Depth in the chain this switch is connected at (ICM only)
+ *
+ * When a switch is being added to or removed from the domain (other
+ * switches) you need to have the domain lock held. For switch
+ * authorization the internal switch_lock is enough.
  */
 struct tb_switch {
 	struct device dev;
@@ -50,6 +81,14 @@ struct tb_switch {
 	int cap_plug_events;
 	bool is_unplugged;
 	u8 *drom;
+	unsigned int authorized;
+	struct work_struct work;
+	enum tb_security_level security_level;
+	u8 *key;
+	u8 connection_id;
+	u8 connection_key;
+	u8 link;
+	u8 depth;
 };
 
 /**
@@ -121,19 +160,33 @@ struct tb_path {
 
 /**
  * struct tb_cm_ops - Connection manager specific operations vector
+ * @driver_ready: Called right after control channel is started. Used by
+ *		  ICM to send driver ready message to the firmware.
  * @start: Starts the domain
  * @stop: Stops the domain
  * @suspend_noirq: Connection manager specific suspend_noirq
  * @resume_noirq: Connection manager specific resume_noirq
+ * @suspend: Connection manager specific suspend
+ * @complete: Connection manager specific complete
  * @handle_event: Handle thunderbolt event
+ * @approve_switch: Approve switch
+ * @add_switch_key: Add key to switch
+ * @challenge_switch_key: Challenge switch using key
  */
 struct tb_cm_ops {
+	int (*driver_ready)(struct tb *tb);
 	int (*start)(struct tb *tb);
 	void (*stop)(struct tb *tb);
 	int (*suspend_noirq)(struct tb *tb);
 	int (*resume_noirq)(struct tb *tb);
+	int (*suspend)(struct tb *tb);
+	void (*complete)(struct tb *tb);
 	void (*handle_event)(struct tb *tb, enum tb_cfg_pkg_type,
 			     const void *buf, size_t size);
+	int (*approve_switch)(struct tb *tb, struct tb_switch *sw);
+	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
+	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
+				    const u8 *challenge, u8 *response);
 };
 
 /**
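The tb_domain_*() helpers declared later in this header are expected to be thin wrappers that dispatch through this vector; an op left NULL (the native connection manager implements none of the new ones) turns into an error at the domain level. A sketch of the pattern, with the concrete version belonging in domain.c:

int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
	if (!sw)
		return -EINVAL;
	/* The native connection manager leaves this op NULL */
	if (!tb->cm_ops->approve_switch)
		return -EPERM;
	return tb->cm_ops->approve_switch(tb, sw);
}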
@@ -147,6 +200,7 @@ struct tb_cm_ops {
  * @root_switch: Root switch of this domain
  * @cm_ops: Connection manager specific operations vector
  * @index: Linux assigned domain number
+ * @security_level: Current security level
  * @privdata: Private connection manager specific data
  */
 struct tb {
@@ -158,6 +212,7 @@ struct tb {
 	struct tb_switch *root_switch;
 	const struct tb_cm_ops *cm_ops;
 	int index;
+	enum tb_security_level security_level;
 	unsigned long privdata[0];
 };
 
@@ -188,6 +243,16 @@ static inline u64 tb_route(struct tb_switch *sw)
 	return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo;
 }
 
+static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw)
+{
+	u8 port;
+
+	port = route >> (sw->config.depth * 8);
+	if (WARN_ON(port > sw->config.max_port_number))
+		return NULL;
+	return &sw->ports[port];
+}
+
 static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			     enum tb_cfg_space space, u32 offset, u32 length)
 {
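tb_port_at() leans on the route string layout: one port number per byte, indexed by the hop's depth, with the u8 assignment truncating the higher bytes. A worked example for a route that enters port 2 of the root switch and port 3 of the switch below it:

u64 route = 0x0302;

u8 p0 = route >> (0 * 8);	/* depth 0: port 2 */
u8 p1 = route >> (1 * 8);	/* depth 1: port 3 */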
@@ -266,6 +331,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
 #define tb_port_info(port, fmt, arg...) \
 	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
 
+struct tb *icm_probe(struct tb_nhi *nhi);
 struct tb *tb_probe(struct tb_nhi *nhi);
 
 extern struct bus_type tb_bus_type;
@@ -280,6 +346,11 @@ int tb_domain_add(struct tb *tb);
 void tb_domain_remove(struct tb *tb);
 int tb_domain_suspend_noirq(struct tb *tb);
 int tb_domain_resume_noirq(struct tb *tb);
+int tb_domain_suspend(struct tb *tb);
+void tb_domain_complete(struct tb *tb);
+int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
+int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
 
 static inline void tb_domain_put(struct tb *tb)
 {
@@ -296,6 +367,14 @@ int tb_switch_resume(struct tb_switch *sw);
 int tb_switch_reset(struct tb *tb, u64 route);
 void tb_sw_set_unplugged(struct tb_switch *sw);
 struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route);
+struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link,
+					       u8 depth);
+struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid);
+
+static inline unsigned int tb_switch_phy_port_from_link(unsigned int link)
+{
+	return (link - 1) / TB_SWITCH_LINKS_PER_PHY_PORT;
+}
 
 static inline void tb_switch_put(struct tb_switch *sw)
 {
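Since TB_SWITCH_LINKS_PER_PHY_PORT is 2, the 1-based link numbers the ICM reports fold pairwise onto 0-based physical ports:

tb_switch_phy_port_from_link(1);	/* 0 */
tb_switch_phy_port_from_link(2);	/* 0 */
tb_switch_phy_port_from_link(3);	/* 1 */
tb_switch_phy_port_from_link(4);	/* 1 */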
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 761d56287149..85b6d33c0919 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -13,6 +13,7 @@
 #define _TB_MSGS
 
 #include <linux/types.h>
+#include <linux/uuid.h>
 
 enum tb_cfg_pkg_type {
 	TB_CFG_PKG_READ = 1,
@@ -24,6 +25,9 @@ enum tb_cfg_pkg_type {
 	TB_CFG_PKG_XDOMAIN_RESP = 7,
 	TB_CFG_PKG_OVERRIDE = 8,
 	TB_CFG_PKG_RESET = 9,
+	TB_CFG_PKG_ICM_EVENT = 10,
+	TB_CFG_PKG_ICM_CMD = 11,
+	TB_CFG_PKG_ICM_RESP = 12,
 	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
 
 };
@@ -105,4 +109,152 @@ struct cfg_pts_pkg {
 	u32 data;
 } __packed;
 
+/* ICM messages */
+
+enum icm_pkg_code {
+	ICM_GET_TOPOLOGY = 0x1,
+	ICM_DRIVER_READY = 0x3,
+	ICM_APPROVE_DEVICE = 0x4,
+	ICM_CHALLENGE_DEVICE = 0x5,
+	ICM_ADD_DEVICE_KEY = 0x6,
+	ICM_GET_ROUTE = 0xa,
+};
+
+enum icm_event_code {
+	ICM_EVENT_DEVICE_CONNECTED = 3,
+	ICM_EVENT_DEVICE_DISCONNECTED = 4,
+};
+
+struct icm_pkg_header {
+	u8 code;
+	u8 flags;
+	u8 packet_id;
+	u8 total_packets;
+} __packed;
+
+#define ICM_FLAGS_ERROR			BIT(0)
+#define ICM_FLAGS_NO_KEY		BIT(1)
+#define ICM_FLAGS_SLEVEL_SHIFT		3
+#define ICM_FLAGS_SLEVEL_MASK		GENMASK(4, 3)
+
+struct icm_pkg_driver_ready {
+	struct icm_pkg_header hdr;
+} __packed;
+
+struct icm_pkg_driver_ready_response {
+	struct icm_pkg_header hdr;
+	u8 romver;
+	u8 ramver;
+	u16 security_level;
+} __packed;
+
+/* Falcon Ridge & Alpine Ridge common messages */
+
+struct icm_fr_pkg_get_topology {
+	struct icm_pkg_header hdr;
+} __packed;
+
+#define ICM_GET_TOPOLOGY_PACKETS	14
+
+struct icm_fr_pkg_get_topology_response {
+	struct icm_pkg_header hdr;
+	u32 route_lo;
+	u32 route_hi;
+	u8 first_data;
+	u8 second_data;
+	u8 drom_i2c_address_index;
+	u8 switch_index;
+	u32 reserved[2];
+	u32 ports[16];
+	u32 port_hop_info[16];
+} __packed;
+
+#define ICM_SWITCH_USED			BIT(0)
+#define ICM_SWITCH_UPSTREAM_PORT_MASK	GENMASK(7, 1)
+#define ICM_SWITCH_UPSTREAM_PORT_SHIFT	1
+
+#define ICM_PORT_TYPE_MASK		GENMASK(23, 0)
+#define ICM_PORT_INDEX_SHIFT		24
+#define ICM_PORT_INDEX_MASK		GENMASK(31, 24)
+
+struct icm_fr_event_device_connected {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 link_info;
+	u32 ep_name[55];
+} __packed;
+
+#define ICM_LINK_INFO_LINK_MASK		0x7
+#define ICM_LINK_INFO_DEPTH_SHIFT	4
+#define ICM_LINK_INFO_DEPTH_MASK	GENMASK(7, 4)
+#define ICM_LINK_INFO_APPROVED		BIT(8)
+
+struct icm_fr_pkg_approve_device {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+} __packed;
+
+struct icm_fr_event_device_disconnected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+} __packed;
+
+struct icm_fr_pkg_add_device_key {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 key[8];
+} __packed;
+
+struct icm_fr_pkg_add_device_key_response {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+} __packed;
+
+struct icm_fr_pkg_challenge_device {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+} __packed;
+
+struct icm_fr_pkg_challenge_device_response {
+	struct icm_pkg_header hdr;
+	uuid_be ep_uuid;
+	u8 connection_key;
+	u8 connection_id;
+	u16 reserved;
+	u32 challenge[8];
+	u32 response[8];
+} __packed;
+
+/* Alpine Ridge only messages */
+
+struct icm_ar_pkg_get_route {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+} __packed;
+
+struct icm_ar_pkg_get_route_response {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	u32 route_hi;
+	u32 route_lo;
+} __packed;
+
 #endif
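A few decoding notes on the message definitions above. The ICM_FLAGS_SLEVEL_* masks suggest the firmware reports the current security level in the header flags byte of the driver ready response; a sketch of extracting it (helper name hypothetical):

static enum tb_security_level
icm_slevel(const struct icm_pkg_driver_ready_response *res)
{
	return (res->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
	       ICM_FLAGS_SLEVEL_SHIFT;
}

Similarly, link_info in the device connected/disconnected events packs the link number, depth and approval state per the ICM_LINK_INFO_* masks:

u8 link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
u8 depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
	   ICM_LINK_INFO_DEPTH_SHIFT;
bool approved = pkg->link_info & ICM_LINK_INFO_APPROVED;

Finally, the challenge/response structures line up with the new CRYPTO and CRYPTO_HASH selections in Kconfig: the host presumably validates the device's response as an HMAC-SHA256 of the 32-byte challenge keyed with the stored device key. A sketch under that assumption (needs <crypto/hash.h>; helper name hypothetical, error paths trimmed):

static int tb_verify_response(const u8 *key, const u8 *challenge,
			      const u8 *response)
{
	u8 hmac[TB_SWITCH_KEY_SIZE];
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_shash_setkey(tfm, key, TB_SWITCH_KEY_SIZE);
	if (!ret) {
		SHASH_DESC_ON_STACK(shash, tfm);

		shash->tfm = tfm;
		shash->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
		ret = crypto_shash_digest(shash, challenge,
					  TB_SWITCH_KEY_SIZE, hmac);
		if (!ret && memcmp(hmac, response, sizeof(hmac)))
			ret = -EKEYREJECTED;
	}
	crypto_free_shash(tfm);
	return ret;
}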