author		Mika Westerberg <mika.westerberg@linux.intel.com>	2017-10-02 06:38:34 -0400
committer	David S. Miller <davem@davemloft.net>	2017-10-02 14:24:41 -0400
commit		d1ff70241a275133e1a0258b7c23588b122276c8 (patch)
tree		6a17afeb8c80542e7444d7e1530fd856dc7d0fc8
parent		e69b71f8458b78a2ef44e3d07374a8f46e45123d (diff)
thunderbolt: Add support for XDomain discovery protocol
When two hosts are connected over a Thunderbolt cable, there is a protocol they can use to communicate capabilities supported by the host. The discovery protocol uses the automatically configured control channel (ring 0) and is built on top of request/response transactions using special XDomain primitives provided by the Thunderbolt base protocol.

The capabilities consist of a root directory block of basic properties used for identification of the host, and then zero or more directories, each describing a Thunderbolt service and its capabilities.

Once both sides have discovered what is supported, the two hosts can set up high-speed DMA paths and transfer data to the other side using whatever protocol was agreed on based on the properties. The software protocol used to communicate which DMA paths to enable is service specific.

This patch adds support for the XDomain discovery protocol to the Thunderbolt bus. We model each remote host connection as a Linux XDomain device. For each Thunderbolt service found supported on the XDomain device, we create a Linux Thunderbolt service device to which Thunderbolt service drivers can then bind based on the protocol identification information retrieved from the property directory describing the service.

This code is based on the work done by Amir Levy and Michael Jamet.

Signed-off-by: Michael Jamet <michael.jamet@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
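To make the binding model concrete, here is a minimal sketch of a service driver for this bus. It assumes the struct tb_service_driver / struct tb_service_id types and the TB_SERVICE() helper this patch adds in include/linux/thunderbolt.h; the "network" key, protocol ID and driver name are illustrative values, not something this patch ships:

  #include <linux/module.h>
  #include <linux/thunderbolt.h>

  /* Illustrative: match a "network" property directory, protocol ID 1 */
  static const struct tb_service_id example_ids[] = {
  	{ TB_SERVICE("network", 1) },
  	{ },
  };
  MODULE_DEVICE_TABLE(tbsvc, example_ids);

  static int example_probe(struct tb_service *svc,
  			 const struct tb_service_id *id)
  {
  	/* Negotiate and enable DMA paths with the remote host here */
  	return 0;
  }

  static void example_remove(struct tb_service *svc)
  {
  	/* Tear down the paths and release resources */
  }

  static struct tb_service_driver example_driver = {
  	.driver = {
  		.owner = THIS_MODULE,
  		.name = "example-service",
  	},
  	.probe = example_probe,
  	.remove = example_remove,
  	.id_table = example_ids,
  };

  module_driver(example_driver, tb_register_service_driver,
  	      tb_unregister_service_driver);
  MODULE_LICENSE("GPL");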
-rw-r--r--	Documentation/ABI/testing/sysfs-bus-thunderbolt	|   48
-rw-r--r--	drivers/thunderbolt/Makefile			|    2
-rw-r--r--	drivers/thunderbolt/ctl.c			|   11
-rw-r--r--	drivers/thunderbolt/ctl.h			|    2
-rw-r--r--	drivers/thunderbolt/domain.c			|  197
-rw-r--r--	drivers/thunderbolt/icm.c			|  218
-rw-r--r--	drivers/thunderbolt/nhi.h			|    2
-rw-r--r--	drivers/thunderbolt/switch.c			|    7
-rw-r--r--	drivers/thunderbolt/tb.h			|   39
-rw-r--r--	drivers/thunderbolt/tb_msgs.h			|  123
-rw-r--r--	drivers/thunderbolt/xdomain.c			| 1576
-rw-r--r--	include/linux/mod_devicetable.h			|   26
-rw-r--r--	include/linux/thunderbolt.h			|  242
-rw-r--r--	scripts/mod/devicetable-offsets.c		|    7
-rw-r--r--	scripts/mod/file2alias.c			|   25
15 files changed, 2507 insertions, 18 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 392bef5bd399..93798c02e28b 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -110,3 +110,51 @@ Description: When new NVM image is written to the non-active NVM
 		is directly the status value from the DMA configuration
 		based mailbox before the device is power cycled. Writing
 		0 here clears the status.
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/key
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	This contains the name of the property directory the XDomain
+		service exposes. This entry describes the protocol in
+		question. The following directories are already reserved by
+		the Apple XDomain specification:
+
+		network:  IP/ethernet over Thunderbolt
+		targetdm: Target disk mode protocol over Thunderbolt
+		extdisp:  External display mode protocol over Thunderbolt
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/modalias
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	Stores the same MODALIAS value emitted by uevent for
+		the XDomain service. Format: tbsvc:kSpNvNrN
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcid
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	This contains the XDomain protocol identifier the XDomain
+		service supports.
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcvers
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	This contains the XDomain protocol version the XDomain
+		service supports.
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcrevs
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	This contains the XDomain software version the XDomain
+		service supports.
+
+What:		/sys/bus/thunderbolt/devices/<xdomain>.<service>/prtcstns
+Date:		Jan 2018
+KernelVersion:	4.15
+Contact:	thunderbolt-software@lists.01.org
+Description:	This contains XDomain service specific settings as a
+		bitmask. Format: %x
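For illustration, a hypothetical service whose property directory key is "network" with prtcid, prtcvers and prtcrevs all set to 1 would produce the following modalias, per the "tbsvc:k%sp%08Xv%08Xr%08X" template in get_modalias() further down in this patch:

  tbsvc:knetworkp00000001v00000001r00000001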
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 7afd21f5383a..f2f0de27252b 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-thunderbolt-objs += domain.o dma_port.o icm.o property.o
+thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e6a4c9458c76..46e393c5fd1d 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -368,10 +368,10 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
 /**
  * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
  */
-static void tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
+static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
 				struct ctl_pkg *pkg, size_t size)
 {
-	ctl->callback(ctl->callback_data, type, pkg->buffer, size);
+	return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
 }
 
 static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
@@ -444,6 +444,8 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 		break;
 
 	case TB_CFG_PKG_EVENT:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+	case TB_CFG_PKG_XDOMAIN_REQ:
 		if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
 			tb_ctl_err(pkg->ctl,
 				   "RX: checksum mismatch, dropping packet\n");
@@ -451,8 +453,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
 		}
 		/* Fall through */
 	case TB_CFG_PKG_ICM_EVENT:
-		tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size);
-		goto rx;
+		if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
+			goto rx;
+		break;
 
 	default:
 		break;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index d0f21e1e0b8b..85c49dd301ea 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -16,7 +16,7 @@
 /* control channel */
 struct tb_ctl;
 
-typedef void (*event_cb)(void *data, enum tb_cfg_pkg_type type,
+typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
 			 const void *buf, size_t size);
 
 struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data);
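The bool return value feeds the tb_ctl_rx_callback() change above: returning true makes the control layer resubmit the RX frame (the goto rx path). A minimal callback consistent with that contract could look like this sketch:

  static bool example_event_cb(void *data, enum tb_cfg_pkg_type type,
  			     const void *buf, size_t size)
  {
  	/* Handle or queue the event; returning true tells the control
  	 * layer to resubmit the RX frame so it can receive further
  	 * packets. */
  	return true;
  }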
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9f2dcd48974d..9b90115319ce 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -20,6 +20,98 @@
 
 static DEFINE_IDA(tb_domain_ida);
 
+static bool match_service_id(const struct tb_service_id *id,
+			     const struct tb_service *svc)
+{
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
+		if (strcmp(id->protocol_key, svc->key))
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
+		if (id->protocol_id != svc->prtcid)
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
+		if (id->protocol_version != svc->prtcvers)
+			return false;
+	}
+
+	if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
+		if (id->protocol_revision != svc->prtcrevs)
+			return false;
+	}
+
+	return true;
+}
+
+static const struct tb_service_id *__tb_service_match(struct device *dev,
+						      struct device_driver *drv)
+{
+	struct tb_service_driver *driver;
+	const struct tb_service_id *ids;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return NULL;
+
+	driver = container_of(drv, struct tb_service_driver, driver);
+	if (!driver->id_table)
+		return NULL;
+
+	for (ids = driver->id_table; ids->match_flags != 0; ids++) {
+		if (match_service_id(ids, svc))
+			return ids;
+	}
+
+	return NULL;
+}
+
+static int tb_service_match(struct device *dev, struct device_driver *drv)
+{
+	return !!__tb_service_match(dev, drv);
+}
+
+static int tb_service_probe(struct device *dev)
+{
+	struct tb_service *svc = tb_to_service(dev);
+	struct tb_service_driver *driver;
+	const struct tb_service_id *id;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	id = __tb_service_match(dev, &driver->driver);
+
+	return driver->probe(svc, id);
+}
+
+static int tb_service_remove(struct device *dev)
+{
+	struct tb_service *svc = tb_to_service(dev);
+	struct tb_service_driver *driver;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	if (driver->remove)
+		driver->remove(svc);
+
+	return 0;
+}
+
+static void tb_service_shutdown(struct device *dev)
+{
+	struct tb_service_driver *driver;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc || !dev->driver)
+		return;
+
+	driver = container_of(dev->driver, struct tb_service_driver, driver);
+	if (driver->shutdown)
+		driver->shutdown(svc);
+}
+
 static const char * const tb_security_names[] = {
 	[TB_SECURITY_NONE] = "none",
 	[TB_SECURITY_USER] = "user",
@@ -52,6 +144,10 @@ static const struct attribute_group *domain_attr_groups[] = {
 
 struct bus_type tb_bus_type = {
 	.name = "thunderbolt",
+	.match = tb_service_match,
+	.probe = tb_service_probe,
+	.remove = tb_service_remove,
+	.shutdown = tb_service_shutdown,
 };
 
 static void tb_domain_release(struct device *dev)
@@ -128,17 +224,26 @@ err_free:
 	return NULL;
 }
 
-static void tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
+static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
 			       const void *buf, size_t size)
 {
 	struct tb *tb = data;
 
 	if (!tb->cm_ops->handle_event) {
 		tb_warn(tb, "domain does not have event handler\n");
-		return;
+		return true;
 	}
 
-	tb->cm_ops->handle_event(tb, type, buf, size);
+	switch (type) {
+	case TB_CFG_PKG_XDOMAIN_REQ:
+	case TB_CFG_PKG_XDOMAIN_RESP:
+		return tb_xdomain_handle_request(tb, type, buf, size);
+
+	default:
+		tb->cm_ops->handle_event(tb, type, buf, size);
+	}
+
+	return true;
 }
 
 /**
@@ -443,9 +548,92 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb)
 	return tb->cm_ops->disconnect_pcie_paths(tb);
 }
 
+/**
+ * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
+ * @tb: Domain enabling the DMA paths
+ * @xd: XDomain DMA paths are created to
+ *
+ * Calls connection manager specific method to enable DMA paths to the
+ * XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!tb->cm_ops->approve_xdomain_paths)
+		return -ENOTSUPP;
+
+	return tb->cm_ops->approve_xdomain_paths(tb, xd);
+}
+
+/**
+ * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
+ * @tb: Domain disabling the DMA paths
+ * @xd: XDomain whose DMA paths are disconnected
+ *
+ * Calls connection manager specific method to disconnect DMA paths to
+ * the XDomain in question.
+ *
+ * Return: %0 in case of success and negative errno otherwise. In
+ * particular returns %-ENOTSUPP if the connection manager
+ * implementation does not support XDomains.
+ */
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!tb->cm_ops->disconnect_xdomain_paths)
+		return -ENOTSUPP;
+
+	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+}
+
+static int disconnect_xdomain(struct device *dev, void *data)
+{
+	struct tb_xdomain *xd;
+	struct tb *tb = data;
+	int ret = 0;
+
+	xd = tb_to_xdomain(dev);
+	if (xd && xd->tb == tb)
+		ret = tb_xdomain_disable_paths(xd);
+
+	return ret;
+}
+
+/**
+ * tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
+ * @tb: Domain whose paths are disconnected
+ *
+ * This function can be used to disconnect all paths (PCIe, XDomain) for
+ * example in preparation for host NVM firmware upgrade. After this is
+ * called the paths cannot be established without resetting the switch.
+ *
+ * Return: %0 in case of success and negative errno otherwise.
+ */
+int tb_domain_disconnect_all_paths(struct tb *tb)
+{
+	int ret;
+
+	ret = tb_domain_disconnect_pcie_paths(tb);
+	if (ret)
+		return ret;
+
+	return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
+}
+
 int tb_domain_init(void)
 {
-	return bus_register(&tb_bus_type);
+	int ret;
+
+	ret = tb_xdomain_init();
+	if (ret)
+		return ret;
+	ret = bus_register(&tb_bus_type);
+	if (ret)
+		tb_xdomain_exit();
+
+	return ret;
 }
 
 void tb_domain_exit(void)
@@ -453,4 +641,5 @@ void tb_domain_exit(void)
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
 	tb_switch_exit();
+	tb_xdomain_exit();
 }
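For reference, an id table entry built with the TB_SERVICE() helper (added by this patch in include/linux/thunderbolt.h) sets only TBSVC_MATCH_PROTOCOL_KEY and TBSVC_MATCH_PROTOCOL_ID, so match_service_id() above accepts any prtcvers/prtcrevs the remote host announces. A sketch with illustrative values:

  /* Matches every "network" service with prtcid 1, regardless of
   * protocol version/revision. */
  static const struct tb_service_id example_ids[] = {
  	{ TB_SERVICE("network", 1) },
  	{ },
  };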
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 8c22b91ed040..ab02d13f40b7 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -60,6 +60,8 @@
  * @get_route: Find a route string for given switch
  * @device_connected: Handle device connected ICM message
  * @device_disconnected: Handle device disconnected ICM message
+ * @xdomain_connected: Handle XDomain connected ICM message
+ * @xdomain_disconnected: Handle XDomain disconnected ICM message
  */
 struct icm {
 	struct mutex request_lock;
@@ -74,6 +76,10 @@ struct icm {
 			    const struct icm_pkg_header *hdr);
 	void (*device_disconnected)(struct tb *tb,
 				    const struct icm_pkg_header *hdr);
+	void (*xdomain_connected)(struct tb *tb,
+				  const struct icm_pkg_header *hdr);
+	void (*xdomain_disconnected)(struct tb *tb,
+				     const struct icm_pkg_header *hdr);
 };
 
 struct icm_notification {
@@ -89,7 +95,10 @@ static inline struct tb *icm_to_tb(struct icm *icm)
 
 static inline u8 phy_port_from_route(u64 route, u8 depth)
 {
-	return tb_phy_port_from_link(route >> ((depth - 1) * 8));
+	u8 link;
+
+	link = depth ? route >> ((depth - 1) * 8) : route;
+	return tb_phy_port_from_link(link);
 }
 
 static inline u8 dual_link_from_link(u8 link)
@@ -320,6 +329,51 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 	return 0;
 }
 
+static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct icm_fr_pkg_approve_xdomain_response reply;
+	struct icm_fr_pkg_approve_xdomain request;
+	int ret;
+
+	memset(&request, 0, sizeof(request));
+	request.hdr.code = ICM_APPROVE_XDOMAIN;
+	request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
+	memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
+
+	request.transmit_path = xd->transmit_path;
+	request.transmit_ring = xd->transmit_ring;
+	request.receive_path = xd->receive_path;
+	request.receive_ring = xd->receive_ring;
+
+	memset(&reply, 0, sizeof(reply));
+	ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
+			  1, ICM_TIMEOUT);
+	if (ret)
+		return ret;
+
+	if (reply.hdr.flags & ICM_FLAGS_ERROR)
+		return -EIO;
+
+	return 0;
+}
+
+static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	u8 phy_port;
+	u8 cmd;
+
+	phy_port = tb_phy_port_from_link(xd->link);
+	if (phy_port == 0)
+		cmd = NHI_MAILBOX_DISCONNECT_PA;
+	else
+		cmd = NHI_MAILBOX_DISCONNECT_PB;
+
+	nhi_mailbox_cmd(tb->nhi, cmd, 1);
+	usleep_range(10, 50);
+	nhi_mailbox_cmd(tb->nhi, cmd, 2);
+	return 0;
+}
+
 static void remove_switch(struct tb_switch *sw)
 {
 	struct tb_switch *parent_sw;
@@ -475,6 +529,141 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
 	tb_switch_put(sw);
 }
 
+static void remove_xdomain(struct tb_xdomain *xd)
+{
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	tb_port_at(xd->route, sw)->xdomain = NULL;
+	tb_xdomain_remove(xd);
+}
+
+static void
+icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
+{
+	const struct icm_fr_event_xdomain_connected *pkg =
+		(const struct icm_fr_event_xdomain_connected *)hdr;
+	struct tb_xdomain *xd;
+	struct tb_switch *sw;
+	u8 link, depth;
+	bool approved;
+	u64 route;
+
+	/*
+	 * After NVM upgrade adding root switch device fails because we
+	 * initiated reset. During that time ICM might still send
+	 * XDomain connected message which we ignore here.
+	 */
+	if (!tb->root_switch)
+		return;
+
+	link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
+	depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
+		ICM_LINK_INFO_DEPTH_SHIFT;
+	approved = pkg->link_info & ICM_LINK_INFO_APPROVED;
+
+	if (link > ICM_MAX_LINK || depth > ICM_MAX_DEPTH) {
+		tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
+		return;
+	}
+
+	route = get_route(pkg->local_route_hi, pkg->local_route_lo);
+
+	xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
+	if (xd) {
+		u8 xd_phy_port, phy_port;
+
+		xd_phy_port = phy_port_from_route(xd->route, xd->depth);
+		phy_port = phy_port_from_route(route, depth);
+
+		if (xd->depth == depth && xd_phy_port == phy_port) {
+			xd->link = link;
+			xd->route = route;
+			xd->is_unplugged = false;
+			tb_xdomain_put(xd);
+			return;
+		}
+
+		/*
+		 * If we find an existing XDomain connection remove it
+		 * now. We need to go through login handshake and
+		 * everything anyway to be able to re-establish the
+		 * connection.
+		 */
+		remove_xdomain(xd);
+		tb_xdomain_put(xd);
+	}
+
598 * Look if there already exists an XDomain in the same place
599 * than the new one and in that case remove it because it is
600 * most likely another host that got disconnected.
601 */
602 xd = tb_xdomain_find_by_link_depth(tb, link, depth);
603 if (!xd) {
604 u8 dual_link;
605
606 dual_link = dual_link_from_link(link);
607 if (dual_link)
608 xd = tb_xdomain_find_by_link_depth(tb, dual_link,
609 depth);
610 }
611 if (xd) {
612 remove_xdomain(xd);
613 tb_xdomain_put(xd);
614 }
615
616 /*
617 * If the user disconnected a switch during suspend and
618 * connected another host to the same port, remove the switch
619 * first.
620 */
621 sw = get_switch_at_route(tb->root_switch, route);
622 if (sw)
623 remove_switch(sw);
624
625 sw = tb_switch_find_by_link_depth(tb, link, depth);
626 if (!sw) {
627 tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
628 depth);
629 return;
630 }
631
632 xd = tb_xdomain_alloc(sw->tb, &sw->dev, route,
633 &pkg->local_uuid, &pkg->remote_uuid);
634 if (!xd) {
635 tb_switch_put(sw);
636 return;
637 }
638
639 xd->link = link;
640 xd->depth = depth;
641
642 tb_port_at(route, sw)->xdomain = xd;
643
644 tb_xdomain_add(xd);
645 tb_switch_put(sw);
646}
647
648static void
649icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
650{
651 const struct icm_fr_event_xdomain_disconnected *pkg =
652 (const struct icm_fr_event_xdomain_disconnected *)hdr;
653 struct tb_xdomain *xd;
654
655 /*
656 * If the connection is through one or multiple devices, the
657 * XDomain device is removed along with them so it is fine if we
658 * cannot find it here.
659 */
660 xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
661 if (xd) {
662 remove_xdomain(xd);
663 tb_xdomain_put(xd);
664 }
665}
666
478static struct pci_dev *get_upstream_port(struct pci_dev *pdev) 667static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
479{ 668{
480 struct pci_dev *parent; 669 struct pci_dev *parent;
@@ -594,6 +783,12 @@ static void icm_handle_notification(struct work_struct *work)
 	case ICM_EVENT_DEVICE_DISCONNECTED:
 		icm->device_disconnected(tb, n->pkg);
 		break;
+	case ICM_EVENT_XDOMAIN_CONNECTED:
+		icm->xdomain_connected(tb, n->pkg);
+		break;
+	case ICM_EVENT_XDOMAIN_DISCONNECTED:
+		icm->xdomain_disconnected(tb, n->pkg);
+		break;
 	}
 
 	mutex_unlock(&tb->lock);
@@ -927,6 +1122,10 @@ static void icm_unplug_children(struct tb_switch *sw)
 
 		if (tb_is_upstream_port(port))
 			continue;
+		if (port->xdomain) {
+			port->xdomain->is_unplugged = true;
+			continue;
+		}
 		if (!port->remote)
 			continue;
 
@@ -943,6 +1142,13 @@ static void icm_free_unplugged_children(struct tb_switch *sw)
 
 		if (tb_is_upstream_port(port))
 			continue;
+
+		if (port->xdomain && port->xdomain->is_unplugged) {
+			tb_xdomain_remove(port->xdomain);
+			port->xdomain = NULL;
+			continue;
+		}
+
 		if (!port->remote)
 			continue;
 
@@ -1009,8 +1215,10 @@ static int icm_start(struct tb *tb)
 	tb->root_switch->no_nvm_upgrade = x86_apple_machine;
 
 	ret = tb_switch_add(tb->root_switch);
-	if (ret)
+	if (ret) {
 		tb_switch_put(tb->root_switch);
+		tb->root_switch = NULL;
+	}
 
 	return ret;
 }
@@ -1042,6 +1250,8 @@ static const struct tb_cm_ops icm_fr_ops = {
 	.add_switch_key = icm_fr_add_switch_key,
 	.challenge_switch_key = icm_fr_challenge_switch_key,
 	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
+	.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
+	.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
 };
 
 struct tb *icm_probe(struct tb_nhi *nhi)
@@ -1064,6 +1274,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 		icm->get_route = icm_fr_get_route;
 		icm->device_connected = icm_fr_device_connected;
 		icm->device_disconnected = icm_fr_device_disconnected;
+		icm->xdomain_connected = icm_fr_xdomain_connected;
+		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
 		tb->cm_ops = &icm_fr_ops;
 		break;
 
@@ -1077,6 +1289,8 @@ struct tb *icm_probe(struct tb_nhi *nhi)
 		icm->get_route = icm_ar_get_route;
 		icm->device_connected = icm_fr_device_connected;
 		icm->device_disconnected = icm_fr_device_disconnected;
+		icm->xdomain_connected = icm_fr_xdomain_connected;
+		icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
 		tb->cm_ops = &icm_fr_ops;
 		break;
 	}
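The link_info field used by icm_fr_approve_xdomain_paths() and icm_fr_xdomain_connected() above packs the remote host's topology position; a sketch of the round trip, with hypothetical depth/link values:

  static void example_link_info(void)
  {
  	/* Pack depth 2, link 1 the way icm_fr_approve_xdomain_paths()
  	 * does, then unpack with the masks icm_fr_xdomain_connected()
  	 * uses. */
  	u16 link_info = 2 << ICM_LINK_INFO_DEPTH_SHIFT | 1;
  	u8 link = link_info & ICM_LINK_INFO_LINK_MASK;
  	u8 depth = (link_info & ICM_LINK_INFO_DEPTH_MASK) >>
  		   ICM_LINK_INFO_DEPTH_SHIFT;
  }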
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 5b5bb2c436be..0e05828983db 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -157,6 +157,8 @@ enum nhi_mailbox_cmd {
 	NHI_MAILBOX_SAVE_DEVS = 0x05,
 	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
 	NHI_MAILBOX_DRV_UNLOADS = 0x07,
+	NHI_MAILBOX_DISCONNECT_PA = 0x10,
+	NHI_MAILBOX_DISCONNECT_PB = 0x11,
 	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
 };
 
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 53f40c57df59..dfc357d33e1e 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -171,11 +171,11 @@ static int nvm_authenticate_host(struct tb_switch *sw)
 
 	/*
 	 * Root switch NVM upgrade requires that we disconnect the
-	 * existing PCIe paths first (in case it is not in safe mode
+	 * existing paths first (in case it is not in safe mode
 	 * already).
 	 */
 	if (!sw->safe_mode) {
-		ret = tb_domain_disconnect_pcie_paths(sw->tb);
+		ret = tb_domain_disconnect_all_paths(sw->tb);
 		if (ret)
 			return ret;
 		/*
@@ -1363,6 +1363,9 @@ void tb_switch_remove(struct tb_switch *sw)
 		if (sw->ports[i].remote)
 			tb_switch_remove(sw->ports[i].remote->sw);
 		sw->ports[i].remote = NULL;
+		if (sw->ports[i].xdomain)
+			tb_xdomain_remove(sw->ports[i].xdomain);
+		sw->ports[i].xdomain = NULL;
 	}
 
 	if (!sw->is_unplugged)
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index ea21d927bd09..74af9d4929ab 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -9,6 +9,7 @@
 
 #include <linux/nvmem-provider.h>
 #include <linux/pci.h>
+#include <linux/thunderbolt.h>
 #include <linux/uuid.h>
 
 #include "tb_regs.h"
@@ -109,14 +110,25 @@ struct tb_switch {
 
 /**
  * struct tb_port - a thunderbolt port, part of a tb_switch
+ * @config: Cached port configuration read from registers
+ * @sw: Switch the port belongs to
+ * @remote: Remote port (%NULL if not connected)
+ * @xdomain: Remote host (%NULL if not connected)
+ * @cap_phy: Offset, zero if not found
+ * @port: Port number on switch
+ * @disabled: Disabled by eeprom
+ * @dual_link_port: If the switch is connected using two ports, points
+ *		    to the other port.
+ * @link_nr: Is this primary or secondary port on the dual_link.
  */
 struct tb_port {
 	struct tb_regs_port_header config;
 	struct tb_switch *sw;
-	struct tb_port *remote; /* remote port, NULL if not connected */
-	int cap_phy; /* offset, zero if not found */
-	u8 port; /* port number on switch */
-	bool disabled; /* disabled by eeprom */
+	struct tb_port *remote;
+	struct tb_xdomain *xdomain;
+	int cap_phy;
+	u8 port;
+	bool disabled;
 	struct tb_port *dual_link_port;
 	u8 link_nr:1;
 };
@@ -189,6 +201,8 @@ struct tb_path {
  * @add_switch_key: Add key to switch
  * @challenge_switch_key: Challenge switch using key
  * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
+ * @approve_xdomain_paths: Approve (establish) XDomain DMA paths
+ * @disconnect_xdomain_paths: Disconnect XDomain DMA paths
  */
 struct tb_cm_ops {
 	int (*driver_ready)(struct tb *tb);
@@ -205,6 +219,8 @@ struct tb_cm_ops {
 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
 				    const u8 *challenge, u8 *response);
 	int (*disconnect_pcie_paths)(struct tb *tb);
+	int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
+	int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd);
 };
 
 static inline void *tb_priv(struct tb *tb)
@@ -331,6 +347,8 @@ extern struct device_type tb_switch_type;
 int tb_domain_init(void);
 void tb_domain_exit(void);
 void tb_switch_exit(void);
+int tb_xdomain_init(void);
+void tb_xdomain_exit(void);
 
 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
 int tb_domain_add(struct tb *tb);
@@ -343,6 +361,9 @@ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_disconnect_pcie_paths(struct tb *tb);
+int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd);
+int tb_domain_disconnect_all_paths(struct tb *tb);
 
 static inline void tb_domain_put(struct tb *tb)
 {
@@ -422,4 +443,14 @@ static inline u64 tb_downstream_route(struct tb_port *port)
 		| ((u64) port->port << (port->sw->config.depth * 8));
 }
 
+bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
+			       const void *buf, size_t size);
+struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
+				    u64 route, const uuid_t *local_uuid,
+				    const uuid_t *remote_uuid);
+void tb_xdomain_add(struct tb_xdomain *xd);
+void tb_xdomain_remove(struct tb_xdomain *xd);
+struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
+						 u8 depth);
+
 #endif
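A connection manager implementation other than the ICM would opt in to XDomain DMA paths by filling in the two new tb_cm_ops hooks; a minimal sketch (function names illustrative):

  static int example_approve_xdomain_paths(struct tb *tb,
  					 struct tb_xdomain *xd)
  {
  	/* Program the hardware to establish the DMA paths the two
  	 * hosts negotiated (transmit/receive path and ring in @xd). */
  	return 0;
  }

  static int example_disconnect_xdomain_paths(struct tb *tb,
  					    struct tb_xdomain *xd)
  {
  	/* Tear the DMA paths down again. */
  	return 0;
  }

  static const struct tb_cm_ops example_cm_ops = {
  	.approve_xdomain_paths = example_approve_xdomain_paths,
  	.disconnect_xdomain_paths = example_disconnect_xdomain_paths,
  };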
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index f2b2550cd97c..b0a092baa605 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -101,11 +101,14 @@ enum icm_pkg_code {
 	ICM_CHALLENGE_DEVICE = 0x5,
 	ICM_ADD_DEVICE_KEY = 0x6,
 	ICM_GET_ROUTE = 0xa,
+	ICM_APPROVE_XDOMAIN = 0x10,
 };
 
 enum icm_event_code {
 	ICM_EVENT_DEVICE_CONNECTED = 3,
 	ICM_EVENT_DEVICE_DISCONNECTED = 4,
+	ICM_EVENT_XDOMAIN_CONNECTED = 6,
+	ICM_EVENT_XDOMAIN_DISCONNECTED = 7,
 };
 
 struct icm_pkg_header {
@@ -188,6 +191,25 @@ struct icm_fr_event_device_disconnected {
 	u16 link_info;
 };
 
+struct icm_fr_event_xdomain_connected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	uuid_t local_uuid;
+	u32 local_route_hi;
+	u32 local_route_lo;
+	u32 remote_route_hi;
+	u32 remote_route_lo;
+};
+
+struct icm_fr_event_xdomain_disconnected {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+};
+
 struct icm_fr_pkg_add_device_key {
 	struct icm_pkg_header hdr;
 	uuid_t ep_uuid;
@@ -224,6 +246,28 @@ struct icm_fr_pkg_challenge_device_response {
 	u32 response[8];
 };
 
+struct icm_fr_pkg_approve_xdomain {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
+struct icm_fr_pkg_approve_xdomain_response {
+	struct icm_pkg_header hdr;
+	u16 reserved;
+	u16 link_info;
+	uuid_t remote_uuid;
+	u16 transmit_path;
+	u16 transmit_ring;
+	u16 receive_path;
+	u16 receive_ring;
+};
+
 /* Alpine Ridge only messages */
 
 struct icm_ar_pkg_get_route {
@@ -240,4 +284,83 @@ struct icm_ar_pkg_get_route_response {
 	u32 route_lo;
 };
 
+/* XDomain messages */
+
+struct tb_xdomain_header {
+	u32 route_hi;
+	u32 route_lo;
+	u32 length_sn;
+};
+
+#define TB_XDOMAIN_LENGTH_MASK	GENMASK(5, 0)
+#define TB_XDOMAIN_SN_MASK	GENMASK(28, 27)
+#define TB_XDOMAIN_SN_SHIFT	27
+
+enum tb_xdp_type {
+	UUID_REQUEST_OLD = 1,
+	UUID_RESPONSE = 2,
+	PROPERTIES_REQUEST,
+	PROPERTIES_RESPONSE,
+	PROPERTIES_CHANGED_REQUEST,
+	PROPERTIES_CHANGED_RESPONSE,
+	ERROR_RESPONSE,
+	UUID_REQUEST = 12,
+};
+
+struct tb_xdp_header {
+	struct tb_xdomain_header xd_hdr;
+	uuid_t uuid;
+	u32 type;
+};
+
+struct tb_xdp_properties {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+	uuid_t dst_uuid;
+	u16 offset;
+	u16 reserved;
+};
+
+struct tb_xdp_properties_response {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+	uuid_t dst_uuid;
+	u16 offset;
+	u16 data_length;
+	u32 generation;
+	u32 data[0];
+};
+
+/*
+ * Max length of the data array a single XDomain property response is
+ * allowed to carry.
+ */
+#define TB_XDP_PROPERTIES_MAX_DATA_LENGTH	\
+	(((256 - 4 - sizeof(struct tb_xdp_properties_response))) / 4)
+
+/* Maximum size of the total property block in dwords we allow */
+#define TB_XDP_PROPERTIES_MAX_LENGTH		500
+
+struct tb_xdp_properties_changed {
+	struct tb_xdp_header hdr;
+	uuid_t src_uuid;
+};
+
+struct tb_xdp_properties_changed_response {
+	struct tb_xdp_header hdr;
+};
+
+enum tb_xdp_error {
+	ERROR_SUCCESS,
+	ERROR_UNKNOWN_PACKET,
+	ERROR_UNKNOWN_DOMAIN,
+	ERROR_NOT_SUPPORTED,
+	ERROR_NOT_READY,
+};
+
+struct tb_xdp_error_response {
+	struct tb_xdp_header hdr;
+	u32 error;
+};
+
 #endif
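The length_sn word of struct tb_xdomain_header carries both the payload length in dwords and a 2-bit sequence number. A sketch of the packing that tb_xdp_fill_header() in xdomain.c below performs (helper names illustrative):

  static inline u32 example_pack_length_sn(u32 payload_dwords, u8 sequence)
  {
  	return (payload_dwords & TB_XDOMAIN_LENGTH_MASK) |
  	       ((sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK);
  }

  static inline u8 example_unpack_sn(u32 length_sn)
  {
  	return (length_sn & TB_XDOMAIN_SN_MASK) >> TB_XDOMAIN_SN_SHIFT;
  }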
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
new file mode 100644
index 000000000000..f2d06f6f7be9
--- /dev/null
+++ b/drivers/thunderbolt/xdomain.c
@@ -0,0 +1,1576 @@
+/*
+ * Thunderbolt XDomain discovery protocol support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Authors: Michael Jamet <michael.jamet@intel.com>
+ *          Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/utsname.h>
+#include <linux/uuid.h>
+#include <linux/workqueue.h>
+
+#include "tb.h"
+
+#define XDOMAIN_DEFAULT_TIMEOUT			5000 /* ms */
+#define XDOMAIN_PROPERTIES_RETRIES		60
+#define XDOMAIN_PROPERTIES_CHANGED_RETRIES	10
+
+struct xdomain_request_work {
+	struct work_struct work;
+	struct tb_xdp_header *pkg;
+	struct tb *tb;
+};
+
+/* Serializes access to the properties and protocol handlers below */
+static DEFINE_MUTEX(xdomain_lock);
+
+/* Properties exposed to the remote domains */
+static struct tb_property_dir *xdomain_property_dir;
+static u32 *xdomain_property_block;
+static u32 xdomain_property_block_len;
+static u32 xdomain_property_block_gen;
+
+/* Additional protocol handlers */
+static LIST_HEAD(protocol_handlers);
+
+/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
+static const uuid_t tb_xdp_uuid =
+	UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
+		  0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
+
+static bool tb_xdomain_match(const struct tb_cfg_request *req,
+			     const struct ctl_pkg *pkg)
+{
+	switch (pkg->frame.eof) {
+	case TB_CFG_PKG_ERROR:
+		return true;
+
+	case TB_CFG_PKG_XDOMAIN_RESP: {
+		const struct tb_xdp_header *res_hdr = pkg->buffer;
+		const struct tb_xdp_header *req_hdr = req->request;
+		u8 req_seq, res_seq;
+
+		if (pkg->frame.size < req->response_size / 4)
+			return false;
+
+		/* Make sure route matches */
+		if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
+		     req_hdr->xd_hdr.route_hi)
+			return false;
+		if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
+			return false;
+
+		/* Then check that the sequence number matches */
+		res_seq = res_hdr->xd_hdr.length_sn & TB_XDOMAIN_SN_MASK;
+		res_seq >>= TB_XDOMAIN_SN_SHIFT;
+		req_seq = req_hdr->xd_hdr.length_sn & TB_XDOMAIN_SN_MASK;
+		req_seq >>= TB_XDOMAIN_SN_SHIFT;
+		if (res_seq != req_seq)
+			return false;
+
+		/* Check that the XDomain protocol matches */
+		if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
+			return false;
+
+		return true;
+	}
+
+	default:
+		return false;
+	}
+}
+
+static bool tb_xdomain_copy(struct tb_cfg_request *req,
+			    const struct ctl_pkg *pkg)
+{
+	memcpy(req->response, pkg->buffer, req->response_size);
+	req->result.err = 0;
+	return true;
+}
+
+static void response_ready(void *data)
+{
+	tb_cfg_request_put(data);
+}
+
+static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
+				 size_t size, enum tb_cfg_pkg_type type)
+{
+	struct tb_cfg_request *req;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = tb_xdomain_match;
+	req->copy = tb_xdomain_copy;
+	req->request = response;
+	req->request_size = size;
+	req->request_type = type;
+
+	return tb_cfg_request(ctl, req, response_ready, req);
+}
+
+/**
+ * tb_xdomain_response() - Send an XDomain response message
+ * @xd: XDomain to send the message
+ * @response: Response to send
+ * @size: Size of the response
+ * @type: PDF type of the response
+ *
+ * This can be used to send an XDomain response message to the other
+ * domain. No response for the message is expected.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
+			size_t size, enum tb_cfg_pkg_type type)
+{
+	return __tb_xdomain_response(xd->tb->ctl, response, size, type);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_response);
+
+static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
+	size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
+	size_t response_size, enum tb_cfg_pkg_type response_type,
+	unsigned int timeout_msec)
+{
+	struct tb_cfg_request *req;
+	struct tb_cfg_result res;
+
+	req = tb_cfg_request_alloc();
+	if (!req)
+		return -ENOMEM;
+
+	req->match = tb_xdomain_match;
+	req->copy = tb_xdomain_copy;
+	req->request = request;
+	req->request_size = request_size;
+	req->request_type = request_type;
+	req->response = response;
+	req->response_size = response_size;
+	req->response_type = response_type;
+
+	res = tb_cfg_request_sync(ctl, req, timeout_msec);
+
+	tb_cfg_request_put(req);
+
+	return res.err == 1 ? -EIO : res.err;
+}
+
+/**
+ * tb_xdomain_request() - Send an XDomain request
+ * @xd: XDomain to send the request
+ * @request: Request to send
+ * @request_size: Size of the request in bytes
+ * @request_type: PDF type of the request
+ * @response: Response is copied here
+ * @response_size: Expected size of the response in bytes
+ * @response_type: Expected PDF type of the response
+ * @timeout_msec: Timeout in milliseconds to wait for the response
+ *
+ * This function can be used to send XDomain control channel messages to
+ * the other domain. The function waits until the response is received
+ * or the timeout triggers, whichever comes first.
+ *
+ * Return: %0 in case of success and negative errno in case of failure
+ */
+int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
+		       size_t request_size, enum tb_cfg_pkg_type request_type,
+		       void *response, size_t response_size,
+		       enum tb_cfg_pkg_type response_type,
+		       unsigned int timeout_msec)
+{
+	return __tb_xdomain_request(xd->tb->ctl, request, request_size,
+				    request_type, response, response_size,
+				    response_type, timeout_msec);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_request);
+
+static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
+	u8 sequence, enum tb_xdp_type type, size_t size)
+{
+	u32 length_sn;
+
+	length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
+	length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
+
+	hdr->xd_hdr.route_hi = upper_32_bits(route);
+	hdr->xd_hdr.route_lo = lower_32_bits(route);
+	hdr->xd_hdr.length_sn = length_sn;
+	hdr->type = type;
+	memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
+}
+
+static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+{
+	const struct tb_xdp_error_response *error;
+
+	if (hdr->type != ERROR_RESPONSE)
+		return 0;
+
+	error = (const struct tb_xdp_error_response *)hdr;
+
+	switch (error->error) {
+	case ERROR_UNKNOWN_PACKET:
+	case ERROR_UNKNOWN_DOMAIN:
+		return -EIO;
+	case ERROR_NOT_SUPPORTED:
+		return -ENOTSUPP;
+	case ERROR_NOT_READY:
+		return -EAGAIN;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
+				 enum tb_xdp_error error)
+{
+	struct tb_xdp_error_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
+			   sizeof(res));
+	res.error = error;
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
+	const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
+	u32 **block, u32 *generation)
+{
+	struct tb_xdp_properties_response *res;
+	struct tb_xdp_properties req;
+	u16 data_len, len;
+	size_t total_size;
+	u32 *data = NULL;
+	int ret;
+
+	total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
+	res = kzalloc(total_size, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
+			   sizeof(req));
+	memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
+	memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
+
+	len = 0;
+	data_len = 0;
+
+	do {
+		ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+					   TB_CFG_PKG_XDOMAIN_REQ, res,
+					   total_size, TB_CFG_PKG_XDOMAIN_RESP,
+					   XDOMAIN_DEFAULT_TIMEOUT);
+		if (ret)
+			goto err;
+
+		ret = tb_xdp_handle_error(&res->hdr);
+		if (ret)
+			goto err;
+
+		/*
+		 * Package length includes the whole payload without the
+		 * XDomain header. Validate first that the package is at
+		 * least the size of the response structure.
+		 */
+		len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
+		if (len < sizeof(*res) / 4) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		len += sizeof(res->hdr.xd_hdr) / 4;
+		len -= sizeof(*res) / 4;
+
+		if (res->offset != req.offset) {
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/*
+		 * On the first round, allocate a block that has enough
+		 * space for the whole properties block.
+		 */
+		if (!data) {
+			data_len = res->data_length;
+			if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
+				ret = -E2BIG;
+				goto err;
+			}
+
+			data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
+			if (!data) {
+				ret = -ENOMEM;
+				goto err;
+			}
+		}
+
+		memcpy(data + req.offset, res->data, len * 4);
+		req.offset += len;
+	} while (!data_len || req.offset < data_len);
+
+	*block = data;
+	*generation = res->generation;
+
+	kfree(res);
+
+	return data_len;
+
+err:
+	kfree(data);
+	kfree(res);
+
+	return ret;
+}
+
+static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
+	u64 route, u8 sequence, const uuid_t *src_uuid,
+	const struct tb_xdp_properties *req)
+{
+	struct tb_xdp_properties_response *res;
+	size_t total_size;
+	u16 len;
+	int ret;
+
+	/*
+	 * Currently we expect all requests to be directed to us. The
+	 * protocol supports forwarding, though, which we might add
+	 * support for later on.
+	 */
+	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
+		tb_xdp_error_response(ctl, route, sequence,
+				      ERROR_UNKNOWN_DOMAIN);
+		return 0;
+	}
+
+	mutex_lock(&xdomain_lock);
+
+	if (req->offset >= xdomain_property_block_len) {
+		mutex_unlock(&xdomain_lock);
+		return -EINVAL;
+	}
+
+	len = xdomain_property_block_len - req->offset;
+	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
+	total_size = sizeof(*res) + len * 4;
+
+	res = kzalloc(total_size, GFP_KERNEL);
+	if (!res) {
+		mutex_unlock(&xdomain_lock);
+		return -ENOMEM;
+	}
+
+	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+			   total_size);
+	res->generation = xdomain_property_block_gen;
+	res->data_length = xdomain_property_block_len;
+	res->offset = req->offset;
+	uuid_copy(&res->src_uuid, src_uuid);
+	uuid_copy(&res->dst_uuid, &req->src_uuid);
+	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+
+	mutex_unlock(&xdomain_lock);
+
+	ret = __tb_xdomain_response(ctl, res, total_size,
+				    TB_CFG_PKG_XDOMAIN_RESP);
+
+	kfree(res);
+	return ret;
+}
+
+static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
+					     int retry, const uuid_t *uuid)
+{
+	struct tb_xdp_properties_changed_response res;
+	struct tb_xdp_properties_changed req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, retry % 4,
+			   PROPERTIES_CHANGED_REQUEST, sizeof(req));
+	uuid_copy(&req.src_uuid, uuid);
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req),
+				   TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
+				   TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	return tb_xdp_handle_error(&res.hdr);
+}
+
+static int
+tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
+{
+	struct tb_xdp_properties_changed_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence,
+			   PROPERTIES_CHANGED_RESPONSE, sizeof(res));
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+/**
+ * tb_register_protocol_handler() - Register protocol handler
+ * @handler: Handler to register
+ *
+ * This allows XDomain service drivers to hook into incoming XDomain
+ * messages. After this function is called the service driver needs to
+ * be able to handle calls to the callback whenever a package with the
+ * registered protocol is received.
+ */
+int tb_register_protocol_handler(struct tb_protocol_handler *handler)
+{
+	if (!handler->uuid || !handler->callback)
+		return -EINVAL;
+	if (uuid_equal(handler->uuid, &tb_xdp_uuid))
+		return -EINVAL;
+
+	mutex_lock(&xdomain_lock);
+	list_add_tail(&handler->list, &protocol_handlers);
+	mutex_unlock(&xdomain_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
+
+/**
+ * tb_unregister_protocol_handler() - Unregister protocol handler
+ * @handler: Handler to unregister
+ *
+ * Removes the previously registered protocol handler.
+ */
+void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
+{
+	mutex_lock(&xdomain_lock);
+	list_del_init(&handler->list);
+	mutex_unlock(&xdomain_lock);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
+
+static void tb_xdp_handle_request(struct work_struct *work)
+{
+	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
+	const struct tb_xdp_header *pkg = xw->pkg;
+	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
+	struct tb *tb = xw->tb;
+	struct tb_ctl *ctl = tb->ctl;
+	const uuid_t *uuid;
+	int ret = 0;
+	u8 sequence;
+	u64 route;
+
+	route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
+	sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
+	sequence >>= TB_XDOMAIN_SN_SHIFT;
+
+	mutex_lock(&tb->lock);
+	if (tb->root_switch)
+		uuid = tb->root_switch->uuid;
+	else
+		uuid = NULL;
+	mutex_unlock(&tb->lock);
+
+	if (!uuid) {
+		tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
+		goto out;
+	}
+
+	switch (pkg->type) {
+	case PROPERTIES_REQUEST:
+		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
+			(const struct tb_xdp_properties *)pkg);
+		break;
+
+	case PROPERTIES_CHANGED_REQUEST: {
+		const struct tb_xdp_properties_changed *xchg =
+			(const struct tb_xdp_properties_changed *)pkg;
+		struct tb_xdomain *xd;
+
+		ret = tb_xdp_properties_changed_response(ctl, route, sequence);
+
+		/*
+		 * Since the properties have been changed, let's update
+		 * the xdomain related to this connection as well in
+		 * case there is a change in services it offers.
+		 */
+		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
+		if (xd) {
+			queue_delayed_work(tb->wq, &xd->get_properties_work,
+					   msecs_to_jiffies(50));
+			tb_xdomain_put(xd);
+		}
+
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	if (ret) {
+		tb_warn(tb, "failed to send XDomain response for %#x\n",
+			pkg->type);
+	}
+
+out:
+	kfree(xw->pkg);
+	kfree(xw);
+}
+
+static void
+tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
+			size_t size)
+{
+	struct xdomain_request_work *xw;
+
+	xw = kmalloc(sizeof(*xw), GFP_KERNEL);
+	if (!xw)
+		return;
+
+	INIT_WORK(&xw->work, tb_xdp_handle_request);
+	xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
+	if (!xw->pkg) {
+		kfree(xw);
+		return;
+	}
+	xw->tb = tb;
+
+	queue_work(tb->wq, &xw->work);
+}
+
+/**
+ * tb_register_service_driver() - Register XDomain service driver
+ * @drv: Driver to register
+ *
+ * Registers new service driver from @drv to the bus.
+ */
+int tb_register_service_driver(struct tb_service_driver *drv)
+{
+	drv->driver.bus = &tb_bus_type;
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_register_service_driver);
+
+/**
+ * tb_unregister_service_driver() - Unregister XDomain service driver
+ * @drv: Driver to unregister
+ *
+ * Unregisters XDomain service driver from the bus.
+ */
+void tb_unregister_service_driver(struct tb_service_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
+
+static ssize_t key_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	/*
+	 * It should be null terminated but anything else is pretty much
+	 * allowed.
+	 */
+	return sprintf(buf, "%*pEp\n", (int)strlen(svc->key), svc->key);
+}
+static DEVICE_ATTR_RO(key);
+
+static int get_modalias(struct tb_service *svc, char *buf, size_t size)
+{
+	return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
+			svc->prtcid, svc->prtcvers, svc->prtcrevs);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+	ssize_t len;
+
+	/* Full buffer size except new line and null termination */
+	len = get_modalias(svc, buf, PAGE_SIZE - 2);
+	buf[len++] = '\n';
+	buf[len] = '\0';
+	return len;
+}
+static DEVICE_ATTR_RO(modalias);
+
+static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcid);
+}
+static DEVICE_ATTR_RO(prtcid);
+
+static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcvers);
+}
+static DEVICE_ATTR_RO(prtcvers);
+
+static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "%u\n", svc->prtcrevs);
+}
+static DEVICE_ATTR_RO(prtcrevs);
+
+static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+
+	return sprintf(buf, "0x%08x\n", svc->prtcstns);
+}
+static DEVICE_ATTR_RO(prtcstns);
+
+static struct attribute *tb_service_attrs[] = {
+	&dev_attr_key.attr,
+	&dev_attr_modalias.attr,
+	&dev_attr_prtcid.attr,
+	&dev_attr_prtcvers.attr,
+	&dev_attr_prtcrevs.attr,
+	&dev_attr_prtcstns.attr,
+	NULL,
+};
+
+static struct attribute_group tb_service_attr_group = {
+	.attrs = tb_service_attrs,
+};
+
+static const struct attribute_group *tb_service_attr_groups[] = {
+	&tb_service_attr_group,
+	NULL,
+};
+
+static int tb_service_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+	char modalias[64];
+
+	get_modalias(svc, modalias, sizeof(modalias));
+	return add_uevent_var(env, "MODALIAS=%s", modalias);
+}
+
+static void tb_service_release(struct device *dev)
+{
+	struct tb_service *svc = container_of(dev, struct tb_service, dev);
+	struct tb_xdomain *xd = tb_service_parent(svc);
+
+	ida_simple_remove(&xd->service_ids, svc->id);
+	kfree(svc->key);
+	kfree(svc);
+}
+
+struct device_type tb_service_type = {
+	.name = "thunderbolt_service",
+	.groups = tb_service_attr_groups,
+	.uevent = tb_service_uevent,
+	.release = tb_service_release,
+};
+EXPORT_SYMBOL_GPL(tb_service_type);
+
+static int remove_missing_service(struct device *dev, void *data)
+{
+	struct tb_xdomain *xd = data;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return 0;
+
+	if (!tb_property_find(xd->properties, svc->key,
+			      TB_PROPERTY_TYPE_DIRECTORY))
+		device_unregister(dev);
+
+	return 0;
+}
+
+static int find_service(struct device *dev, void *data)
+{
+	const struct tb_property *p = data;
+	struct tb_service *svc;
+
+	svc = tb_to_service(dev);
+	if (!svc)
+		return 0;
+
+	return !strcmp(svc->key, p->key);
+}
+
+static int populate_service(struct tb_service *svc,
+			    struct tb_property *property)
+{
+	struct tb_property_dir *dir = property->value.dir;
+	struct tb_property *p;
+
+	/* Fill in standard properties */
+	p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcid = p->value.immediate;
+	p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcvers = p->value.immediate;
+	p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcrevs = p->value.immediate;
+	p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
+	if (p)
+		svc->prtcstns = p->value.immediate;
+
+	svc->key = kstrdup(property->key, GFP_KERNEL);
+	if (!svc->key)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void enumerate_services(struct tb_xdomain *xd)
+{
+	struct tb_service *svc;
+	struct tb_property *p;
+	struct device *dev;
+
+	/*
+	 * First remove all services that are not available anymore in
+	 * the updated property block.
+	 */
+	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
+
+	/* Then re-enumerate properties creating new services as we go */
+	tb_property_for_each(xd->properties, p) {
+		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
+			continue;
+
+		/* If the service exists already we are fine */
+		dev = device_find_child(&xd->dev, p, find_service);
+		if (dev) {
+			put_device(dev);
+			continue;
+		}
+
+		svc = kzalloc(sizeof(*svc), GFP_KERNEL);
+		if (!svc)
+			break;
+
+		if (populate_service(svc, p)) {
+			kfree(svc);
+			break;
+		}
+
+		svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+		svc->dev.bus = &tb_bus_type;
+		svc->dev.type = &tb_service_type;
+		svc->dev.parent = &xd->dev;
+		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
+
+		if (device_register(&svc->dev)) {
+			put_device(&svc->dev);
+			break;
+		}
+	}
+}
+
+static int populate_properties(struct tb_xdomain *xd,
+			       struct tb_property_dir *dir)
+{
+	const struct tb_property *p;
+
+	/* Required properties */
+	p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
+	if (!p)
+		return -EINVAL;
+	xd->device = p->value.immediate;
+
+	p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
+	if (!p)
+		return -EINVAL;
+	xd->vendor = p->value.immediate;
810
811 kfree(xd->device_name);
812 xd->device_name = NULL;
813 kfree(xd->vendor_name);
814 xd->vendor_name = NULL;
815
816 /* Optional properties */
817 p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
818 if (p)
819 xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
820 p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
821 if (p)
822 xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
823
824 return 0;
825}
826
827/* Called with @xd->lock held */
828static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
829{
830 if (!xd->resume)
831 return;
832
833 xd->resume = false;
834 if (xd->transmit_path) {
835 dev_dbg(&xd->dev, "re-establishing DMA path\n");
836 tb_domain_approve_xdomain_paths(xd->tb, xd);
837 }
838}
839
840static void tb_xdomain_get_properties(struct work_struct *work)
841{
842 struct tb_xdomain *xd = container_of(work, typeof(*xd),
843 get_properties_work.work);
844 struct tb_property_dir *dir;
845 struct tb *tb = xd->tb;
846 bool update = false;
847 u32 *block = NULL;
848 u32 gen = 0;
849 int ret;
850
851 ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
852 xd->remote_uuid, xd->properties_retries,
853 &block, &gen);
854 if (ret < 0) {
855 if (xd->properties_retries-- > 0) {
856 queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
857 msecs_to_jiffies(1000));
858 } else {
859 /* Give up now */
860 dev_err(&xd->dev,
861				"failed to read XDomain properties from %pUb\n",
862 xd->remote_uuid);
863 }
864 return;
865 }
866
867 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
868
869 mutex_lock(&xd->lock);
870
871 /* Only accept newer generation properties */
872 if (xd->properties && gen <= xd->property_block_gen) {
873 /*
874 * On resume it is likely that the properties block is
875 * not changed (unless the other end added or removed
876 * services). However, we need to make sure the existing
877 * DMA paths are restored properly.
878 */
879 tb_xdomain_restore_paths(xd);
880 goto err_free_block;
881 }
882
883 dir = tb_property_parse_dir(block, ret);
884 if (!dir) {
885 dev_err(&xd->dev, "failed to parse XDomain properties\n");
886 goto err_free_block;
887 }
888
889 ret = populate_properties(xd, dir);
890 if (ret) {
891 dev_err(&xd->dev, "missing XDomain properties in response\n");
892 goto err_free_dir;
893 }
894
895 /* Release the existing one */
896 if (xd->properties) {
897 tb_property_free_dir(xd->properties);
898 update = true;
899 }
900
901 xd->properties = dir;
902 xd->property_block_gen = gen;
903
904 tb_xdomain_restore_paths(xd);
905
906 mutex_unlock(&xd->lock);
907
908 kfree(block);
909
910 /*
911 * Now the device should be ready enough so we can add it to the
912 * bus and let userspace know about it. If the device is already
913	 * registered, we notify userspace that it has changed.
914 */
915 if (!update) {
916 if (device_add(&xd->dev)) {
917 dev_err(&xd->dev, "failed to add XDomain device\n");
918 return;
919 }
920 } else {
921 kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
922 }
923
924 enumerate_services(xd);
925 return;
926
927err_free_dir:
928 tb_property_free_dir(dir);
929err_free_block:
930 kfree(block);
931 mutex_unlock(&xd->lock);
932}
933
934static void tb_xdomain_properties_changed(struct work_struct *work)
935{
936 struct tb_xdomain *xd = container_of(work, typeof(*xd),
937 properties_changed_work.work);
938 int ret;
939
940 ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
941 xd->properties_changed_retries, xd->local_uuid);
942 if (ret) {
943 if (xd->properties_changed_retries-- > 0)
944 queue_delayed_work(xd->tb->wq,
945 &xd->properties_changed_work,
946 msecs_to_jiffies(1000));
947 return;
948 }
949
950 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
951}
952
953static ssize_t device_show(struct device *dev, struct device_attribute *attr,
954 char *buf)
955{
956 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
957
958 return sprintf(buf, "%#x\n", xd->device);
959}
960static DEVICE_ATTR_RO(device);
961
962static ssize_t
963device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
964{
965 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
966 int ret;
967
968 if (mutex_lock_interruptible(&xd->lock))
969 return -ERESTARTSYS;
970 ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
971 mutex_unlock(&xd->lock);
972
973 return ret;
974}
975static DEVICE_ATTR_RO(device_name);
976
977static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
978 char *buf)
979{
980 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
981
982 return sprintf(buf, "%#x\n", xd->vendor);
983}
984static DEVICE_ATTR_RO(vendor);
985
986static ssize_t
987vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
988{
989 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
990 int ret;
991
992 if (mutex_lock_interruptible(&xd->lock))
993 return -ERESTARTSYS;
994 ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
995 mutex_unlock(&xd->lock);
996
997 return ret;
998}
999static DEVICE_ATTR_RO(vendor_name);
1000
1001static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
1002 char *buf)
1003{
1004 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1005
1006 return sprintf(buf, "%pUb\n", xd->remote_uuid);
1007}
1008static DEVICE_ATTR_RO(unique_id);
1009
1010static struct attribute *xdomain_attrs[] = {
1011 &dev_attr_device.attr,
1012 &dev_attr_device_name.attr,
1013 &dev_attr_unique_id.attr,
1014 &dev_attr_vendor.attr,
1015 &dev_attr_vendor_name.attr,
1016 NULL,
1017};
1018
1019static struct attribute_group xdomain_attr_group = {
1020 .attrs = xdomain_attrs,
1021};
1022
1023static const struct attribute_group *xdomain_attr_groups[] = {
1024 &xdomain_attr_group,
1025 NULL,
1026};
1027
1028static void tb_xdomain_release(struct device *dev)
1029{
1030 struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
1031
1032 put_device(xd->dev.parent);
1033
1034 tb_property_free_dir(xd->properties);
1035 ida_destroy(&xd->service_ids);
1036
1037 kfree(xd->local_uuid);
1038 kfree(xd->remote_uuid);
1039 kfree(xd->device_name);
1040 kfree(xd->vendor_name);
1041 kfree(xd);
1042}
1043
1044static void start_handshake(struct tb_xdomain *xd)
1045{
1046 xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
1047 xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
1048
1049 /* Start exchanging properties with the other host */
1050 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1051 msecs_to_jiffies(100));
1052 queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
1053 msecs_to_jiffies(1000));
1054}
1055
1056static void stop_handshake(struct tb_xdomain *xd)
1057{
1058 xd->properties_retries = 0;
1059 xd->properties_changed_retries = 0;
1060
1061 cancel_delayed_work_sync(&xd->get_properties_work);
1062 cancel_delayed_work_sync(&xd->properties_changed_work);
1063}
1064
1065static int __maybe_unused tb_xdomain_suspend(struct device *dev)
1066{
1067 stop_handshake(tb_to_xdomain(dev));
1068 return 0;
1069}
1070
1071static int __maybe_unused tb_xdomain_resume(struct device *dev)
1072{
1073 struct tb_xdomain *xd = tb_to_xdomain(dev);
1074
1075 /*
1076	 * Ask tb_xdomain_get_properties() to restore any existing DMA
1077 * paths after properties are re-read.
1078 */
1079 xd->resume = true;
1080 start_handshake(xd);
1081
1082 return 0;
1083}
1084
1085static const struct dev_pm_ops tb_xdomain_pm_ops = {
1086 SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
1087};
1088
1089struct device_type tb_xdomain_type = {
1090 .name = "thunderbolt_xdomain",
1091 .release = tb_xdomain_release,
1092 .pm = &tb_xdomain_pm_ops,
1093};
1094EXPORT_SYMBOL_GPL(tb_xdomain_type);
1095
1096/**
1097 * tb_xdomain_alloc() - Allocate new XDomain object
1098 * @tb: Domain where the XDomain belongs
1099 * @parent: Parent device (the switch through which the connection to
1100 *	    the other domain is reached)
1101 * @route: Route string used to reach the other domain
1102 * @local_uuid: Our local domain UUID
1103 * @remote_uuid: UUID of the other domain
1104 *
1105 * Allocates a new XDomain structure and returns a pointer to it. The
1106 * object must be released by calling tb_xdomain_put().
1107 */
1108struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
1109 u64 route, const uuid_t *local_uuid,
1110 const uuid_t *remote_uuid)
1111{
1112 struct tb_xdomain *xd;
1113
1114 xd = kzalloc(sizeof(*xd), GFP_KERNEL);
1115 if (!xd)
1116 return NULL;
1117
1118 xd->tb = tb;
1119 xd->route = route;
1120 ida_init(&xd->service_ids);
1121 mutex_init(&xd->lock);
1122 INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
1123 INIT_DELAYED_WORK(&xd->properties_changed_work,
1124 tb_xdomain_properties_changed);
1125
1126 xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
1127 if (!xd->local_uuid)
1128 goto err_free;
1129
1130 xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t), GFP_KERNEL);
1131 if (!xd->remote_uuid)
1132 goto err_free_local_uuid;
1133
1134 device_initialize(&xd->dev);
1135 xd->dev.parent = get_device(parent);
1136 xd->dev.bus = &tb_bus_type;
1137 xd->dev.type = &tb_xdomain_type;
1138 xd->dev.groups = xdomain_attr_groups;
1139 dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
1140
1141 return xd;
1142
1143err_free_local_uuid:
1144 kfree(xd->local_uuid);
1145err_free:
1146 kfree(xd);
1147
1148 return NULL;
1149}
1150
1151/**
1152 * tb_xdomain_add() - Add XDomain to the bus
1153 * @xd: XDomain to add
1154 *
1155 * This function starts XDomain discovery protocol handshake and
1156 * eventually adds the XDomain to the bus. After calling this function
1157 * the caller needs to call tb_xdomain_remove() in order to remove and
1158 * release the object regardless of whether the handshake succeeded or not.
1159 */
1160void tb_xdomain_add(struct tb_xdomain *xd)
1161{
1162 /* Start exchanging properties with the other host */
1163 start_handshake(xd);
1164}
1165
1166static int unregister_service(struct device *dev, void *data)
1167{
1168 device_unregister(dev);
1169 return 0;
1170}
1171
1172/**
1173 * tb_xdomain_remove() - Remove XDomain from the bus
1174 * @xd: XDomain to remove
1175 *
1176 * This will stop all ongoing configuration work and remove the XDomain
1177 * along with any services from the bus. When the last reference to @xd
1178 * is released the object will be released as well.
1179 */
1180void tb_xdomain_remove(struct tb_xdomain *xd)
1181{
1182 stop_handshake(xd);
1183
1184 device_for_each_child_reverse(&xd->dev, xd, unregister_service);
1185
1186 if (!device_is_registered(&xd->dev))
1187 put_device(&xd->dev);
1188 else
1189 device_unregister(&xd->dev);
1190}
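
As a lifecycle sketch (roughly what a connection manager such as icm.c would do; the wrapper names are illustrative):

/* Sketch: create an XDomain when a host-to-host link is discovered */
static struct tb_xdomain *example_connect(struct tb *tb, struct device *parent,
					  u64 route, const uuid_t *local_uuid,
					  const uuid_t *remote_uuid)
{
	struct tb_xdomain *xd;

	xd = tb_xdomain_alloc(tb, parent, route, local_uuid, remote_uuid);
	if (!xd)
		return NULL;

	/* Starts the property exchange; the device appears on the bus
	 * asynchronously once the handshake succeeds.
	 */
	tb_xdomain_add(xd);
	return xd;
}

/* Sketch: tear down when the link goes away */
static void example_disconnect(struct tb_xdomain *xd)
{
	/* Required regardless of whether the handshake succeeded */
	tb_xdomain_remove(xd);
}
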
1191
1192/**
1193 * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
1194 * @xd: XDomain connection
1195 * @transmit_path: HopID of the transmit path the other end is using to
1196 * send packets
1197 * @transmit_ring: DMA ring used to receive packets from the other end
1198 * @receive_path: HopID of the receive path the other end is using to
1199 * receive packets
1200 * @receive_ring: DMA ring used to send packets to the other end
1201 *
1202 * The function enables DMA paths accordingly so that after successful
1203 * return the caller can send and receive packets using the high-speed
1204 * DMA paths.
1205 *
1206 * Return: %0 in case of success and negative errno in case of error
1207 */
1208int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
1209 u16 transmit_ring, u16 receive_path,
1210 u16 receive_ring)
1211{
1212 int ret;
1213
1214 mutex_lock(&xd->lock);
1215
1216 if (xd->transmit_path) {
1217 ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
1218 goto exit_unlock;
1219 }
1220
1221 xd->transmit_path = transmit_path;
1222 xd->transmit_ring = transmit_ring;
1223 xd->receive_path = receive_path;
1224 xd->receive_ring = receive_ring;
1225
1226 ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
1227
1228exit_unlock:
1229 mutex_unlock(&xd->lock);
1230
1231 return ret;
1232}
1233EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
1234
1235/**
1236 * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
1237 * @xd: XDomain connection
1238 *
1239 * This does the opposite of tb_xdomain_enable_paths(). After a call to
1240 * this function the caller is not expected to use the rings anymore.
1241 *
1242 * Return: %0 in case of success and negative errno in case of error
1243 */
1244int tb_xdomain_disable_paths(struct tb_xdomain *xd)
1245{
1246 int ret = 0;
1247
1248 mutex_lock(&xd->lock);
1249 if (xd->transmit_path) {
1250 xd->transmit_path = 0;
1251 xd->transmit_ring = 0;
1252 xd->receive_path = 0;
1253 xd->receive_ring = 0;
1254
1255 ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
1256 }
1257 mutex_unlock(&xd->lock);
1258
1259 return ret;
1260}
1261EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
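
A service driver would typically call this pair once both ends have agreed on hop and ring numbers through their service-specific protocol; a sketch with illustrative values:

/* Sketch: enable DMA paths for a service; 8 and 1 are made-up values
 * that both ends would have negotiated beforehand.
 */
static int example_start_dma(struct tb_service *svc)
{
	struct tb_xdomain *xd = tb_service_parent(svc);

	return tb_xdomain_enable_paths(xd, 8 /* transmit path */,
				       1 /* transmit ring */,
				       8 /* receive path */,
				       1 /* receive ring */);
}

static void example_stop_dma(struct tb_service *svc)
{
	tb_xdomain_disable_paths(tb_service_parent(svc));
}
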
1262
1263struct tb_xdomain_lookup {
1264 const uuid_t *uuid;
1265 u8 link;
1266 u8 depth;
1267};
1268
1269static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
1270 const struct tb_xdomain_lookup *lookup)
1271{
1272 int i;
1273
1274 for (i = 1; i <= sw->config.max_port_number; i++) {
1275 struct tb_port *port = &sw->ports[i];
1276 struct tb_xdomain *xd;
1277
1278 if (tb_is_upstream_port(port))
1279 continue;
1280
1281 if (port->xdomain) {
1282 xd = port->xdomain;
1283
1284 if (lookup->uuid) {
1285 if (uuid_equal(xd->remote_uuid, lookup->uuid))
1286 return xd;
1287 } else if (lookup->link == xd->link &&
1288 lookup->depth == xd->depth) {
1289 return xd;
1290 }
1291 } else if (port->remote) {
1292 xd = switch_find_xdomain(port->remote->sw, lookup);
1293 if (xd)
1294 return xd;
1295 }
1296 }
1297
1298 return NULL;
1299}
1300
1301/**
1302 * tb_xdomain_find_by_uuid() - Find an XDomain by UUID
1303 * @tb: Domain the XDomain belongs to
1304 * @uuid: UUID to look for
1305 *
1306 * Finds XDomain by walking through the Thunderbolt topology below @tb.
1307 * The returned XDomain will have its reference count increased so the
1308 * caller needs to call tb_xdomain_put() when it is done with the
1309 * object.
1310 *
1311 * This will find all XDomains including the ones that are not yet added
1312 * to the bus (handshake is still in progress).
1313 *
1314 * The caller needs to hold @tb->lock.
1315 */
1316struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
1317{
1318 struct tb_xdomain_lookup lookup;
1319 struct tb_xdomain *xd;
1320
1321 memset(&lookup, 0, sizeof(lookup));
1322 lookup.uuid = uuid;
1323
1324 xd = switch_find_xdomain(tb->root_switch, &lookup);
1325 if (xd) {
1326 get_device(&xd->dev);
1327 return xd;
1328 }
1329
1330 return NULL;
1331}
1332EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
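
Usage sketch: the lookup must happen under @tb->lock, and the returned reference must eventually be dropped:

static void example_find(struct tb *tb, const uuid_t *uuid)
{
	struct tb_xdomain *xd;

	mutex_lock(&tb->lock);
	xd = tb_xdomain_find_by_uuid(tb, uuid);
	mutex_unlock(&tb->lock);

	if (xd) {
		dev_info(&xd->dev, "remote host %pUb found\n",
			 xd->remote_uuid);
		tb_xdomain_put(xd);
	}
}
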
1333
1334/**
1335 * tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
1336 * @tb: Domain the XDomain belongs to
1337 * @link: Root switch link number
1338 * @depth: Depth in the link
1339 *
1340 * Finds XDomain by walking through the Thunderbolt topology below @tb.
1341 * The returned XDomain will have its reference count increased so the
1342 * caller needs to call tb_xdomain_put() when it is done with the
1343 * object.
1344 *
1345 * This will find all XDomains including the ones that are not yet added
1346 * to the bus (handshake is still in progress).
1347 *
1348 * The caller needs to hold @tb->lock.
1349 */
1350struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
1351 u8 depth)
1352{
1353 struct tb_xdomain_lookup lookup;
1354 struct tb_xdomain *xd;
1355
1356 memset(&lookup, 0, sizeof(lookup));
1357 lookup.link = link;
1358 lookup.depth = depth;
1359
1360 xd = switch_find_xdomain(tb->root_switch, &lookup);
1361 if (xd) {
1362 get_device(&xd->dev);
1363 return xd;
1364 }
1365
1366 return NULL;
1367}
1368
1369bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
1370 const void *buf, size_t size)
1371{
1372 const struct tb_protocol_handler *handler, *tmp;
1373 const struct tb_xdp_header *hdr = buf;
1374 unsigned int length;
1375 int ret = 0;
1376
1377	/* We expect the packet to be at least the size of the header */
1378 length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
1379 if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
1380 return true;
1381 if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
1382 return true;
1383
1384 /*
1385 * Handle XDomain discovery protocol packets directly here. For
1386 * other protocols (based on their UUID) we call registered
1387 * handlers in turn.
1388 */
1389 if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
1390 if (type == TB_CFG_PKG_XDOMAIN_REQ) {
1391 tb_xdp_schedule_request(tb, hdr, size);
1392 return true;
1393 }
1394 return false;
1395 }
1396
1397 mutex_lock(&xdomain_lock);
1398 list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
1399 if (!uuid_equal(&hdr->uuid, handler->uuid))
1400 continue;
1401
1402 mutex_unlock(&xdomain_lock);
1403 ret = handler->callback(buf, size, handler->data);
1404 mutex_lock(&xdomain_lock);
1405
1406 if (ret)
1407 break;
1408 }
1409 mutex_unlock(&xdomain_lock);
1410
1411 return ret > 0;
1412}
1413
1414static int rebuild_property_block(void)
1415{
1416 u32 *block, len;
1417 int ret;
1418
1419 ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
1420 if (ret < 0)
1421 return ret;
1422
1423 len = ret;
1424
1425 block = kcalloc(len, sizeof(u32), GFP_KERNEL);
1426 if (!block)
1427 return -ENOMEM;
1428
1429 ret = tb_property_format_dir(xdomain_property_dir, block, len);
1430 if (ret) {
1431 kfree(block);
1432 return ret;
1433 }
1434
1435 kfree(xdomain_property_block);
1436 xdomain_property_block = block;
1437 xdomain_property_block_len = len;
1438 xdomain_property_block_gen++;
1439
1440 return 0;
1441}
1442
1443static int update_xdomain(struct device *dev, void *data)
1444{
1445 struct tb_xdomain *xd;
1446
1447 xd = tb_to_xdomain(dev);
1448 if (xd) {
1449 queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
1450 msecs_to_jiffies(50));
1451 }
1452
1453 return 0;
1454}
1455
1456static void update_all_xdomains(void)
1457{
1458 bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
1459}
1460
1461static bool remove_directory(const char *key, const struct tb_property_dir *dir)
1462{
1463 struct tb_property *p;
1464
1465 p = tb_property_find(xdomain_property_dir, key,
1466 TB_PROPERTY_TYPE_DIRECTORY);
1467 if (p && p->value.dir == dir) {
1468 tb_property_remove(p);
1469 return true;
1470 }
1471 return false;
1472}
1473
1474/**
1475 * tb_register_property_dir() - Register property directory to the host
1476 * @key: Key (name) of the directory to add
1477 * @dir: Directory to add
1478 *
1479 * Service drivers can use this function to add a new property
1480 * directory to the properties exported by this host. The other
1481 * connected hosts are notified so they can re-read the properties of
1482 * this host if they are interested.
1483 *
1484 * Return: %0 on success and negative errno on failure
1485 */
1486int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
1487{
1488 int ret;
1489
1490 if (!key || strlen(key) > 8)
1491 return -EINVAL;
1492
1493 mutex_lock(&xdomain_lock);
1494 if (tb_property_find(xdomain_property_dir, key,
1495 TB_PROPERTY_TYPE_DIRECTORY)) {
1496 ret = -EEXIST;
1497 goto err_unlock;
1498 }
1499
1500 ret = tb_property_add_dir(xdomain_property_dir, key, dir);
1501 if (ret)
1502 goto err_unlock;
1503
1504 ret = rebuild_property_block();
1505 if (ret) {
1506 remove_directory(key, dir);
1507 goto err_unlock;
1508 }
1509
1510 mutex_unlock(&xdomain_lock);
1511 update_all_xdomains();
1512 return 0;
1513
1514err_unlock:
1515 mutex_unlock(&xdomain_lock);
1516 return ret;
1517}
1518EXPORT_SYMBOL_GPL(tb_register_property_dir);
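
A service driver would register its directory along the lines of the sketch below; the "network" key and the property values are illustrative (prtcid, prtcvers, prtcrevs and prtcstns are the standard entries populate_service() looks for on the remote side):

static struct tb_property_dir *example_dir;

static int example_register_properties(void)
{
	int ret;

	example_dir = tb_property_create_dir(NULL);
	if (!example_dir)
		return -ENOMEM;

	tb_property_add_immediate(example_dir, "prtcid", 1);
	tb_property_add_immediate(example_dir, "prtcvers", 1);
	tb_property_add_immediate(example_dir, "prtcrevs", 1);
	tb_property_add_immediate(example_dir, "prtcstns", 0);

	/* The key must be non-NULL and at most 8 characters long */
	ret = tb_register_property_dir("network", example_dir);
	if (ret)
		tb_property_free_dir(example_dir);
	return ret;
}
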
1519
1520/**
1521 * tb_unregister_property_dir() - Removes property directory from host
1522 * @key: Key (name) of the directory
1523 * @dir: Directory to remove
1524 *
1525 * This will remove the existing directory from this host and notify the
1526 * connected hosts about the change.
1527 */
1528void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
1529{
1530 int ret = 0;
1531
1532 mutex_lock(&xdomain_lock);
1533 if (remove_directory(key, dir))
1534 ret = rebuild_property_block();
1535 mutex_unlock(&xdomain_lock);
1536
1537 if (!ret)
1538 update_all_xdomains();
1539}
1540EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
1541
1542int tb_xdomain_init(void)
1543{
1544 int ret;
1545
1546 xdomain_property_dir = tb_property_create_dir(NULL);
1547 if (!xdomain_property_dir)
1548 return -ENOMEM;
1549
1550 /*
1551 * Initialize standard set of properties without any service
1552 * directories. Those will be added by service drivers
1553 * themselves when they are loaded.
1554 */
1555 tb_property_add_immediate(xdomain_property_dir, "vendorid",
1556 PCI_VENDOR_ID_INTEL);
1557 tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
1558 tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
1559 tb_property_add_text(xdomain_property_dir, "deviceid",
1560 utsname()->nodename);
1561 tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
1562
1563 ret = rebuild_property_block();
1564 if (ret) {
1565 tb_property_free_dir(xdomain_property_dir);
1566 xdomain_property_dir = NULL;
1567 }
1568
1569 return ret;
1570}
1571
1572void tb_xdomain_exit(void)
1573{
1574 kfree(xdomain_property_block);
1575 tb_property_free_dir(xdomain_property_dir);
1576}
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 694cebb50f72..7625c3b81f84 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -683,5 +683,31 @@ struct fsl_mc_device_id {
683 const char obj_type[16]; 683 const char obj_type[16];
684}; 684};
685 685
686/**
687 * struct tb_service_id - Thunderbolt service identifiers
688 * @match_flags: Flags used to match the structure
689 * @protocol_key: Protocol key the service supports
690 * @protocol_id: Protocol id the service supports
691 * @protocol_version: Version of the protocol
692 * @protocol_revision: Revision of the protocol software
693 * @driver_data: Driver specific data
694 *
695 * Thunderbolt XDomain services are exposed as devices where each device
696 * carries the protocol information the service supports. Thunderbolt
697 * XDomain service drivers match against that information.
698 */
699struct tb_service_id {
700 __u32 match_flags;
701 char protocol_key[8 + 1];
702 __u32 protocol_id;
703 __u32 protocol_version;
704 __u32 protocol_revision;
705 kernel_ulong_t driver_data;
706};
707
708#define TBSVC_MATCH_PROTOCOL_KEY 0x0001
709#define TBSVC_MATCH_PROTOCOL_ID 0x0002
710#define TBSVC_MATCH_PROTOCOL_VERSION 0x0004
711#define TBSVC_MATCH_PROTOCOL_REVISION 0x0008
686 712
687#endif /* LINUX_MOD_DEVICETABLE_H */ 713#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 43b8d1e09341..18c0e3d5e85c 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -17,6 +17,7 @@
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/mutex.h> 19#include <linux/mutex.h>
20#include <linux/mod_devicetable.h>
20#include <linux/uuid.h> 21#include <linux/uuid.h>
21 22
22enum tb_cfg_pkg_type { 23enum tb_cfg_pkg_type {
@@ -77,6 +78,8 @@ struct tb {
77}; 78};
78 79
79extern struct bus_type tb_bus_type; 80extern struct bus_type tb_bus_type;
81extern struct device_type tb_service_type;
82extern struct device_type tb_xdomain_type;
80 83
81#define TB_LINKS_PER_PHY_PORT 2 84#define TB_LINKS_PER_PHY_PORT 2
82 85
@@ -155,4 +158,243 @@ struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
155 property; \ 158 property; \
156 property = tb_property_get_next(dir, property)) 159 property = tb_property_get_next(dir, property))
157 160
161int tb_register_property_dir(const char *key, struct tb_property_dir *dir);
162void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
163
164/**
165 * struct tb_xdomain - Cross-domain (XDomain) connection
166 * @dev: XDomain device
167 * @tb: Pointer to the domain
168 * @remote_uuid: UUID of the remote domain (host)
169 * @local_uuid: Cached local UUID
170 * @route: Route string with which the other domain can be reached
171 * @vendor: Vendor ID of the remote domain
172 * @device: Device ID of the remote domain
173 * @lock: Lock to serialize access to the following fields of this structure
174 * @vendor_name: Name of the vendor (or %NULL if not known)
175 * @device_name: Name of the device (or %NULL if not known)
176 * @is_unplugged: The XDomain is unplugged
177 * @resume: The XDomain is being resumed
178 * @transmit_path: HopID which the remote end expects us to transmit
179 * @transmit_ring: Local ring (hop) where outgoing packets are pushed
180 * @receive_path: HopID which we expect the remote end to transmit
181 * @receive_ring: Local ring (hop) where incoming packets arrive
182 * @service_ids: Used to generate IDs for the services
183 * @properties: Properties exported by the remote domain
184 * @property_block_gen: Generation of @properties
186 * @get_properties_work: Work used to get remote domain properties
187 * @properties_retries: Number of times left to read properties
188 * @properties_changed_work: Work used to notify the remote domain that
189 * our properties have changed
190 * @properties_changed_retries: Number of times left to send properties
191 * changed notification
192 * @link: Root switch link the remote domain is connected to (ICM only)
193 * @depth: Depth in the chain the remote domain is connected to (ICM only)
194 *
195 * This structure represents a connection between two domains (hosts).
196 * Each XDomain contains zero or more services which are exposed as
197 * &struct tb_service objects.
198 *
199 * Service drivers may access this structure if they need to enumerate
200 * non-standard properties but they need to hold @lock when doing so
201 * because properties can be changed asynchronously in response to
202 * changes in the remote domain.
203 */
204struct tb_xdomain {
205 struct device dev;
206 struct tb *tb;
207 uuid_t *remote_uuid;
208 const uuid_t *local_uuid;
209 u64 route;
210 u16 vendor;
211 u16 device;
212 struct mutex lock;
213 const char *vendor_name;
214 const char *device_name;
215 bool is_unplugged;
216 bool resume;
217 u16 transmit_path;
218 u16 transmit_ring;
219 u16 receive_path;
220 u16 receive_ring;
221 struct ida service_ids;
222 struct tb_property_dir *properties;
223 u32 property_block_gen;
224 struct delayed_work get_properties_work;
225 int properties_retries;
226 struct delayed_work properties_changed_work;
227 int properties_changed_retries;
228 u8 link;
229 u8 depth;
230};
231
232int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
233 u16 transmit_ring, u16 receive_path,
234 u16 receive_ring);
235int tb_xdomain_disable_paths(struct tb_xdomain *xd);
236struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid);
237
238static inline struct tb_xdomain *
239tb_xdomain_find_by_uuid_locked(struct tb *tb, const uuid_t *uuid)
240{
241 struct tb_xdomain *xd;
242
243 mutex_lock(&tb->lock);
244 xd = tb_xdomain_find_by_uuid(tb, uuid);
245 mutex_unlock(&tb->lock);
246
247 return xd;
248}
249
250static inline struct tb_xdomain *tb_xdomain_get(struct tb_xdomain *xd)
251{
252 if (xd)
253 get_device(&xd->dev);
254 return xd;
255}
256
257static inline void tb_xdomain_put(struct tb_xdomain *xd)
258{
259 if (xd)
260 put_device(&xd->dev);
261}
262
263static inline bool tb_is_xdomain(const struct device *dev)
264{
265 return dev->type == &tb_xdomain_type;
266}
267
268static inline struct tb_xdomain *tb_to_xdomain(struct device *dev)
269{
270 if (tb_is_xdomain(dev))
271 return container_of(dev, struct tb_xdomain, dev);
272 return NULL;
273}
274
275int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
276 size_t size, enum tb_cfg_pkg_type type);
277int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
278 size_t request_size, enum tb_cfg_pkg_type request_type,
279 void *response, size_t response_size,
280 enum tb_cfg_pkg_type response_type,
281 unsigned int timeout_msec);
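
To make the request/response API concrete, a minimal sketch follows; the payload layout is protocol-specific, and the buffers, sizes and timeout here are purely illustrative, assuming the TB_CFG_PKG_XDOMAIN_REQ/RESP packet types from enum tb_cfg_pkg_type:

/* Illustrative only: issue a service-specific XDomain request and wait
 * for the matching response. Real protocols define their own message
 * structures in place of these raw buffers.
 */
static int example_xchg(struct tb_xdomain *xd)
{
	u32 request[4] = { 0 };		/* protocol-specific payload */
	u32 response[4];

	return tb_xdomain_request(xd, request, sizeof(request),
				  TB_CFG_PKG_XDOMAIN_REQ,
				  response, sizeof(response),
				  TB_CFG_PKG_XDOMAIN_RESP,
				  1000);
}
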
282
283/**
284 * struct tb_protocol_handler - Protocol specific handler
285 * @uuid: XDomain messages with this UUID are dispatched to this handler
286 * @callback: Callback called with the XDomain message. Returning %1
287 * here tells the XDomain core that the message was handled
288 *	      by this handler and should not be forwarded to other
289 * handlers.
290 * @data: Data passed with the callback
291 * @list: Handlers are linked using this
292 *
293 * Thunderbolt services can hook into incoming XDomain requests by
294 * registering a protocol handler. The only limitation is that the XDomain
295 * discovery protocol UUID cannot be registered since it is handled by
296 * the core XDomain code.
297 *
298 * The @callback must check that the message is really directed to the
299 * service the driver implements.
300 */
301struct tb_protocol_handler {
302 const uuid_t *uuid;
303 int (*callback)(const void *buf, size_t size, void *data);
304 void *data;
305 struct list_head list;
306};
307
308int tb_register_protocol_handler(struct tb_protocol_handler *handler);
309void tb_unregister_protocol_handler(struct tb_protocol_handler *handler);
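
A sketch of hooking a handler up; the UUID is illustrative and would normally identify the service-specific protocol:

static const uuid_t example_proto_uuid =
	UUID_INIT(0x12345678, 0x1234, 0x1234,
		  0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34);

static int example_callback(const void *buf, size_t size, void *data)
{
	/* Verify the message really is for this service, then handle it.
	 * Returning 1 stops it from being passed to further handlers.
	 */
	return 1;
}

static struct tb_protocol_handler example_handler = {
	.uuid = &example_proto_uuid,
	.callback = example_callback,
};

/* Call tb_register_protocol_handler(&example_handler) in probe and
 * tb_unregister_protocol_handler(&example_handler) in remove.
 */
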
310
311/**
312 * struct tb_service - Thunderbolt service
313 * @dev: XDomain device
314 * @id: ID of the service (shown in sysfs)
315 * @key: Protocol key from the properties directory
316 * @prtcid: Protocol ID from the properties directory
317 * @prtcvers: Protocol version from the properties directory
318 * @prtcrevs: Protocol software revision from the properties directory
319 * @prtcstns: Protocol settings mask from the properties directory
320 *
321 * Each domain exposes a set of services it supports as a collection of
322 * properties. For each service there will be one corresponding
323 * &struct tb_service. Service drivers are bound to these.
324 */
325struct tb_service {
326 struct device dev;
327 int id;
328 const char *key;
329 u32 prtcid;
330 u32 prtcvers;
331 u32 prtcrevs;
332 u32 prtcstns;
333};
334
335static inline struct tb_service *tb_service_get(struct tb_service *svc)
336{
337 if (svc)
338 get_device(&svc->dev);
339 return svc;
340}
341
342static inline void tb_service_put(struct tb_service *svc)
343{
344 if (svc)
345 put_device(&svc->dev);
346}
347
348static inline bool tb_is_service(const struct device *dev)
349{
350 return dev->type == &tb_service_type;
351}
352
353static inline struct tb_service *tb_to_service(struct device *dev)
354{
355 if (tb_is_service(dev))
356 return container_of(dev, struct tb_service, dev);
357 return NULL;
358}
359
360/**
361 * struct tb_service_driver - Thunderbolt service driver
362 * @driver: Driver structure
363 * @probe: Called when the driver is probed
364 * @remove: Called when the driver is removed (optional)
365 * @shutdown: Called at shutdown time to stop the service (optional)
366 * @id_table: Table of service identifiers the driver supports
367 */
368struct tb_service_driver {
369 struct device_driver driver;
370 int (*probe)(struct tb_service *svc, const struct tb_service_id *id);
371 void (*remove)(struct tb_service *svc);
372 void (*shutdown)(struct tb_service *svc);
373 const struct tb_service_id *id_table;
374};
375
376#define TB_SERVICE(key, id) \
377 .match_flags = TBSVC_MATCH_PROTOCOL_KEY | \
378 TBSVC_MATCH_PROTOCOL_ID, \
379 .protocol_key = (key), \
380 .protocol_id = (id)
381
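
Besides the TB_SERVICE() shorthand, an entry can set @match_flags by hand to match on version or revision as well; the values below are illustrative:

static const struct tb_service_id example_ids[] = {
	/* Shorthand: match on protocol key and id only */
	{ TB_SERVICE("example", 1) },
	/* Manual flags: additionally require protocol version 1 */
	{ .match_flags = TBSVC_MATCH_PROTOCOL_KEY |
			 TBSVC_MATCH_PROTOCOL_ID |
			 TBSVC_MATCH_PROTOCOL_VERSION,
	  .protocol_key = "example",
	  .protocol_id = 2,
	  .protocol_version = 1, },
	{ },
};
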
382int tb_register_service_driver(struct tb_service_driver *drv);
383void tb_unregister_service_driver(struct tb_service_driver *drv);
384
385static inline void *tb_service_get_drvdata(const struct tb_service *svc)
386{
387 return dev_get_drvdata(&svc->dev);
388}
389
390static inline void tb_service_set_drvdata(struct tb_service *svc, void *data)
391{
392 dev_set_drvdata(&svc->dev, data);
393}
394
395static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
396{
397 return tb_to_xdomain(svc->dev.parent);
398}
399
158#endif /* THUNDERBOLT_H_ */ 400#endif /* THUNDERBOLT_H_ */
diff --git a/scripts/mod/devicetable-offsets.c b/scripts/mod/devicetable-offsets.c
index e4d90e50f6fe..57263f2f8f2f 100644
--- a/scripts/mod/devicetable-offsets.c
+++ b/scripts/mod/devicetable-offsets.c
@@ -206,5 +206,12 @@ int main(void)
206 DEVID_FIELD(fsl_mc_device_id, vendor); 206 DEVID_FIELD(fsl_mc_device_id, vendor);
207 DEVID_FIELD(fsl_mc_device_id, obj_type); 207 DEVID_FIELD(fsl_mc_device_id, obj_type);
208 208
209 DEVID(tb_service_id);
210 DEVID_FIELD(tb_service_id, match_flags);
211 DEVID_FIELD(tb_service_id, protocol_key);
212 DEVID_FIELD(tb_service_id, protocol_id);
213 DEVID_FIELD(tb_service_id, protocol_version);
214 DEVID_FIELD(tb_service_id, protocol_revision);
215
209 return 0; 216 return 0;
210} 217}
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 29d6699d5a06..6ef6e63f96fd 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1301,6 +1301,31 @@ static int do_fsl_mc_entry(const char *filename, void *symval,
1301} 1301}
1302ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry); 1302ADD_TO_DEVTABLE("fslmc", fsl_mc_device_id, do_fsl_mc_entry);
1303 1303
1304/* Looks like: tbsvc:kSpNvNrN */
1305static int do_tbsvc_entry(const char *filename, void *symval, char *alias)
1306{
1307 DEF_FIELD(symval, tb_service_id, match_flags);
1308 DEF_FIELD_ADDR(symval, tb_service_id, protocol_key);
1309 DEF_FIELD(symval, tb_service_id, protocol_id);
1310 DEF_FIELD(symval, tb_service_id, protocol_version);
1311 DEF_FIELD(symval, tb_service_id, protocol_revision);
1312
1313 strcpy(alias, "tbsvc:");
1314 if (match_flags & TBSVC_MATCH_PROTOCOL_KEY)
1315 sprintf(alias + strlen(alias), "k%s", *protocol_key);
1316 else
1317 strcat(alias + strlen(alias), "k*");
1318 ADD(alias, "p", match_flags & TBSVC_MATCH_PROTOCOL_ID, protocol_id);
1319 ADD(alias, "v", match_flags & TBSVC_MATCH_PROTOCOL_VERSION,
1320 protocol_version);
1321 ADD(alias, "r", match_flags & TBSVC_MATCH_PROTOCOL_REVISION,
1322 protocol_revision);
1323
1324 add_wildcard(alias);
1325 return 1;
1326}
1327ADD_TO_DEVTABLE("tbsvc", tb_service_id, do_tbsvc_entry);
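
To make the alias format concrete (values illustrative): an id table entry created with TB_SERVICE("network", 1) produces the module alias

	tbsvc:knetworkp00000001v*r*

while the MODALIAS uevent emitted by get_modalias() for a matching service whose prtcvers and prtcrevs are both 1 reads

	tbsvc:knetworkp00000001v00000001r00000001
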
1328
1304/* Does namelen bytes of name exactly match the symbol? */ 1329/* Does namelen bytes of name exactly match the symbol? */
1305static bool sym_is(const char *name, unsigned namelen, const char *symbol) 1330static bool sym_is(const char *name, unsigned namelen, const char *symbol)
1306{ 1331{