author    Mika Westerberg <mika.westerberg@linux.intel.com>  2018-09-28 09:41:01 -0400
committer Mika Westerberg <mika.westerberg@linux.intel.com>  2019-04-18 04:18:53 -0400
commit    7ea4cd6b2010eecccf37ac3953ac8ecd3688300f
tree      2ad9923d6af59ba375a936cd46928052ead16c6d /drivers/thunderbolt/tb.c
parent    444ac3844895c34ab71ffcec1b3199449d3434a4
thunderbolt: Add support for XDomain connections
Two domains (hosts) can be connected through a Thunderbolt cable, and in that
case they can start software services such as networking over the high-speed
DMA paths. Now that we have all the basic building blocks in place to create
DMA tunnels over the Thunderbolt fabric, we can add this support to the
software connection manager as well.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
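[Editor's note, not part of the patch: a minimal sketch of how a service
driver such as thunderbolt-net would request and release the DMA paths this
patch teaches the software connection manager to set up. It assumes the
tb_xdomain_enable_paths()/tb_xdomain_disable_paths() helpers declared in
include/linux/thunderbolt.h; the example_* names and the path number 1 are
illustrative only.]

	#include <linux/thunderbolt.h>

	/*
	 * With this patch applied, tb_xdomain_enable_paths() ends up in
	 * the connection manager's new tb_approve_xdomain_paths(), and
	 * tb_xdomain_disable_paths() in tb_disconnect_xdomain_paths().
	 */
	static int example_enable_dma(struct tb_xdomain *xd,
				      struct tb_ring *tx_ring,
				      struct tb_ring *rx_ring)
	{
		/* Path 1 is illustrative; rings come from tb_ring_alloc_tx/rx() */
		return tb_xdomain_enable_paths(xd, 1, tx_ring->hop,
					       1, rx_ring->hop);
	}

	static void example_disable_dma(struct tb_xdomain *xd)
	{
		tb_xdomain_disable_paths(xd);
	}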
Diffstat (limited to 'drivers/thunderbolt/tb.c')
 drivers/thunderbolt/tb.c | 167 ++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 163 insertions(+), 4 deletions(-)
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index c5e82c4dcb64..e39fc1e35e6b 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -101,6 +101,28 @@ static void tb_discover_tunnels(struct tb_switch *sw)
 	}
 }
 
+static void tb_scan_xdomain(struct tb_port *port)
+{
+	struct tb_switch *sw = port->sw;
+	struct tb *tb = sw->tb;
+	struct tb_xdomain *xd;
+	u64 route;
+
+	route = tb_downstream_route(port);
+	xd = tb_xdomain_find_by_route(tb, route);
+	if (xd) {
+		tb_xdomain_put(xd);
+		return;
+	}
+
+	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
+			      NULL);
+	if (xd) {
+		tb_port_at(route, sw)->xdomain = xd;
+		tb_xdomain_add(xd);
+	}
+}
+
 static void tb_scan_port(struct tb_port *port);
 
 /**
@@ -143,13 +165,21 @@ static void tb_scan_port(struct tb_port *port)
 	if (tb_wait_for_port(port, false) <= 0)
 		return;
 	if (port->remote) {
-		tb_port_WARN(port, "port already has a remote!\n");
+		tb_port_dbg(port, "port already has a remote\n");
 		return;
 	}
 	sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
 			     tb_downstream_route(port));
-	if (IS_ERR(sw))
+	if (IS_ERR(sw)) {
+		/*
+		 * If there is an error accessing the connected switch
+		 * it may be connected to another domain. Also we allow
+		 * the other domain to be connected to a max depth switch.
+		 */
+		if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
+			tb_scan_xdomain(port);
 		return;
+	}
 
 	if (tb_switch_configure(sw)) {
 		tb_switch_put(sw);
@@ -157,6 +187,15 @@ static void tb_scan_port(struct tb_port *port)
 	}
 
 	/*
+	 * If there was previously another domain connected remove it
+	 * first.
+	 */
+	if (port->xdomain) {
+		tb_xdomain_remove(port->xdomain);
+		port->xdomain = NULL;
+	}
+
+	/*
 	 * Do not send uevents until we have discovered all existing
 	 * tunnels and know which switches were authorized already by
 	 * the boot firmware.
@@ -393,6 +432,65 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
 	return 0;
 }
 
+static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_cm *tcm = tb_priv(tb);
+	struct tb_port *nhi_port, *dst_port;
+	struct tb_tunnel *tunnel;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+	nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI);
+
+	mutex_lock(&tb->lock);
+	tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring,
+				     xd->transmit_path, xd->receive_ring,
+				     xd->receive_path);
+	if (!tunnel) {
+		mutex_unlock(&tb->lock);
+		return -ENOMEM;
+	}
+
+	if (tb_tunnel_activate(tunnel)) {
+		tb_port_info(nhi_port,
+			     "DMA tunnel activation failed, aborting\n");
+		tb_tunnel_free(tunnel);
+		mutex_unlock(&tb->lock);
+		return -EIO;
+	}
+
+	list_add_tail(&tunnel->list, &tcm->tunnel_list);
+	mutex_unlock(&tb->lock);
+	return 0;
+}
+
+static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	struct tb_port *dst_port;
+	struct tb_switch *sw;
+
+	sw = tb_to_switch(xd->dev.parent);
+	dst_port = tb_port_at(xd->route, sw);
+
+	/*
+	 * It is possible that the tunnel was already torn down (in
+	 * case of cable disconnect) so it is fine if we cannot find it
+	 * here anymore.
+	 */
+	tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port);
+}
+
+static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+{
+	if (!xd->is_unplugged) {
+		mutex_lock(&tb->lock);
+		__tb_disconnect_xdomain_paths(tb, xd);
+		mutex_unlock(&tb->lock);
+	}
+	return 0;
+}
+
 /* hotplug handling */
 
 /**
@@ -432,13 +530,29 @@ static void tb_handle_hotplug(struct work_struct *work)
 	}
 	if (ev->unplug) {
 		if (tb_port_has_remote(port)) {
-			tb_port_info(port, "unplugged\n");
+			tb_port_dbg(port, "switch unplugged\n");
 			tb_sw_set_unplugged(port->remote->sw);
 			tb_free_invalid_tunnels(tb);
 			tb_switch_remove(port->remote->sw);
 			port->remote = NULL;
 			if (port->dual_link_port)
 				port->dual_link_port->remote = NULL;
+		} else if (port->xdomain) {
+			struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
+
+			tb_port_dbg(port, "xdomain unplugged\n");
+			/*
+			 * Service drivers are unbound during
+			 * tb_xdomain_remove() so setting XDomain as
+			 * unplugged here prevents deadlock if they call
+			 * tb_xdomain_disable_paths(). We will tear down
+			 * the path below.
+			 */
+			xd->is_unplugged = true;
+			tb_xdomain_remove(xd);
+			port->xdomain = NULL;
+			__tb_disconnect_xdomain_paths(tb, xd);
+			tb_xdomain_put(xd);
 		} else if (tb_port_is_dpout(port)) {
 			tb_teardown_dp(tb, port);
 		} else {
@@ -500,8 +614,16 @@ static void tb_stop(struct tb *tb)
 	struct tb_tunnel *n;
 
 	/* tunnels are only present after everything has been initialized */
-	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+		/*
+		 * DMA tunnels require the driver to be functional so we
+		 * tear them down. Other protocol tunnels can be left
+		 * intact.
+		 */
+		if (tb_tunnel_is_dma(tunnel))
+			tb_tunnel_deactivate(tunnel);
 		tb_tunnel_free(tunnel);
+	}
 	tb_switch_remove(tb->root_switch);
 	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 }
@@ -611,13 +733,50 @@ static int tb_resume_noirq(struct tb *tb)
 	return 0;
 }
 
+static int tb_free_unplugged_xdomains(struct tb_switch *sw)
+{
+	int i, ret = 0;
+
+	for (i = 1; i <= sw->config.max_port_number; i++) {
+		struct tb_port *port = &sw->ports[i];
+
+		if (tb_is_upstream_port(port))
+			continue;
+		if (port->xdomain && port->xdomain->is_unplugged) {
+			tb_xdomain_remove(port->xdomain);
+			port->xdomain = NULL;
+			ret++;
+		} else if (port->remote) {
+			ret += tb_free_unplugged_xdomains(port->remote->sw);
+		}
+	}
+
+	return ret;
+}
+
+static void tb_complete(struct tb *tb)
+{
+	/*
+	 * Release any unplugged XDomains and if there is a case where
+	 * another domain is swapped in place of unplugged XDomain we
+	 * need to run another rescan.
+	 */
+	mutex_lock(&tb->lock);
+	if (tb_free_unplugged_xdomains(tb->root_switch))
+		tb_scan_switch(tb->root_switch);
+	mutex_unlock(&tb->lock);
+}
+
 static const struct tb_cm_ops tb_cm_ops = {
 	.start = tb_start,
 	.stop = tb_stop,
 	.suspend_noirq = tb_suspend_noirq,
 	.resume_noirq = tb_resume_noirq,
+	.complete = tb_complete,
 	.handle_event = tb_handle_event,
 	.approve_switch = tb_tunnel_pci,
+	.approve_xdomain_paths = tb_approve_xdomain_paths,
+	.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
 };
 
 struct tb *tb_probe(struct tb_nhi *nhi)
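
[Editor's note, not part of this diff: for context, the two new tb_cm_ops
hooks are reached through the domain layer. Simplified from
drivers/thunderbolt/domain.c:]

	/* Dispatches a service driver's path request to the active CM */
	int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	{
		if (!tb->cm_ops->approve_xdomain_paths)
			return -ENOTSUPP;
		return tb->cm_ops->approve_xdomain_paths(tb, xd);
	}

	/* Likewise for tearing the paths down again */
	int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
	{
		if (!tb->cm_ops->disconnect_xdomain_paths)
			return -ENOTSUPP;
		return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
	}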