author    Mika Westerberg <mika.westerberg@linux.intel.com>  2017-06-06 08:25:00 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2017-06-09 05:42:41 -0400
commit    9d3cce0b613689ee849a505ffac179af0ae9fff2 (patch)
tree      abe7bda188831abb170c39ebfc4d0765c77f6b02
parent    c9843ebbb83a120094aa3a55bc0190d285e8384a (diff)
thunderbolt: Introduce thunderbolt bus and connection manager
A Thunderbolt fabric consists of one or more switches. This fabric is called a domain, and it is controlled by an entity called the connection manager. The connection manager can be either internal (driven by firmware running on the host controller) or external (a software driver). This driver currently implements support for the latter.

In order to manage switches and their properties more easily, we model this domain structure as a Linux bus. Each host controller adds a domain device to this bus, and these devices are named domainN, where N is the index (id) of the domain.

We then abstract connection manager specific operations into a new structure, tb_cm_ops, and convert the existing tb.c to fill it accordingly. This makes it easier to add support for the internal connection manager in subsequent patches.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
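To make the new control flow easier to follow before reading the diff, here is a condensed, illustrative sketch of how a software connection manager plugs into the domain infrastructure. The example_probe()/example_cm_ops names are made up for this summary; in the patch the same steps are split between tb_probe() in tb.c and nhi_probe() in nhi.c, and error handling is trimmed.

	/* Illustrative sketch only; not part of the patch. */
	#include "tb.h"

	static const struct tb_cm_ops example_cm_ops = {
		.start         = tb_start,
		.stop          = tb_stop,
		.suspend_noirq = tb_suspend_noirq,
		.resume_noirq  = tb_resume_noirq,
		.hotplug       = tb_schedule_hotplug_handler,
	};

	static struct tb *example_probe(struct tb_nhi *nhi)
	{
		struct tb_cm *tcm;
		struct tb *tb;

		/* Domain plus connection manager private data in one allocation */
		tb = tb_domain_alloc(nhi, sizeof(*tcm));
		if (!tb)
			return NULL;

		tb->cm_ops = &example_cm_ops;

		tcm = tb_priv(tb);
		INIT_LIST_HEAD(&tcm->tunnel_list);

		/* Starts the control channel, registers domainN and calls ->start() */
		if (tb_domain_add(tb)) {
			tb_domain_put(tb);	/* drops the final reference */
			return NULL;
		}

		return tb;
	}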
-rw-r--r--  drivers/thunderbolt/Makefile       2
-rw-r--r--  drivers/thunderbolt/domain.c     230
-rw-r--r--  drivers/thunderbolt/nhi.c         31
-rw-r--r--  drivers/thunderbolt/tb.c         156
-rw-r--r--  drivers/thunderbolt/tb.h          70
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c   9
6 files changed, 377 insertions(+), 121 deletions(-)
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index 5d1053cdfa54..e276a9a62261 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
 obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
 thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
-
+thunderbolt-objs += domain.o
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
new file mode 100644
index 000000000000..3302f4c59638
--- /dev/null
+++ b/drivers/thunderbolt/domain.c
@@ -0,0 +1,230 @@
+/*
+ * Thunderbolt bus support
+ *
+ * Copyright (C) 2017, Intel Corporation
+ * Author:  Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "tb.h"
+
+static DEFINE_IDA(tb_domain_ida);
+
+struct bus_type tb_bus_type = {
+	.name = "thunderbolt",
+};
+
+static void tb_domain_release(struct device *dev)
+{
+	struct tb *tb = container_of(dev, struct tb, dev);
+
+	tb_ctl_free(tb->ctl);
+	destroy_workqueue(tb->wq);
+	ida_simple_remove(&tb_domain_ida, tb->index);
+	mutex_destroy(&tb->lock);
+	kfree(tb);
+}
+
+struct device_type tb_domain_type = {
+	.name = "thunderbolt_domain",
+	.release = tb_domain_release,
+};
+
+/**
+ * tb_domain_alloc() - Allocate a domain
+ * @nhi: Pointer to the host controller
+ * @privsize: Size of the connection manager private data
+ *
+ * Allocates and initializes a new Thunderbolt domain. Connection
+ * managers are expected to call this and then fill in @cm_ops
+ * accordingly.
+ *
+ * Call tb_domain_put() to release the domain before it has been added
+ * to the system.
+ *
+ * Return: allocated domain structure or %NULL in case of error
+ */
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize)
+{
+	struct tb *tb;
+
+	/*
+	 * Make sure the structure sizes match what the hardware expects
+	 * because bit-fields are being used.
+	 */
+	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
+	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
+
+	tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
+	if (!tb)
+		return NULL;
+
+	tb->nhi = nhi;
+	mutex_init(&tb->lock);
+
+	tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+	if (tb->index < 0)
+		goto err_free;
+
+	tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
+	if (!tb->wq)
+		goto err_remove_ida;
+
+	tb->dev.parent = &nhi->pdev->dev;
+	tb->dev.bus = &tb_bus_type;
+	tb->dev.type = &tb_domain_type;
+	dev_set_name(&tb->dev, "domain%d", tb->index);
+	device_initialize(&tb->dev);
+
+	return tb;
+
+err_remove_ida:
+	ida_simple_remove(&tb_domain_ida, tb->index);
+err_free:
+	kfree(tb);
+
+	return NULL;
+}
+
+/**
+ * tb_domain_add() - Add domain to the system
+ * @tb: Domain to add
+ *
+ * Starts the domain and adds it to the system. Hotplugging devices will
+ * work after this function has returned successfully. In order to remove
+ * and release the domain after this function has been called, call
+ * tb_domain_remove().
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_domain_add(struct tb *tb)
+{
+	int ret;
+
+	if (WARN_ON(!tb->cm_ops))
+		return -EINVAL;
+
+	mutex_lock(&tb->lock);
+
+	tb->ctl = tb_ctl_alloc(tb->nhi, tb->cm_ops->hotplug, tb);
+	if (!tb->ctl) {
+		ret = -ENOMEM;
+		goto err_unlock;
+	}
+
+	/*
+	 * tb_schedule_hotplug_handler may be called as soon as the config
+	 * channel is started. That's why we have to hold the lock here.
+	 */
+	tb_ctl_start(tb->ctl);
+
+	ret = device_add(&tb->dev);
+	if (ret)
+		goto err_ctl_stop;
+
+	/* Start the domain */
+	if (tb->cm_ops->start) {
+		ret = tb->cm_ops->start(tb);
+		if (ret)
+			goto err_domain_del;
+	}
+
+	/* This starts event processing */
+	mutex_unlock(&tb->lock);
+
+	return 0;
+
+err_domain_del:
+	device_del(&tb->dev);
+err_ctl_stop:
+	tb_ctl_stop(tb->ctl);
+err_unlock:
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_remove() - Removes and releases a domain
+ * @tb: Domain to remove
+ *
+ * Stops the domain, removes it from the system and releases all
+ * resources once the last reference has been released.
+ */
+void tb_domain_remove(struct tb *tb)
+{
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->stop)
+		tb->cm_ops->stop(tb);
+	/* Stop the domain control traffic */
+	tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	flush_workqueue(tb->wq);
+	device_unregister(&tb->dev);
+}
+
+/**
+ * tb_domain_suspend_noirq() - Suspend a domain
+ * @tb: Domain to suspend
+ *
+ * Suspends all devices in the domain and stops the control channel.
+ */
+int tb_domain_suspend_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	/*
+	 * The control channel interrupt is left enabled during suspend
+	 * and taking the lock here prevents any events happening before
+	 * we actually have stopped the domain and the control channel.
+	 */
+	mutex_lock(&tb->lock);
+	if (tb->cm_ops->suspend_noirq)
+		ret = tb->cm_ops->suspend_noirq(tb);
+	if (!ret)
+		tb_ctl_stop(tb->ctl);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+/**
+ * tb_domain_resume_noirq() - Resume a domain
+ * @tb: Domain to resume
+ *
+ * Re-starts the control channel, and resumes all devices connected to
+ * the domain.
+ */
+int tb_domain_resume_noirq(struct tb *tb)
+{
+	int ret = 0;
+
+	mutex_lock(&tb->lock);
+	tb_ctl_start(tb->ctl);
+	if (tb->cm_ops->resume_noirq)
+		ret = tb->cm_ops->resume_noirq(tb);
+	mutex_unlock(&tb->lock);
+
+	return ret;
+}
+
+int tb_domain_init(void)
+{
+	return bus_register(&tb_bus_type);
+}
+
+void tb_domain_exit(void)
+{
+	bus_unregister(&tb_bus_type);
+	ida_destroy(&tb_domain_ida);
+}
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index ed75c49748f5..c1113a3c4128 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -586,16 +586,16 @@ static int nhi_suspend_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
-	thunderbolt_suspend(tb);
-	return 0;
+
+	return tb_domain_suspend_noirq(tb);
 }
 
 static int nhi_resume_noirq(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct tb *tb = pci_get_drvdata(pdev);
-	thunderbolt_resume(tb);
-	return 0;
+
+	return tb_domain_resume_noirq(tb);
 }
 
 static void nhi_shutdown(struct tb_nhi *nhi)
@@ -715,12 +715,17 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);
 
 	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
-	tb = thunderbolt_alloc_and_start(nhi);
-	if (!tb) {
+	tb = tb_probe(nhi);
+	if (!tb)
+		return -ENODEV;
+
+	res = tb_domain_add(tb);
+	if (res) {
 		/*
 		 * At this point the RX/TX rings might already have been
 		 * activated. Do a proper shutdown.
 		 */
+		tb_domain_put(tb);
 		nhi_shutdown(nhi);
 		return -EIO;
 	}
@@ -733,7 +738,8 @@ static void nhi_remove(struct pci_dev *pdev)
 {
 	struct tb *tb = pci_get_drvdata(pdev);
 	struct tb_nhi *nhi = tb->nhi;
-	thunderbolt_shutdown_and_free(tb);
+
+	tb_domain_remove(tb);
 	nhi_shutdown(nhi);
 }
 
@@ -797,14 +803,23 @@ static struct pci_driver nhi_driver = {
 
 static int __init nhi_init(void)
 {
+	int ret;
+
 	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
 		return -ENOSYS;
-	return pci_register_driver(&nhi_driver);
+	ret = tb_domain_init();
+	if (ret)
+		return ret;
+	ret = pci_register_driver(&nhi_driver);
+	if (ret)
+		tb_domain_exit();
+	return ret;
 }
 
 static void __exit nhi_unload(void)
 {
 	pci_unregister_driver(&nhi_driver);
+	tb_domain_exit();
 }
 
 module_init(nhi_init);
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 6b44076e1380..9f00a0f28d53 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -12,6 +12,18 @@
 #include "tb_regs.h"
 #include "tunnel_pci.h"
 
+/**
+ * struct tb_cm - Simple Thunderbolt connection manager
+ * @tunnel_list: List of active tunnels
+ * @hotplug_active: tb_handle_hotplug will stop progressing plug
+ *		    events and exit if this is not set (it needs to
+ *		    acquire the lock one more time). Used to drain wq
+ *		    after cfg has been paused.
+ */
+struct tb_cm {
+	struct list_head tunnel_list;
+	bool hotplug_active;
+};
 
 /* enumeration & hot plug handling */
 
@@ -62,12 +74,14 @@ static void tb_scan_port(struct tb_port *port)
  */
 static void tb_free_invalid_tunnels(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel;
 	struct tb_pci_tunnel *n;
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
-	{
+
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		if (tb_pci_is_invalid(tunnel)) {
 			tb_pci_deactivate(tunnel);
+			list_del(&tunnel->list);
 			tb_pci_free(tunnel);
 		}
 	}
@@ -149,6 +163,8 @@ static void tb_activate_pcie_devices(struct tb *tb)
 	struct tb_port *up_port;
 	struct tb_port *down_port;
 	struct tb_pci_tunnel *tunnel;
+	struct tb_cm *tcm = tb_priv(tb);
+
 	/* scan for pcie devices at depth 1*/
 	for (i = 1; i <= tb->root_switch->config.max_port_number; i++) {
 		if (tb_is_upstream_port(&tb->root_switch->ports[i]))
@@ -195,6 +211,7 @@ static void tb_activate_pcie_devices(struct tb *tb)
 			tb_pci_free(tunnel);
 		}
 
+		list_add(&tunnel->list, &tcm->tunnel_list);
 	}
 }
 
@@ -217,10 +234,11 @@ static void tb_handle_hotplug(struct work_struct *work)
 {
 	struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
 	struct tb *tb = ev->tb;
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_switch *sw;
 	struct tb_port *port;
 	mutex_lock(&tb->lock);
-	if (!tb->hotplug_active)
+	if (!tcm->hotplug_active)
 		goto out; /* during init, suspend or shutdown */
 
 	sw = get_switch_at_route(tb->root_switch, ev->route);
@@ -296,22 +314,14 @@ static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port,
 	queue_work(tb->wq, &ev->work);
 }
 
-/**
- * thunderbolt_shutdown_and_free() - shutdown everything
- *
- * Free all switches and the config channel.
- *
- * Used in the error path of thunderbolt_alloc_and_start.
- */
-void thunderbolt_shutdown_and_free(struct tb *tb)
+static void tb_stop(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel;
 	struct tb_pci_tunnel *n;
 
-	mutex_lock(&tb->lock);
-
 	/* tunnels are only present after everything has been initialized */
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) {
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
 		tb_pci_deactivate(tunnel);
 		tb_pci_free(tunnel);
 	}
@@ -320,98 +330,44 @@ void thunderbolt_shutdown_and_free(struct tb *tb)
 	tb_switch_free(tb->root_switch);
 	tb->root_switch = NULL;
 
-	if (tb->ctl) {
-		tb_ctl_stop(tb->ctl);
-		tb_ctl_free(tb->ctl);
-	}
-	tb->ctl = NULL;
-	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-
-	/* allow tb_handle_hotplug to acquire the lock */
-	mutex_unlock(&tb->lock);
-	if (tb->wq) {
-		flush_workqueue(tb->wq);
-		destroy_workqueue(tb->wq);
-		tb->wq = NULL;
-	}
-	mutex_destroy(&tb->lock);
-	kfree(tb);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 }
 
-/**
- * thunderbolt_alloc_and_start() - setup the thunderbolt bus
- *
- * Allocates a tb_cfg control channel, initializes the root switch, enables
- * plug events and activates pci devices.
- *
- * Return: Returns NULL on error.
- */
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi)
+static int tb_start(struct tb *tb)
 {
-	struct tb *tb;
-
-	BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
-	BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
-	BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
-
-	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
-	if (!tb)
-		return NULL;
-
-	tb->nhi = nhi;
-	mutex_init(&tb->lock);
-	mutex_lock(&tb->lock);
-	INIT_LIST_HEAD(&tb->tunnel_list);
-
-	tb->wq = alloc_ordered_workqueue("thunderbolt", 0);
-	if (!tb->wq)
-		goto err_locked;
-
-	tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb);
-	if (!tb->ctl)
-		goto err_locked;
-	/*
-	 * tb_schedule_hotplug_handler may be called as soon as the config
-	 * channel is started. Thats why we have to hold the lock here.
-	 */
-	tb_ctl_start(tb->ctl);
+	struct tb_cm *tcm = tb_priv(tb);
 
 	tb->root_switch = tb_switch_alloc(tb, 0);
 	if (!tb->root_switch)
-		goto err_locked;
+		return -ENOMEM;
 
 	/* Full scan to discover devices added before the driver was loaded. */
 	tb_scan_switch(tb->root_switch);
 	tb_activate_pcie_devices(tb);
 
 	/* Allow tb_handle_hotplug to progress events */
-	tb->hotplug_active = true;
-	mutex_unlock(&tb->lock);
-	return tb;
-
-err_locked:
-	mutex_unlock(&tb->lock);
-	thunderbolt_shutdown_and_free(tb);
-	return NULL;
+	tcm->hotplug_active = true;
+	return 0;
 }
 
-void thunderbolt_suspend(struct tb *tb)
+static int tb_suspend_noirq(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
+
 	tb_info(tb, "suspending...\n");
-	mutex_lock(&tb->lock);
 	tb_switch_suspend(tb->root_switch);
-	tb_ctl_stop(tb->ctl);
-	tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */
-	mutex_unlock(&tb->lock);
+	tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
 	tb_info(tb, "suspend finished\n");
+
+	return 0;
 }
 
-void thunderbolt_resume(struct tb *tb)
+static int tb_resume_noirq(struct tb *tb)
 {
+	struct tb_cm *tcm = tb_priv(tb);
 	struct tb_pci_tunnel *tunnel, *n;
+
 	tb_info(tb, "resuming...\n");
-	mutex_lock(&tb->lock);
-	tb_ctl_start(tb->ctl);
 
 	/* remove any pci devices the firmware might have setup */
 	tb_switch_reset(tb, 0);
@@ -419,9 +375,9 @@ void thunderbolt_resume(struct tb *tb)
 	tb_switch_resume(tb->root_switch);
 	tb_free_invalid_tunnels(tb);
 	tb_free_unplugged_children(tb->root_switch);
-	list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list)
+	list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
 		tb_pci_restart(tunnel);
-	if (!list_empty(&tb->tunnel_list)) {
+	if (!list_empty(&tcm->tunnel_list)) {
 		/*
 		 * the pcie links need some time to get going.
 		 * 100ms works for me...
@@ -430,7 +386,33 @@ void thunderbolt_resume(struct tb *tb)
 		msleep(100);
 	}
 	/* Allow tb_handle_hotplug to progress events */
-	tb->hotplug_active = true;
-	mutex_unlock(&tb->lock);
+	tcm->hotplug_active = true;
 	tb_info(tb, "resume finished\n");
+
+	return 0;
+}
+
+static const struct tb_cm_ops tb_cm_ops = {
+	.start = tb_start,
+	.stop = tb_stop,
+	.suspend_noirq = tb_suspend_noirq,
+	.resume_noirq = tb_resume_noirq,
+	.hotplug = tb_schedule_hotplug_handler,
+};
+
+struct tb *tb_probe(struct tb_nhi *nhi)
+{
+	struct tb_cm *tcm;
+	struct tb *tb;
+
+	tb = tb_domain_alloc(nhi, sizeof(*tcm));
+	if (!tb)
+		return NULL;
+
+	tb->cm_ops = &tb_cm_ops;
+
+	tcm = tb_priv(tb);
+	INIT_LIST_HEAD(&tcm->tunnel_list);
+
+	return tb;
 }
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 0b78bc4fbe61..5fab4c44f124 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -92,29 +92,52 @@ struct tb_path {
 	int path_length; /* number of hops */
 };
 
+/**
+ * struct tb_cm_ops - Connection manager specific operations vector
+ * @start: Starts the domain
+ * @stop: Stops the domain
+ * @suspend_noirq: Connection manager specific suspend_noirq
+ * @resume_noirq: Connection manager specific resume_noirq
+ * @hotplug: Handle hotplug event
+ */
+struct tb_cm_ops {
+	int (*start)(struct tb *tb);
+	void (*stop)(struct tb *tb);
+	int (*suspend_noirq)(struct tb *tb);
+	int (*resume_noirq)(struct tb *tb);
+	hotplug_cb hotplug;
+};
 
 /**
  * struct tb - main thunderbolt bus structure
+ * @dev: Domain device
+ * @lock: Big lock. Must be held when accessing cfg or any struct
+ *	  tb_switch / struct tb_port.
+ * @nhi: Pointer to the NHI structure
+ * @ctl: Control channel for this domain
+ * @wq: Ordered workqueue for all domain specific work
+ * @root_switch: Root switch of this domain
+ * @cm_ops: Connection manager specific operations vector
+ * @index: Linux assigned domain number
+ * @privdata: Private connection manager specific data
  */
 struct tb {
-	struct mutex lock;	/*
-				 * Big lock. Must be held when accessing cfg or
-				 * any struct tb_switch / struct tb_port.
-				 */
+	struct device dev;
+	struct mutex lock;
 	struct tb_nhi *nhi;
 	struct tb_ctl *ctl;
-	struct workqueue_struct *wq; /* ordered workqueue for plug events */
+	struct workqueue_struct *wq;
 	struct tb_switch *root_switch;
-	struct list_head tunnel_list; /* list of active PCIe tunnels */
-	bool hotplug_active; /*
-			      * tb_handle_hotplug will stop progressing plug
-			      * events and exit if this is not set (it needs to
-			      * acquire the lock one more time). Used to drain
-			      * wq after cfg has been paused.
-			      */
-
+	const struct tb_cm_ops *cm_ops;
+	int index;
+	unsigned long privdata[0];
 };
 
+static inline void *tb_priv(struct tb *tb)
+{
+	return (void *)tb->privdata;
+}
+
 /* helper functions & macros */
 
 /**
@@ -215,11 +238,24 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
 #define tb_port_info(port, fmt, arg...) \
 	__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
 
+struct tb *tb_probe(struct tb_nhi *nhi);
+
+extern struct bus_type tb_bus_type;
+extern struct device_type tb_domain_type;
+
+int tb_domain_init(void);
+void tb_domain_exit(void);
 
-struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi);
-void thunderbolt_shutdown_and_free(struct tb *tb);
-void thunderbolt_suspend(struct tb *tb);
-void thunderbolt_resume(struct tb *tb);
+struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
+int tb_domain_add(struct tb *tb);
+void tb_domain_remove(struct tb *tb);
+int tb_domain_suspend_noirq(struct tb *tb);
+int tb_domain_resume_noirq(struct tb *tb);
+
+static inline void tb_domain_put(struct tb *tb)
+{
+	put_device(&tb->dev);
+}
 
 struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route);
 void tb_switch_free(struct tb_switch *sw);
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c
index f4ce9845e42a..ca4475907d7a 100644
--- a/drivers/thunderbolt/tunnel_pci.c
+++ b/drivers/thunderbolt/tunnel_pci.c
@@ -194,19 +194,13 @@ err:
  */
 int tb_pci_activate(struct tb_pci_tunnel *tunnel)
 {
-	int res;
 	if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) {
 		tb_tunnel_WARN(tunnel,
 			       "trying to activate an already activated tunnel\n");
 		return -EINVAL;
 	}
 
-	res = tb_pci_restart(tunnel);
-	if (res)
-		return res;
-
-	list_add(&tunnel->list, &tunnel->tb->tunnel_list);
-	return 0;
+	return tb_pci_restart(tunnel);
 }
 
 
@@ -227,6 +221,5 @@ void tb_pci_deactivate(struct tb_pci_tunnel *tunnel)
 		tb_path_deactivate(tunnel->path_to_down);
 	if (tunnel->path_to_up->activated)
 		tb_path_deactivate(tunnel->path_to_up);
-	list_del_init(&tunnel->list);
 }
 