author	Mika Westerberg <mika.westerberg@linux.intel.com>	2017-06-06 08:25:17 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-06-09 05:42:43 -0400
commit	e6b245ccd524441f462f1ca1fe726123dcedeeee (patch)
tree	e4b37479fce488999f3948c75e6f6c9e71e93440 /drivers/thunderbolt/switch.c
parent	f67cf491175a315ca86c9b349708bfed7b1f40c1 (diff)
thunderbolt: Add support for host and device NVM firmware upgrade
Starting from Intel Falcon Ridge the NVM firmware can be upgraded by
using DMA configuration based mailbox commands. If we detect that the
host or device (device support starts from Intel Alpine Ridge) has the
DMA configuration based mailbox, we expose the NVM information to
userspace as two separate Linux NVMem devices: nvm_active and
nvm_non_active. The former is the read-only portion of the active NVM,
which firmware upgrade tools can use to find a suitable NVM image if
the device identification strings are not enough. The latter is the
write-only portion where the new NVM image is to be written by
userspace. It is up to userspace to find the right NVM image (the
kernel does very minimal validation). The ICM firmware itself
authenticates the new NVM firmware and fails the operation if it is
not what is expected.

We also expose two new sysfs files for each switch: nvm_version and
nvm_authenticate, which can be used to read the active NVM version and
to start the upgrade process.

We also introduce safe mode, which is the mode a switch enters when it
does not have properly authenticated firmware. In this mode the switch
only accepts a couple of commands, including flashing a new NVM
firmware image and triggering a power cycle.

This code is based on the work done by Amir Levy and Michael Jamet.

Signed-off-by: Michael Jamet <michael.jamet@intel.com>
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Andreas Noever <andreas.noever@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
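For reference, the userspace side of the upgrade flow described above might look roughly like the following minimal C sketch. The image file name and the sysfs/nvmem paths are illustrative assumptions (the exact device names depend on the domain and switch), and error handling is kept minimal:

/*
 * Hypothetical sketch of a userspace NVM upgrade tool. The paths are
 * illustrative; the real nvm_non_active NVMem device and the switch
 * sysfs directory must be discovered on the running system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static int write_file(const char *path, const void *buf, size_t len)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, buf, len);
	close(fd);
	return n == (ssize_t)len ? 0 : -1;
}

int main(void)
{
	/* Assumed paths for one switch; adjust for the real device */
	const char *nvmem = "/sys/bus/nvmem/devices/nvm_non_active0/nvmem";
	const char *auth = "/sys/bus/thunderbolt/devices/0-0/nvm_authenticate";
	char *image;
	long size;
	FILE *f;

	/* Read the new NVM image from disk */
	f = fopen("switch-nvm.bin", "rb");
	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	rewind(f);
	image = malloc(size);
	if (!image || fread(image, 1, size, f) != (size_t)size)
		return 1;
	fclose(f);

	/* Write the image to the write-only non-active NVM portion */
	if (write_file(nvmem, image, size))
		return 1;

	/*
	 * Writing 1 to nvm_authenticate triggers validation and
	 * authentication of the cached image; reading the file back
	 * later reports the status of the last authentication
	 * (0 on success).
	 */
	if (write_file(auth, "1", 1))
		return 1;

	free(image);
	return 0;
}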
Diffstat (limited to 'drivers/thunderbolt/switch.c')
-rw-r--r--	drivers/thunderbolt/switch.c | 603
1 file changed, 582 insertions(+), 21 deletions(-)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1524edf42ee8..ab3e8f410444 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,13 +5,395 @@
  */
 
 #include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include "tb.h"
 
 /* Switch authorization from userspace is serialized by this lock */
 static DEFINE_MUTEX(switch_lock);
 
+/* Switch NVM support */
+
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_CSS			0x10
+#define NVM_FLASH_SIZE		0x45
+
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+	struct list_head list;
+	uuid_be uuid;
+	u32 status;
+};
+
+/*
+ * Hold NVM authentication failure status per switch. This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	list_for_each_entry(st, &nvm_auth_status_cache, list) {
+		if (!uuid_be_cmp(st->uuid, *sw->uuid))
+			return st;
+	}
+
+	return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	mutex_unlock(&nvm_auth_status_lock);
+
+	*status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+	struct nvm_auth_status *st;
+
+	if (WARN_ON(!sw->uuid))
+		return;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+
+	if (!st) {
+		st = kzalloc(sizeof(*st), GFP_KERNEL);
+		if (!st)
+			goto unlock;
+
+		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+		INIT_LIST_HEAD(&st->list);
+		list_add_tail(&st->list, &nvm_auth_status_cache);
+	}
+
+	st->status = status;
+unlock:
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	if (st) {
+		list_del(&st->list);
+		kfree(st);
+	}
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = sw->nvm->buf;
+	u16 ds_size;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	image_size = sw->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	if (!sw->safe_mode) {
+		u16 device_id;
+
+		/*
+		 * Make sure the device ID in the image matches the one
+		 * we read from the switch config space.
+		 */
+		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+		if (device_id != sw->config.device_id)
+			return -EINVAL;
+
+		if (sw->generation < 3) {
+			/* Write CSS headers first */
+			ret = dma_port_flash_write(sw->dma_port,
+				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+				DMA_PORT_CSS_MAX_SIZE);
+			if (ret)
+				return ret;
+		}
+
+		/* Skip headers in the image */
+		buf += hdr_size;
+		image_size -= hdr_size;
+	}
+
+	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+	int ret;
+
+	/*
+	 * Root switch NVM upgrade requires that we disconnect the
+	 * existing PCIe paths first (in case it is not in safe mode
+	 * already).
+	 */
+	if (!sw->safe_mode) {
+		ret = tb_domain_disconnect_pcie_paths(sw->tb);
+		if (ret)
+			return ret;
+		/*
+		 * The host controller goes away pretty soon after this if
+		 * everything goes well so getting a timeout is expected.
+		 */
+		ret = dma_port_flash_update_auth(sw->dma_port);
+		return ret == -ETIMEDOUT ? 0 : ret;
+	}
+
+	/*
+	 * From safe mode we can get out by just power cycling the
+	 * switch.
+	 */
+	dma_port_power_cycle(sw->dma_port);
+	return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+	int ret, retries = 10;
+
+	ret = dma_port_flash_update_auth(sw->dma_port);
+	if (ret && ret != -ETIMEDOUT)
+		return ret;
+
+	/*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get a timeout for a while). Once
+	 * we get a response the device needs to be power cycled in order
+	 * for the new NVM to be taken into use.
+	 */
+	do {
+		u32 status;
+
+		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+		if (ret < 0 && ret != -ETIMEDOUT)
+			return ret;
+		if (ret > 0) {
+			if (status) {
+				tb_sw_warn(sw, "failed to authenticate NVM\n");
+				nvm_set_auth_status(sw, status);
+			}
+
+			tb_sw_info(sw, "power cycling the switch now\n");
+			dma_port_power_cycle(sw->dma_port);
+			return 0;
+		}
+
+		msleep(500);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	struct tb_switch *sw = priv;
+
+	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/*
+	 * Since writing the NVM image might require some special steps,
+	 * for example when CSS headers are written, we cache the image
+	 * locally here and handle the special cases when the user asks
+	 * us to authenticate the image.
+	 */
+	if (!sw->nvm->buf) {
+		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!sw->nvm->buf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	sw->nvm->buf_data_size = offset + bytes;
+	memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+					   size_t size, bool active)
+{
+	struct nvmem_config config;
+
+	memset(&config, 0, sizeof(config));
+
+	if (active) {
+		config.name = "nvm_active";
+		config.reg_read = tb_switch_nvm_read;
+	} else {
+		config.name = "nvm_non_active";
+		config.reg_write = tb_switch_nvm_write;
+	}
+
+	config.id = id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = &sw->dev;
+	config.owner = THIS_MODULE;
+	config.root_only = true;
+	config.priv = sw;
+
+	return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+	struct nvmem_device *nvm_dev;
+	struct tb_switch_nvm *nvm;
+	u32 val;
+	int ret;
+
+	if (!sw->dma_port)
+		return 0;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return -ENOMEM;
+
+	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+	/*
+	 * If the switch is in safe mode, the only accessible portion of
+	 * the NVM is the non-active one where userspace is expected to
+	 * write the new functional NVM.
+	 */
+	if (!sw->safe_mode) {
+		u32 nvm_size, hdr_size;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+		nvm_size = (SZ_1M << (val & 7)) / 8;
+		nvm_size = (nvm_size - hdr_size) / 2;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		nvm->major = val >> 16;
+		nvm->minor = val >> 8;
+
+		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_ida;
+		}
+		nvm->active = nvm_dev;
+	}
+
+	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+	if (IS_ERR(nvm_dev)) {
+		ret = PTR_ERR(nvm_dev);
+		goto err_nvm_active;
+	}
+	nvm->non_active = nvm_dev;
+
+	mutex_lock(&switch_lock);
+	sw->nvm = nvm;
+	mutex_unlock(&switch_lock);
+
+	return 0;
+
+err_nvm_active:
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+err_ida:
+	ida_simple_remove(&nvm_ida, nvm->id);
+	kfree(nvm);
+
+	return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+	struct tb_switch_nvm *nvm;
+
+	mutex_lock(&switch_lock);
+	nvm = sw->nvm;
+	sw->nvm = NULL;
+	mutex_unlock(&switch_lock);
+
+	if (!nvm)
+		return;
+
+	/* Remove authentication status in case the switch is unplugged */
+	if (!nvm->authenticating)
+		nvm_clear_auth_status(sw);
+
+	nvmem_unregister(nvm->non_active);
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+	ida_simple_remove(&nvm_ida, nvm->id);
+	vfree(nvm->buf);
+	kfree(nvm);
+}
+
 /* port utility functions */
 
 static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -448,6 +830,83 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(key);
 
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u32 status;
+
+	nvm_get_auth_status(sw, &status);
+	return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	bool val;
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/* If NVMem devices are not yet added */
+	if (!sw->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear the authentication status */
+	nvm_clear_auth_status(sw);
+
+	if (val) {
+		ret = nvm_validate_and_write(sw);
+		if (ret)
+			goto exit_unlock;
+
+		sw->nvm->authenticating = true;
+
+		if (!tb_route(sw))
+			ret = nvm_authenticate_host(sw);
+		else
+			ret = nvm_authenticate_device(sw);
+	}
+
+exit_unlock:
+	mutex_unlock(&switch_lock);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->safe_mode)
+		ret = -ENODATA;
+	else if (!sw->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -480,6 +939,8 @@ static struct attribute *switch_attrs[] = {
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
 	&dev_attr_key.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	&dev_attr_unique_id.attr,
@@ -498,9 +959,14 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate.attr ||
+		   attr == &dev_attr_nvm_version.attr) {
+		if (sw->dma_port)
+			return attr->mode;
+		return 0;
 	}
 
-	return attr->mode;
+	return sw->safe_mode ? 0 : attr->mode;
 }
 
 static struct attribute_group switch_group = {
@@ -652,6 +1118,45 @@ err_free_sw_ports:
 }
 
 /**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except the DMA configuration port before it is
+ * flashed with a valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+	struct tb_switch *sw;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->safe_mode = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+}
+
+/**
  * tb_switch_configure() - Uploads configuration to the switch
  * @sw: Switch to configure
  *
@@ -717,8 +1222,11 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
 }
 
-static void tb_switch_add_dma_port(struct tb_switch *sw)
+static int tb_switch_add_dma_port(struct tb_switch *sw)
 {
+	u32 status;
+	int ret;
+
 	switch (sw->generation) {
 	case 3:
 		break;
@@ -726,14 +1234,49 @@ static void tb_switch_add_dma_port(struct tb_switch *sw)
 	case 2:
 		/* Only root switch can be upgraded */
 		if (tb_route(sw))
-			return;
+			return 0;
 		break;
 
 	default:
-		return;
+		/*
+		 * DMA port is the only thing available when the switch
+		 * is in safe mode.
+		 */
+		if (!sw->safe_mode)
+			return 0;
+		break;
 	}
 
+	if (sw->no_nvm_upgrade)
+		return 0;
+
 	sw->dma_port = dma_port_alloc(sw);
+	if (!sw->dma_port)
+		return 0;
+
+	/*
+	 * Check status of the previous flash authentication. If there
+	 * is one we need to power cycle the switch in any case to make
+	 * it functional again.
+	 */
+	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+	if (ret <= 0)
+		return ret;
+
+	if (status) {
+		tb_sw_info(sw, "switch flash authentication failed\n");
+		tb_switch_set_uuid(sw);
+		nvm_set_auth_status(sw, status);
+	}
+
+	tb_sw_info(sw, "power cycling the switch now\n");
+	dma_port_power_cycle(sw->dma_port);
+
+	/*
+	 * We return an error here, which causes the switch add to fail.
+	 * The switch should appear back after the power cycle is complete.
+	 */
+	return -ESHUTDOWN;
 }
 
 /**
@@ -759,29 +1302,41 @@ int tb_switch_add(struct tb_switch *sw)
 	 * to the userspace. NVM can be accessed through DMA
 	 * configuration based mailbox.
 	 */
-	tb_switch_add_dma_port(sw);
-
-	/* read drom */
-	ret = tb_drom_read(sw);
-	if (ret) {
-		tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+	ret = tb_switch_add_dma_port(sw);
+	if (ret)
 		return ret;
-	}
-	tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-	tb_switch_set_uuid(sw);
+	if (!sw->safe_mode) {
+		/* read drom */
+		ret = tb_drom_read(sw);
+		if (ret) {
+			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			return ret;
+		}
+		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (sw->ports[i].disabled) {
-			tb_port_info(&sw->ports[i], "disabled by eeprom\n");
-			continue;
+		tb_switch_set_uuid(sw);
+
+		for (i = 0; i <= sw->config.max_port_number; i++) {
+			if (sw->ports[i].disabled) {
+				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				continue;
+			}
+			ret = tb_init_port(&sw->ports[i]);
+			if (ret)
+				return ret;
 		}
-		ret = tb_init_port(&sw->ports[i]);
-		if (ret)
-			return ret;
 	}
 
-	return device_add(&sw->dev);
+	ret = device_add(&sw->dev);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_nvm_add(sw);
+	if (ret)
+		device_del(&sw->dev);
+
+	return ret;
 }
 
 /**
@@ -808,6 +1363,7 @@ void tb_switch_remove(struct tb_switch *sw)
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
 
+	tb_switch_nvm_remove(sw);
 	device_unregister(&sw->dev);
 }
 
@@ -976,3 +1532,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
 
 	return NULL;
 }
+
+void tb_switch_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}