summaryrefslogtreecommitdiffstats
path: root/drivers/thunderbolt
diff options
context:
space:
mode:
authorMika Westerberg <mika.westerberg@linux.intel.com>2017-06-06 08:25:14 -0400
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2017-06-09 05:42:43 -0400
commit3e13676862f90dbf5b00d57d5599e57788289897 (patch)
treee0dc4f8ab2798c10c404a1cf52f8fda62c5b9863 /drivers/thunderbolt
parent2c3c4197c9dd878e39e249e1da64bcffceb8a5c4 (diff)
thunderbolt: Add support for DMA configuration based mailbox
The DMA (NHI) port of a switch provides access to the NVM of the host controller (and devices starting from Intel Alpine Ridge). The NVM contains also more complete DROM for the root switch including vendor and device identification strings. This will look for the DMA port capability for each switch and if found populates sw->dma_port. We then teach tb_drom_read() to read the DROM information from NVM if available for the root switch. The DMA port capability also supports upgrading the NVM for both host controller and devices which will be added in subsequent patches. This code is based on the work done by Amir Levy and Michael Jamet. Signed-off-by: Michael Jamet <michael.jamet@intel.com> Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com> Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com> Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Signed-off-by: Andreas Noever <andreas.noever@gmail.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--drivers/thunderbolt/Makefile2
-rw-r--r--drivers/thunderbolt/dma_port.c524
-rw-r--r--drivers/thunderbolt/dma_port.h34
-rw-r--r--drivers/thunderbolt/eeprom.c51
-rw-r--r--drivers/thunderbolt/switch.c30
-rw-r--r--drivers/thunderbolt/tb.h5
6 files changed, 644 insertions, 2 deletions
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index e276a9a62261..9828e862dd35 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,3 +1,3 @@
1obj-${CONFIG_THUNDERBOLT} := thunderbolt.o 1obj-${CONFIG_THUNDERBOLT} := thunderbolt.o
2thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o 2thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o
3thunderbolt-objs += domain.o 3thunderbolt-objs += domain.o dma_port.o
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
new file mode 100644
index 000000000000..af6dde347bee
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.c
@@ -0,0 +1,524 @@
1/*
2 * Thunderbolt DMA configuration based mailbox support
3 *
4 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/delay.h>
14#include <linux/slab.h>
15
16#include "dma_port.h"
17#include "tb_regs.h"
18
19#define DMA_PORT_CAP 0x3e
20
21#define MAIL_DATA 1
22#define MAIL_DATA_DWORDS 16
23
24#define MAIL_IN 17
25#define MAIL_IN_CMD_SHIFT 28
26#define MAIL_IN_CMD_MASK GENMASK(31, 28)
27#define MAIL_IN_CMD_FLASH_WRITE 0x0
28#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
29#define MAIL_IN_CMD_FLASH_READ 0x2
30#define MAIL_IN_CMD_POWER_CYCLE 0x4
31#define MAIL_IN_DWORDS_SHIFT 24
32#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
33#define MAIL_IN_ADDRESS_SHIFT 2
34#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
35#define MAIL_IN_CSS BIT(1)
36#define MAIL_IN_OP_REQUEST BIT(0)
37
38#define MAIL_OUT 18
39#define MAIL_OUT_STATUS_RESPONSE BIT(29)
40#define MAIL_OUT_STATUS_CMD_SHIFT 4
41#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
42#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
43#define MAIL_OUT_STATUS_COMPLETED 0
44#define MAIL_OUT_STATUS_ERR_AUTH 1
45#define MAIL_OUT_STATUS_ERR_ACCESS 2
46
47#define DMA_PORT_TIMEOUT 5000 /* ms */
48#define DMA_PORT_RETRIES 3
49
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers (dword offset in the
 *	  port config space; see DMA_PORT_CAP)
 * @buf: Temporary buffer to store a single block of
 *	 MAIL_DATA_DWORDS dwords (64 bytes)
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};
63
64/*
65 * When the switch is in safe mode it supports very little functionality
66 * so we don't validate that much here.
67 */
68static bool dma_port_match(const struct tb_cfg_request *req,
69 const struct ctl_pkg *pkg)
70{
71 u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
72
73 if (pkg->frame.eof == TB_CFG_PKG_ERROR)
74 return true;
75 if (pkg->frame.eof != req->response_type)
76 return false;
77 if (route != tb_cfg_get_route(req->request))
78 return false;
79 if (pkg->frame.size != req->response_size)
80 return false;
81
82 return true;
83}
84
/* Copy the raw response packet back to the requester's buffer */
static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}
90
/*
 * Read @length dwords starting at @offset from the config space of
 * @port behind @route into @buffer. Uses the raw control channel
 * request API with the relaxed dma_port_match() so that it also works
 * when the switch is in safe mode. Returns %0 on success or a negative
 * errno.
 */
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	/* A read reply carries addr + data, i.e. the write packet layout */
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	/* 12 bytes of packet header followed by @length dwords of data */
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}
131
/*
 * Write @length dwords from @buffer to the config space of @port
 * behind @route starting at @offset. Counterpart of dma_port_read();
 * also usable when the switch is in safe mode. Returns %0 on success
 * or a negative errno.
 */
static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	/* Payload goes after the addr header of the write packet */
	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	/* 12 bytes of packet header followed by @length dwords of data */
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}
170
171static int dma_find_port(struct tb_switch *sw)
172{
173 int port, ret;
174 u32 type;
175
176 /*
177 * The DMA (NHI) port is either 3 or 5 depending on the
178 * controller. Try both starting from 5 which is more common.
179 */
180 port = 5;
181 ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
182 DMA_PORT_TIMEOUT);
183 if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
184 return port;
185
186 port = 3;
187 ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
188 DMA_PORT_TIMEOUT);
189 if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
190 return port;
191
192 return -ENODEV;
193}
194
/**
 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 * @sw: Switch from where find the DMA port
 *
 * Function checks if the switch NHI port supports DMA configuration
 * based mailbox capability and if it does, allocates and initializes
 * DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	/* Bounce buffer for one mailbox data block (16 dwords) */
	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}
231
232/**
233 * dma_port_free() - Release DMA control port structure
234 * @dma: DMA control port
235 */
236void dma_port_free(struct tb_dma_port *dma)
237{
238 if (dma) {
239 kfree(dma->buf);
240 kfree(dma);
241 }
242}
243
/*
 * Poll MAIL_IN until the hardware clears MAIL_IN_OP_REQUEST, which
 * signals that the previously issued command has completed. Each
 * register read uses a short 50 ms timeout; a read timeout is treated
 * as transient and retried until @timeout (in ms) expires. Returns %0
 * on completion, -ETIMEDOUT on overall timeout, or another negative
 * errno from the read.
 */
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			/* Only a read timeout is worth retrying */
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			/* Request bit cleared -> command done */
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}
268
269static int status_to_errno(u32 status)
270{
271 switch (status & MAIL_OUT_STATUS_MASK) {
272 case MAIL_OUT_STATUS_COMPLETED:
273 return 0;
274 case MAIL_OUT_STATUS_ERR_AUTH:
275 return -EINVAL;
276 case MAIL_OUT_STATUS_ERR_ACCESS:
277 return -EACCES;
278 }
279
280 return -EIO;
281}
282
/*
 * Issue one mailbox command: write @in to MAIL_IN, wait up to @timeout
 * ms for the operation to complete, then read MAIL_OUT and translate
 * its status field into an errno. Returns %0 on success.
 */
static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}
306
/*
 * Read one block (at most MAIL_DATA_DWORDS dwords) from the active
 * flash region at @address into @buf through the mailbox. @size and
 * @address are rounded down to dword granularity by the divisions.
 *
 * NOTE(review): when dwords == MAIL_DATA_DWORDS the dwords field is
 * deliberately left 0 — presumably the hardware interprets 0 as a full
 * 16-dword transfer since the field is only 4 bits wide; confirm
 * against the DMA port mailbox documentation.
 */
static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Completed read data lands in the MAIL_DATA registers */
	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
330
331static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
332 const void *buf, u32 size)
333{
334 struct tb_switch *sw = dma->sw;
335 u32 in, dwaddress, dwords;
336 int ret;
337
338 dwords = size / 4;
339
340 /* Write the block to MAIL_DATA registers */
341 ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
342 dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
343
344 in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
345
346 /* CSS header write is always done to the same magic address */
347 if (address >= DMA_PORT_CSS_ADDRESS) {
348 dwaddress = DMA_PORT_CSS_ADDRESS;
349 in |= MAIL_IN_CSS;
350 } else {
351 dwaddress = address / 4;
352 }
353
354 in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
355 in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
356 in |= MAIL_IN_OP_REQUEST;
357
358 return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
359}
360
361/**
362 * dma_port_flash_read() - Read from active flash region
363 * @dma: DMA control port
364 * @address: Address relative to the start of active region
365 * @buf: Buffer where the data is read
366 * @size: Size of the buffer
367 */
368int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
369 void *buf, size_t size)
370{
371 unsigned int retries = DMA_PORT_RETRIES;
372 unsigned int offset;
373
374 offset = address & 3;
375 address = address & ~3;
376
377 do {
378 u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
379 int ret;
380
381 ret = dma_port_flash_read_block(dma, address, dma->buf,
382 ALIGN(nbytes, 4));
383 if (ret) {
384 if (ret == -ETIMEDOUT) {
385 if (retries--)
386 continue;
387 ret = -EIO;
388 }
389 return ret;
390 }
391
392 memcpy(buf, dma->buf + offset, nbytes);
393
394 size -= nbytes;
395 address += nbytes;
396 buf += nbytes;
397 } while (size > 0);
398
399 return 0;
400}
401
402/**
403 * dma_port_flash_write() - Write to non-active flash region
404 * @dma: DMA control port
405 * @address: Address relative to the start of non-active region
406 * @buf: Data to write
407 * @size: Size of the buffer
408 *
409 * Writes block of data to the non-active flash region of the switch. If
410 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
411 * using CSS command.
412 */
413int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
414 const void *buf, size_t size)
415{
416 unsigned int retries = DMA_PORT_RETRIES;
417 unsigned int offset;
418
419 if (address >= DMA_PORT_CSS_ADDRESS) {
420 offset = 0;
421 if (size > DMA_PORT_CSS_MAX_SIZE)
422 return -E2BIG;
423 } else {
424 offset = address & 3;
425 address = address & ~3;
426 }
427
428 do {
429 u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
430 int ret;
431
432 memcpy(dma->buf + offset, buf, nbytes);
433
434 ret = dma_port_flash_write_block(dma, address, buf, nbytes);
435 if (ret) {
436 if (ret == -ETIMEDOUT) {
437 if (retries--)
438 continue;
439 ret = -EIO;
440 }
441 return ret;
442 }
443
444 size -= nbytes;
445 address += nbytes;
446 buf += nbytes;
447 } while (size > 0);
448
449 return 0;
450}
451
452/**
453 * dma_port_flash_update_auth() - Starts flash authenticate cycle
454 * @dma: DMA control port
455 *
456 * Starts the flash update authentication cycle. If the image in the
457 * non-active area was valid, the switch starts upgrade process where
458 * active and non-active area get swapped in the end. Caller should call
459 * dma_port_flash_update_auth_status() to get status of this command.
460 * This is because if the switch in question is root switch the
461 * thunderbolt host controller gets reset as well.
462 */
463int dma_port_flash_update_auth(struct tb_dma_port *dma)
464{
465 u32 in;
466
467 in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
468 in |= MAIL_IN_OP_REQUEST;
469
470 return dma_port_request(dma, in, 150);
471}
472
/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation (may be %NULL if not needed)
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further
 * action is required. If there is status, %1 is returned instead and
 * @status holds the failure code.
 *
 * Negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}
509
510/**
511 * dma_port_power_cycle() - Power cycles the switch
512 * @dma: DMA control port
513 *
514 * Triggers power cycle to the switch.
515 */
516int dma_port_power_cycle(struct tb_dma_port *dma)
517{
518 u32 in;
519
520 in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
521 in |= MAIL_IN_OP_REQUEST;
522
523 return dma_port_request(dma, in, 150);
524}
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
new file mode 100644
index 000000000000..c4a69e0fbff7
--- /dev/null
+++ b/drivers/thunderbolt/dma_port.h
@@ -0,0 +1,34 @@
1/*
2 * Thunderbolt DMA configuration based mailbox support
3 *
4 * Copyright (C) 2017, Intel Corporation
5 * Authors: Michael Jamet <michael.jamet@intel.com>
6 * Mika Westerberg <mika.westerberg@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
#ifndef DMA_PORT_H_
#define DMA_PORT_H_

#include "tb.h"

struct tb_switch;
struct tb_dma_port;

/* Magic flash address that selects a CSS header write */
#define DMA_PORT_CSS_ADDRESS		0x3fffff
/* Maximum size of a CSS header block in bytes */
#define DMA_PORT_CSS_MAX_SIZE		SZ_128

struct tb_dma_port *dma_port_alloc(struct tb_switch *sw);
void dma_port_free(struct tb_dma_port *dma);
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size);
int dma_port_flash_update_auth(struct tb_dma_port *dma);
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status);
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			const void *buf, size_t size);
int dma_port_power_cycle(struct tb_dma_port *dma);

#endif
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index d40a5f07fc4c..996c6e29c8ad 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -429,6 +429,50 @@ err:
429 return -EINVAL; 429 return -EINVAL;
430} 430}
431 431
/*
 * Copy the DROM of the switch from NVM through the DMA mailbox into
 * sw->drom and return its size in @size. Returns %0 on success,
 * -ENODEV when the switch has no DMA port or no DROM in NVM, or
 * another negative errno on read/allocation failure.
 */
static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
	u32 drom_offset;
	int ret;

	if (!sw->dma_port)
		return -ENODEV;

	/*
	 * NOTE(review): assumes the dword at cap_plug_events + 12 holds
	 * the NVM offset of the DROM — confirm against the controller
	 * register documentation.
	 */
	ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH,
			 sw->cap_plug_events + 12, 1);
	if (ret)
		return ret;

	if (!drom_offset)
		return -ENODEV;

	/* 16-bit DROM size field lives 14 bytes into the DROM header */
	ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
				  sizeof(*size));
	if (ret)
		return ret;

	/* Size includes CRC8 + UID + CRC32 */
	*size += 1 + 8 + 4;
	sw->drom = kzalloc(*size, GFP_KERNEL);
	if (!sw->drom)
		return -ENOMEM;

	ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
	if (ret)
		goto err_free;

	/*
	 * Read UID from the minimal DROM because the one in NVM is just
	 * a placeholder.
	 */
	tb_drom_read_uid_only(sw, &sw->uid);
	return 0;

err_free:
	kfree(sw->drom);
	sw->drom = NULL;
	return ret;
}
475
432/** 476/**
433 * tb_drom_read - copy drom to sw->drom and parse it 477 * tb_drom_read - copy drom to sw->drom and parse it
434 */ 478 */
@@ -450,6 +494,10 @@ int tb_drom_read(struct tb_switch *sw)
450 if (tb_drom_copy_efi(sw, &size) == 0) 494 if (tb_drom_copy_efi(sw, &size) == 0)
451 goto parse; 495 goto parse;
452 496
497 /* Non-Apple hardware has the DROM as part of NVM */
498 if (tb_drom_copy_nvm(sw, &size) == 0)
499 goto parse;
500
453 /* 501 /*
454 * The root switch contains only a dummy drom (header only, 502 * The root switch contains only a dummy drom (header only,
455 * no entries). Hardcode the configuration here. 503 * no entries). Hardcode the configuration here.
@@ -510,7 +558,8 @@ parse:
510 header->uid_crc8, crc); 558 header->uid_crc8, crc);
511 goto err; 559 goto err;
512 } 560 }
513 sw->uid = header->uid; 561 if (!sw->uid)
562 sw->uid = header->uid;
514 sw->vendor = header->vendor_id; 563 sw->vendor = header->vendor_id;
515 sw->device = header->model_id; 564 sw->device = header->model_id;
516 565
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 6384061100b0..4b47e0999cda 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -377,6 +377,8 @@ static void tb_switch_release(struct device *dev)
377{ 377{
378 struct tb_switch *sw = tb_to_switch(dev); 378 struct tb_switch *sw = tb_to_switch(dev);
379 379
380 dma_port_free(sw->dma_port);
381
380 kfree(sw->uuid); 382 kfree(sw->uuid);
381 kfree(sw->device_name); 383 kfree(sw->device_name);
382 kfree(sw->vendor_name); 384 kfree(sw->vendor_name);
@@ -570,6 +572,25 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
570 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); 572 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
571} 573}
572 574
/*
 * Attach a DMA configuration based mailbox to the switch when the
 * hardware generation supports it: every generation 3 switch, but only
 * the root switch (empty route) for generation 2. Other generations
 * are skipped. On success sw->dma_port is set; dma_port_alloc()
 * returns %NULL if the capability is not present.
 */
static void tb_switch_add_dma_port(struct tb_switch *sw)
{
	switch (sw->generation) {
	case 3:
		break;

	case 2:
		/* Only root switch can be upgraded */
		if (tb_route(sw))
			return;
		break;

	default:
		return;
	}

	sw->dma_port = dma_port_alloc(sw);
}
593
573/** 594/**
574 * tb_switch_add() - Add a switch to the domain 595 * tb_switch_add() - Add a switch to the domain
575 * @sw: Switch to add 596 * @sw: Switch to add
@@ -586,6 +607,15 @@ int tb_switch_add(struct tb_switch *sw)
586{ 607{
587 int i, ret; 608 int i, ret;
588 609
610 /*
611 * Initialize DMA control port now before we read DROM. Recent
612 * host controllers have more complete DROM on NVM that includes
613 * vendor and model identification strings which we then expose
614 * to the userspace. NVM can be accessed through DMA
615 * configuration based mailbox.
616 */
617 tb_switch_add_dma_port(sw);
618
589 /* read drom */ 619 /* read drom */
590 ret = tb_drom_read(sw); 620 ret = tb_drom_read(sw);
591 if (ret) { 621 if (ret) {
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 39d24dff82c5..31521c531715 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -12,12 +12,16 @@
12 12
13#include "tb_regs.h" 13#include "tb_regs.h"
14#include "ctl.h" 14#include "ctl.h"
15#include "dma_port.h"
15 16
16/** 17/**
17 * struct tb_switch - a thunderbolt switch 18 * struct tb_switch - a thunderbolt switch
18 * @dev: Device for the switch 19 * @dev: Device for the switch
19 * @config: Switch configuration 20 * @config: Switch configuration
20 * @ports: Ports in this switch 21 * @ports: Ports in this switch
22 * @dma_port: If the switch has port supporting DMA configuration based
23 * mailbox this will hold the pointer to that (%NULL
24 * otherwise).
21 * @tb: Pointer to the domain the switch belongs to 25 * @tb: Pointer to the domain the switch belongs to
22 * @uid: Unique ID of the switch 26 * @uid: Unique ID of the switch
23 * @uuid: UUID of the switch (or %NULL if not supported) 27 * @uuid: UUID of the switch (or %NULL if not supported)
@@ -34,6 +38,7 @@ struct tb_switch {
34 struct device dev; 38 struct device dev;
35 struct tb_regs_switch_header config; 39 struct tb_regs_switch_header config;
36 struct tb_port *ports; 40 struct tb_port *ports;
41 struct tb_dma_port *dma_port;
37 struct tb *tb; 42 struct tb *tb;
38 u64 uid; 43 u64 uid;
39 uuid_be *uuid; 44 uuid_be *uuid;