-rw-r--r--  Documentation/ABI/testing/sysfs-bus-thunderbolt |  26
-rw-r--r--  drivers/thunderbolt/Kconfig                     |   1
-rw-r--r--  drivers/thunderbolt/domain.c                    |  18
-rw-r--r--  drivers/thunderbolt/icm.c                       |  33
-rw-r--r--  drivers/thunderbolt/nhi.h                       |   1
-rw-r--r--  drivers/thunderbolt/switch.c                    | 603
-rw-r--r--  drivers/thunderbolt/tb.c                        |   7
-rw-r--r--  drivers/thunderbolt/tb.h                        |  40

8 files changed, 706 insertions(+), 23 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt
index 05b7f9a6431f..2a98149943ea 100644
--- a/Documentation/ABI/testing/sysfs-bus-thunderbolt
+++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt
@@ -82,3 +82,29 @@ Description:	This attribute contains unique_id string of this device.
 		This is either read from hardware registers (UUID on
 		newer hardware) or based on UID from the device DROM.
 		Can be used to uniquely identify particular device.
+
+What:		/sys/bus/thunderbolt/devices/.../nvm_version
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	If the device has upgradeable firmware the version
+		number is available here. Format: %x.%x, major.minor.
+		If the device is in safe mode reading the file returns
+		-ENODATA instead as the NVM version is not available.
+
+What:		/sys/bus/thunderbolt/devices/.../nvm_authenticate
+Date:		Sep 2017
+KernelVersion:	4.13
+Contact:	thunderbolt-software@lists.01.org
+Description:	When new NVM image is written to the non-active NVM
+		area (through non_activeX NVMem device), the
+		authentication procedure is started by writing 1 to
+		this file. If everything goes well, the device is
+		restarted with the new NVM firmware. If the image
+		verification fails an error code is returned instead.
+
+		When read holds status of the last authentication
+		operation if an error occurred during the process. This
+		is directly the status value from the DMA configuration
+		based mailbox before the device is power cycled. Writing
+		0 here clears the status.
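Note on usage: taken together, the two attributes above give userspace a two-step upgrade flow: write the image into the non-active NVMem device, then write 1 to nvm_authenticate. The following minimal C sketch illustrates that flow; the device name ("0-1") and the NVMem id ("nvm_non_active0") are illustrative placeholders that depend on the actual topology, and error handling is kept to a minimum.

/* tb-nvm-upgrade.c: sketch of the documented upgrade flow.
 * Build: cc -o tb-nvm-upgrade tb-nvm-upgrade.c
 * Run as root; both sysfs files are root-only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Placeholder paths; substitute the real device and NVMem id. */
#define NON_ACTIVE "/sys/bus/thunderbolt/devices/0-1/nvm_non_active0/nvmem"
#define AUTH "/sys/bus/thunderbolt/devices/0-1/nvm_authenticate"

static int copy_file(const char *src, const char *dst)
{
	char buf[4096];
	ssize_t n;
	int in, out, ret = 0;

	in = open(src, O_RDONLY);
	if (in < 0)
		return -1;
	out = open(dst, O_WRONLY);
	if (out < 0) {
		close(in);
		return -1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			ret = -1;
			break;
		}
	}
	if (n < 0)
		ret = -1;
	close(in);
	close(out);
	return ret;
}

int main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <image.nvm>\n", argv[0]);
		return 1;
	}

	/* Step 1: stage the image in the non-active NVM portion. */
	if (copy_file(argv[1], NON_ACTIVE)) {
		perror("writing image");
		return 1;
	}

	/* Step 2: start authentication; on success the device is
	 * restarted with the new firmware, on failure the write
	 * returns an error and nvm_authenticate holds the status. */
	fd = open(AUTH, O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("nvm_authenticate");
		return 1;
	}
	close(fd);
	return 0;
}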
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index a9cc724985ad..f4869c38c7e4 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -6,6 +6,7 @@ menuconfig THUNDERBOLT
 	select CRC32
 	select CRYPTO
 	select CRYPTO_HASH
+	select NVMEM
 	help
 	  Thunderbolt Controller driver. This driver is required if you
 	  want to hotplug Thunderbolt devices on Apple hardware or on PCs
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index f71b63e90016..9f2dcd48974d 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -426,6 +426,23 @@ err_free_tfm:
 	return ret;
 }
 
+/**
+ * tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
+ * @tb: Domain whose PCIe paths to disconnect
+ *
+ * This needs to be called in preparation for NVM upgrade of the host
+ * controller. Makes sure all PCIe paths are disconnected.
+ *
+ * Return %0 on success and negative errno in case of error.
+ */
+int tb_domain_disconnect_pcie_paths(struct tb *tb)
+{
+	if (!tb->cm_ops->disconnect_pcie_paths)
+		return -EPERM;
+
+	return tb->cm_ops->disconnect_pcie_paths(tb);
+}
+
 int tb_domain_init(void)
 {
 	return bus_register(&tb_bus_type);
@@ -435,4 +452,5 @@ void tb_domain_exit(void)
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
+	tb_switch_exit();
 }
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 0ffa4ec249ac..8ee340290219 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -54,6 +54,7 @@
  *		where ICM needs to be started manually
  * @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
  *	     (only set when @upstream_port is not %NULL)
+ * @safe_mode: ICM is in safe mode
  * @is_supported: Checks if we can support ICM on this controller
  * @get_mode: Read and return the ICM firmware mode (optional)
  * @get_route: Find a route string for given switch
@@ -65,6 +66,7 @@ struct icm {
 	struct delayed_work rescan_work;
 	struct pci_dev *upstream_port;
 	int vnd_cap;
+	bool safe_mode;
 	bool (*is_supported)(struct tb *tb);
 	int (*get_mode)(struct tb *tb);
 	int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
@@ -852,6 +854,10 @@ static int icm_firmware_init(struct tb *tb)
 		ret = icm->get_mode(tb);
 
 		switch (ret) {
+		case NHI_FW_SAFE_MODE:
+			icm->safe_mode = true;
+			break;
+
 		case NHI_FW_CM_MODE:
 			/* Ask ICM to accept all Thunderbolt devices */
 			nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
@@ -879,12 +885,20 @@ static int icm_firmware_init(struct tb *tb)
 
 static int icm_driver_ready(struct tb *tb)
 {
+	struct icm *icm = tb_priv(tb);
 	int ret;
 
 	ret = icm_firmware_init(tb);
 	if (ret)
 		return ret;
 
+	if (icm->safe_mode) {
+		tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
+		tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
+		tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
+		return 0;
+	}
+
 	return __icm_driver_ready(tb, &tb->security_level);
 }
 
@@ -975,12 +989,23 @@ static void icm_complete(struct tb *tb)
 
 static int icm_start(struct tb *tb)
 {
+	struct icm *icm = tb_priv(tb);
 	int ret;
 
-	tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
+	if (icm->safe_mode)
+		tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
+	else
+		tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
 	if (!tb->root_switch)
 		return -ENODEV;
 
+	/*
+	 * NVM upgrade has not been tested on Apple systems and they
+	 * don't provide images publicly either. To be on the safe side
+	 * prevent root switch NVM upgrade on Macs for now.
+	 */
+	tb->root_switch->no_nvm_upgrade = is_apple();
+
 	ret = tb_switch_add(tb->root_switch);
 	if (ret)
 		tb_switch_put(tb->root_switch);
@@ -998,6 +1023,11 @@ static void icm_stop(struct tb *tb)
 		nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 }
 
+static int icm_disconnect_pcie_paths(struct tb *tb)
+{
+	return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
+}
+
 /* Falcon Ridge and Alpine Ridge */
 static const struct tb_cm_ops icm_fr_ops = {
 	.driver_ready = icm_driver_ready,
@@ -1009,6 +1039,7 @@ static const struct tb_cm_ops icm_fr_ops = {
 	.approve_switch = icm_fr_approve_switch,
 	.add_switch_key = icm_fr_add_switch_key,
 	.challenge_switch_key = icm_fr_challenge_switch_key,
+	.disconnect_pcie_paths = icm_disconnect_pcie_paths,
 };
 
 struct tb *icm_probe(struct tb_nhi *nhi)
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 953864ae0ab3..5b5bb2c436be 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -155,6 +155,7 @@ enum nhi_fw_mode {
 
 enum nhi_mailbox_cmd {
 	NHI_MAILBOX_SAVE_DEVS = 0x05,
+	NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06,
 	NHI_MAILBOX_DRV_UNLOADS = 0x07,
 	NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23,
 };
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 1524edf42ee8..ab3e8f410444 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -5,13 +5,395 @@
  */
 
 #include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/nvmem-provider.h>
+#include <linux/sizes.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include "tb.h"
 
 /* Switch authorization from userspace is serialized by this lock */
 static DEFINE_MUTEX(switch_lock);
 
+/* Switch NVM support */
+
+#define NVM_DEVID		0x05
+#define NVM_VERSION		0x08
+#define NVM_CSS			0x10
+#define NVM_FLASH_SIZE		0x45
+
+#define NVM_MIN_SIZE		SZ_32K
+#define NVM_MAX_SIZE		SZ_512K
+
+static DEFINE_IDA(nvm_ida);
+
+struct nvm_auth_status {
+	struct list_head list;
+	uuid_be uuid;
+	u32 status;
+};
+
+/*
+ * Hold NVM authentication failure status per switch This information
+ * needs to stay around even when the switch gets power cycled so we
+ * keep it separately.
+ */
+static LIST_HEAD(nvm_auth_status_cache);
+static DEFINE_MUTEX(nvm_auth_status_lock);
+
+static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	list_for_each_entry(st, &nvm_auth_status_cache, list) {
+		if (!uuid_be_cmp(st->uuid, *sw->uuid))
+			return st;
+	}
+
+	return NULL;
+}
+
+static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	mutex_unlock(&nvm_auth_status_lock);
+
+	*status = st ? st->status : 0;
+}
+
+static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
+{
+	struct nvm_auth_status *st;
+
+	if (WARN_ON(!sw->uuid))
+		return;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+
+	if (!st) {
+		st = kzalloc(sizeof(*st), GFP_KERNEL);
+		if (!st)
+			goto unlock;
+
+		memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
+		INIT_LIST_HEAD(&st->list);
+		list_add_tail(&st->list, &nvm_auth_status_cache);
+	}
+
+	st->status = status;
+unlock:
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static void nvm_clear_auth_status(const struct tb_switch *sw)
+{
+	struct nvm_auth_status *st;
+
+	mutex_lock(&nvm_auth_status_lock);
+	st = __nvm_get_auth_status(sw);
+	if (st) {
+		list_del(&st->list);
+		kfree(st);
+	}
+	mutex_unlock(&nvm_auth_status_lock);
+}
+
+static int nvm_validate_and_write(struct tb_switch *sw)
+{
+	unsigned int image_size, hdr_size;
+	const u8 *buf = sw->nvm->buf;
+	u16 ds_size;
+	int ret;
+
+	if (!buf)
+		return -EINVAL;
+
+	image_size = sw->nvm->buf_data_size;
+	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
+		return -EINVAL;
+
+	/*
+	 * FARB pointer must point inside the image and must at least
+	 * contain parts of the digital section we will be reading here.
+	 */
+	hdr_size = (*(u32 *)buf) & 0xffffff;
+	if (hdr_size + NVM_DEVID + 2 >= image_size)
+		return -EINVAL;
+
+	/* Digital section start should be aligned to 4k page */
+	if (!IS_ALIGNED(hdr_size, SZ_4K))
+		return -EINVAL;
+
+	/*
+	 * Read digital section size and check that it also fits inside
+	 * the image.
+	 */
+	ds_size = *(u16 *)(buf + hdr_size);
+	if (ds_size >= image_size)
+		return -EINVAL;
+
+	if (!sw->safe_mode) {
+		u16 device_id;
+
+		/*
+		 * Make sure the device ID in the image matches the one
+		 * we read from the switch config space.
+		 */
+		device_id = *(u16 *)(buf + hdr_size + NVM_DEVID);
+		if (device_id != sw->config.device_id)
+			return -EINVAL;
+
+		if (sw->generation < 3) {
+			/* Write CSS headers first */
+			ret = dma_port_flash_write(sw->dma_port,
+				DMA_PORT_CSS_ADDRESS, buf + NVM_CSS,
+				DMA_PORT_CSS_MAX_SIZE);
+			if (ret)
+				return ret;
+		}
+
+		/* Skip headers in the image */
+		buf += hdr_size;
+		image_size -= hdr_size;
+	}
+
+	return dma_port_flash_write(sw->dma_port, 0, buf, image_size);
+}
+
+static int nvm_authenticate_host(struct tb_switch *sw)
+{
+	int ret;
+
+	/*
+	 * Root switch NVM upgrade requires that we disconnect the
+	 * existing PCIe paths first (in case it is not in safe mode
+	 * already).
+	 */
+	if (!sw->safe_mode) {
+		ret = tb_domain_disconnect_pcie_paths(sw->tb);
+		if (ret)
+			return ret;
+		/*
+		 * The host controller goes away pretty soon after this if
+		 * everything goes well so getting timeout is expected.
+		 */
+		ret = dma_port_flash_update_auth(sw->dma_port);
+		return ret == -ETIMEDOUT ? 0 : ret;
+	}
+
+	/*
+	 * From safe mode we can get out by just power cycling the
+	 * switch.
+	 */
+	dma_port_power_cycle(sw->dma_port);
+	return 0;
+}
+
+static int nvm_authenticate_device(struct tb_switch *sw)
+{
+	int ret, retries = 10;
+
+	ret = dma_port_flash_update_auth(sw->dma_port);
+	if (ret && ret != -ETIMEDOUT)
+		return ret;
+
+	/*
+	 * Poll here for the authentication status. It takes some time
+	 * for the device to respond (we get timeout for a while). Once
+	 * we get response the device needs to be power cycled in order
+	 * to the new NVM to be taken into use.
+	 */
+	do {
+		u32 status;
+
+		ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+		if (ret < 0 && ret != -ETIMEDOUT)
+			return ret;
+		if (ret > 0) {
+			if (status) {
+				tb_sw_warn(sw, "failed to authenticate NVM\n");
+				nvm_set_auth_status(sw, status);
+			}
+
+			tb_sw_info(sw, "power cycling the switch now\n");
+			dma_port_power_cycle(sw->dma_port);
+			return 0;
+		}
+
+		msleep(500);
+	} while (--retries);
+
+	return -ETIMEDOUT;
+}
+
+static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val,
+			      size_t bytes)
+{
+	struct tb_switch *sw = priv;
+
+	return dma_port_flash_read(sw->dma_port, offset, val, bytes);
+}
+
+static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
+			       size_t bytes)
+{
+	struct tb_switch *sw = priv;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/*
+	 * Since writing the NVM image might require some special steps,
+	 * for example when CSS headers are written, we cache the image
+	 * locally here and handle the special cases when the user asks
+	 * us to authenticate the image.
+	 */
+	if (!sw->nvm->buf) {
+		sw->nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!sw->nvm->buf) {
+			ret = -ENOMEM;
+			goto unlock;
+		}
+	}
+
+	sw->nvm->buf_data_size = offset + bytes;
+	memcpy(sw->nvm->buf + offset, val, bytes);
+
+unlock:
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+
+static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
+					   size_t size, bool active)
+{
+	struct nvmem_config config;
+
+	memset(&config, 0, sizeof(config));
+
+	if (active) {
+		config.name = "nvm_active";
+		config.reg_read = tb_switch_nvm_read;
+	} else {
+		config.name = "nvm_non_active";
+		config.reg_write = tb_switch_nvm_write;
+	}
+
+	config.id = id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = size;
+	config.dev = &sw->dev;
+	config.owner = THIS_MODULE;
+	config.root_only = true;
+	config.priv = sw;
+
+	return nvmem_register(&config);
+}
+
+static int tb_switch_nvm_add(struct tb_switch *sw)
+{
+	struct nvmem_device *nvm_dev;
+	struct tb_switch_nvm *nvm;
+	u32 val;
+	int ret;
+
+	if (!sw->dma_port)
+		return 0;
+
+	nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
+	if (!nvm)
+		return -ENOMEM;
+
+	nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+
+	/*
+	 * If the switch is in safe-mode the only accessible portion of
+	 * the NVM is the non-active one where userspace is expected to
+	 * write new functional NVM.
+	 */
+	if (!sw->safe_mode) {
+		u32 nvm_size, hdr_size;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
+		nvm_size = (SZ_1M << (val & 7)) / 8;
+		nvm_size = (nvm_size - hdr_size) / 2;
+
+		ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val,
+					  sizeof(val));
+		if (ret)
+			goto err_ida;
+
+		nvm->major = val >> 16;
+		nvm->minor = val >> 8;
+
+		nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true);
+		if (IS_ERR(nvm_dev)) {
+			ret = PTR_ERR(nvm_dev);
+			goto err_ida;
+		}
+		nvm->active = nvm_dev;
+	}
+
+	nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false);
+	if (IS_ERR(nvm_dev)) {
+		ret = PTR_ERR(nvm_dev);
+		goto err_nvm_active;
+	}
+	nvm->non_active = nvm_dev;
+
+	mutex_lock(&switch_lock);
+	sw->nvm = nvm;
+	mutex_unlock(&switch_lock);
+
+	return 0;
+
+err_nvm_active:
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+err_ida:
+	ida_simple_remove(&nvm_ida, nvm->id);
+	kfree(nvm);
+
+	return ret;
+}
+
+static void tb_switch_nvm_remove(struct tb_switch *sw)
+{
+	struct tb_switch_nvm *nvm;
+
+	mutex_lock(&switch_lock);
+	nvm = sw->nvm;
+	sw->nvm = NULL;
+	mutex_unlock(&switch_lock);
+
+	if (!nvm)
+		return;
+
+	/* Remove authentication status in case the switch is unplugged */
+	if (!nvm->authenticating)
+		nvm_clear_auth_status(sw);
+
+	nvmem_unregister(nvm->non_active);
+	if (nvm->active)
+		nvmem_unregister(nvm->active);
+	ida_simple_remove(&nvm_ida, nvm->id);
+	vfree(nvm->buf);
+	kfree(nvm);
+}
+
 /* port utility functions */
 
 static const char *tb_port_type(struct tb_regs_port_header *port)
@@ -448,6 +830,83 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(key);
 
+static ssize_t nvm_authenticate_show(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	u32 status;
+
+	nvm_get_auth_status(sw, &status);
+	return sprintf(buf, "%#x\n", status);
+}
+
+static ssize_t nvm_authenticate_store(struct device *dev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	bool val;
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	/* If NVMem devices are not yet added */
+	if (!sw->nvm) {
+		ret = -EAGAIN;
+		goto exit_unlock;
+	}
+
+	ret = kstrtobool(buf, &val);
+	if (ret)
+		goto exit_unlock;
+
+	/* Always clear the authentication status */
+	nvm_clear_auth_status(sw);
+
+	if (val) {
+		ret = nvm_validate_and_write(sw);
+		if (ret)
+			goto exit_unlock;
+
+		sw->nvm->authenticating = true;
+
+		if (!tb_route(sw))
+			ret = nvm_authenticate_host(sw);
+		else
+			ret = nvm_authenticate_device(sw);
+	}
+
+exit_unlock:
+	mutex_unlock(&switch_lock);
+
+	if (ret)
+		return ret;
+	return count;
+}
+static DEVICE_ATTR_RW(nvm_authenticate);
+
+static ssize_t nvm_version_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct tb_switch *sw = tb_to_switch(dev);
+	int ret;
+
+	if (mutex_lock_interruptible(&switch_lock))
+		return -ERESTARTSYS;
+
+	if (sw->safe_mode)
+		ret = -ENODATA;
+	else if (!sw->nvm)
+		ret = -EAGAIN;
+	else
+		ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
+
+	mutex_unlock(&switch_lock);
+
+	return ret;
+}
+static DEVICE_ATTR_RO(nvm_version);
+
 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
@@ -480,6 +939,8 @@ static struct attribute *switch_attrs[] = {
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
 	&dev_attr_key.attr,
+	&dev_attr_nvm_authenticate.attr,
+	&dev_attr_nvm_version.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	&dev_attr_unique_id.attr,
@@ -498,9 +959,14 @@ static umode_t switch_attr_is_visible(struct kobject *kobj,
 		    sw->security_level == TB_SECURITY_SECURE)
 			return attr->mode;
 		return 0;
+	} else if (attr == &dev_attr_nvm_authenticate.attr ||
+		   attr == &dev_attr_nvm_version.attr) {
+		if (sw->dma_port)
+			return attr->mode;
+		return 0;
 	}
 
-	return attr->mode;
+	return sw->safe_mode ? 0 : attr->mode;
 }
 
 static struct attribute_group switch_group = {
@@ -652,6 +1118,45 @@ err_free_sw_ports:
 }
 
 /**
+ * tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
+ * @tb: Pointer to the owning domain
+ * @parent: Parent device for this switch
+ * @route: Route string for this switch
+ *
+ * This creates a switch in safe mode. This means the switch pretty much
+ * lacks all capabilities except DMA configuration port before it is
+ * flashed with a valid NVM firmware.
+ *
+ * The returned switch must be released by calling tb_switch_put().
+ *
+ * Return: Pointer to the allocated switch or %NULL in case of failure
+ */
+struct tb_switch *
+tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
+{
+	struct tb_switch *sw;
+
+	sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+	if (!sw)
+		return NULL;
+
+	sw->tb = tb;
+	sw->config.depth = tb_route_length(route);
+	sw->config.route_hi = upper_32_bits(route);
+	sw->config.route_lo = lower_32_bits(route);
+	sw->safe_mode = true;
+
+	device_initialize(&sw->dev);
+	sw->dev.parent = parent;
+	sw->dev.bus = &tb_bus_type;
+	sw->dev.type = &tb_switch_type;
+	sw->dev.groups = switch_groups;
+	dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
+
+	return sw;
+}
+
+/**
  * tb_switch_configure() - Uploads configuration to the switch
  * @sw: Switch to configure
  *
@@ -717,8 +1222,11 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
 	sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
 }
 
-static void tb_switch_add_dma_port(struct tb_switch *sw)
+static int tb_switch_add_dma_port(struct tb_switch *sw)
 {
+	u32 status;
+	int ret;
+
 	switch (sw->generation) {
 	case 3:
 		break;
@@ -726,14 +1234,49 @@ static void tb_switch_add_dma_port(struct tb_switch *sw)
 	case 2:
 		/* Only root switch can be upgraded */
 		if (tb_route(sw))
-			return;
+			return 0;
 		break;
 
 	default:
-		return;
+		/*
+		 * DMA port is the only thing available when the switch
+		 * is in safe mode.
+		 */
+		if (!sw->safe_mode)
+			return 0;
+		break;
 	}
 
+	if (sw->no_nvm_upgrade)
+		return 0;
+
 	sw->dma_port = dma_port_alloc(sw);
+	if (!sw->dma_port)
+		return 0;
+
+	/*
+	 * Check status of the previous flash authentication. If there
+	 * is one we need to power cycle the switch in any case to make
+	 * it functional again.
+	 */
+	ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
+	if (ret <= 0)
+		return ret;
+
+	if (status) {
+		tb_sw_info(sw, "switch flash authentication failed\n");
+		tb_switch_set_uuid(sw);
+		nvm_set_auth_status(sw, status);
+	}
+
+	tb_sw_info(sw, "power cycling the switch now\n");
+	dma_port_power_cycle(sw->dma_port);
+
+	/*
+	 * We return error here which causes the switch adding failure.
+	 * It should appear back after power cycle is complete.
+	 */
+	return -ESHUTDOWN;
 }
 
 /**
@@ -759,29 +1302,41 @@ int tb_switch_add(struct tb_switch *sw)
 	 * to the userspace. NVM can be accessed through DMA
 	 * configuration based mailbox.
 	 */
-	tb_switch_add_dma_port(sw);
-
-	/* read drom */
-	ret = tb_drom_read(sw);
-	if (ret) {
-		tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+	ret = tb_switch_add_dma_port(sw);
+	if (ret)
 		return ret;
-	}
-	tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-	tb_switch_set_uuid(sw);
+	if (!sw->safe_mode) {
+		/* read drom */
+		ret = tb_drom_read(sw);
+		if (ret) {
+			tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
+			return ret;
+		}
+		tb_sw_info(sw, "uid: %#llx\n", sw->uid);
 
-	for (i = 0; i <= sw->config.max_port_number; i++) {
-		if (sw->ports[i].disabled) {
-			tb_port_info(&sw->ports[i], "disabled by eeprom\n");
-			continue;
+		tb_switch_set_uuid(sw);
+
+		for (i = 0; i <= sw->config.max_port_number; i++) {
+			if (sw->ports[i].disabled) {
+				tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+				continue;
+			}
+			ret = tb_init_port(&sw->ports[i]);
+			if (ret)
+				return ret;
 		}
-		ret = tb_init_port(&sw->ports[i]);
-		if (ret)
-			return ret;
 	}
 
-	return device_add(&sw->dev);
+	ret = device_add(&sw->dev);
+	if (ret)
+		return ret;
+
+	ret = tb_switch_nvm_add(sw);
+	if (ret)
+		device_del(&sw->dev);
+
+	return ret;
 }
 
 /**
@@ -808,6 +1363,7 @@ void tb_switch_remove(struct tb_switch *sw)
 	if (!sw->is_unplugged)
 		tb_plug_events_active(sw, false);
 
+	tb_switch_nvm_remove(sw);
 	device_unregister(&sw->dev);
 }
 
@@ -976,3 +1532,8 @@ struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_be *uuid)
 
 	return NULL;
 }
+
+void tb_switch_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
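A side note on the flash-size arithmetic in tb_switch_nvm_add() above: the low three bits of the NVM_FLASH_SIZE readout select a power-of-two flash size, and whatever remains after the header region is split evenly between the active and non-active portions. The standalone sketch below walks through the computation with a hypothetical register value; reading the field as a megabit count is an inference from the arithmetic, not something the patch states.

/* nvm-size.c: worked example of the tb_switch_nvm_add() size math. */
#include <stdio.h>

int main(void)
{
	unsigned int val = 0x06;	/* hypothetical NVM_FLASH_SIZE readout */
	unsigned int generation = 3;	/* hypothetical: a gen 3 controller */
	unsigned int hdr_size, nvm_size;

	/* Older controllers reserve 8k for headers, gen 3 reserves 16k. */
	hdr_size = generation < 3 ? 8192 : 16384;	/* SZ_8K : SZ_16K */

	/* (SZ_1M << (val & 7)) / 8: 2^(val & 7) megabits, in bytes. */
	nvm_size = ((1u << 20) << (val & 7)) / 8;
	printf("total flash: %u bytes\n", nvm_size);	/* 8388608 (8 MB) */

	/* The rest is split between the active and non-active halves. */
	nvm_size = (nvm_size - hdr_size) / 2;
	printf("per portion: %u bytes\n", nvm_size);	/* 4186112 */

	return 0;
}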
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index ad2304bad592..1b02ca0b6129 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -369,6 +369,13 @@ static int tb_start(struct tb *tb)
 	if (!tb->root_switch)
 		return -ENOMEM;
 
+	/*
+	 * ICM firmware upgrade needs running firmware and in native
+	 * mode that is not available so disable firmware upgrade of the
+	 * root switch.
+	 */
+	tb->root_switch->no_nvm_upgrade = true;
+
 	ret = tb_switch_configure(tb->root_switch);
 	if (ret) {
 		tb_switch_put(tb->root_switch);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index a998b3a251d5..3d9f64676e58 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -7,6 +7,7 @@
 #ifndef TB_H_
 #define TB_H_
 
+#include <linux/nvmem-provider.h>
 #include <linux/pci.h>
 #include <linux/uuid.h>
 
@@ -15,6 +16,30 @@
 #include "dma_port.h"
 
 /**
+ * struct tb_switch_nvm - Structure holding switch NVM information
+ * @major: Major version number of the active NVM portion
+ * @minor: Minor version number of the active NVM portion
+ * @id: Identifier used with both NVM portions
+ * @active: Active portion NVMem device
+ * @non_active: Non-active portion NVMem device
+ * @buf: Buffer where the NVM image is stored before it is written to
+ *	 the actual NVM flash device
+ * @buf_data_size: Number of bytes actually consumed by the new NVM
+ *		   image
+ * @authenticating: The switch is authenticating the new NVM
+ */
+struct tb_switch_nvm {
+	u8 major;
+	u8 minor;
+	int id;
+	struct nvmem_device *active;
+	struct nvmem_device *non_active;
+	void *buf;
+	size_t buf_data_size;
+	bool authenticating;
+};
+
+/**
  * enum tb_security_level - Thunderbolt security level
  * @TB_SECURITY_NONE: No security, legacy mode
  * @TB_SECURITY_USER: User approval required at minimum
@@ -39,7 +64,8 @@ enum tb_security_level {
  * @ports: Ports in this switch
  * @dma_port: If the switch has port supporting DMA configuration based
  *	      mailbox this will hold the pointer to that (%NULL
- *	      otherwise).
+ *	      otherwise). If set it also means the switch has
+ *	      upgradeable NVM.
  * @tb: Pointer to the domain the switch belongs to
  * @uid: Unique ID of the switch
  * @uuid: UUID of the switch (or %NULL if not supported)
@@ -51,6 +77,9 @@ enum tb_security_level {
  * @cap_plug_events: Offset to the plug events capability (%0 if not found)
  * @is_unplugged: The switch is going away
  * @drom: DROM of the switch (%NULL if not found)
+ * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
+ * @no_nvm_upgrade: Prevent NVM upgrade of this switch
+ * @safe_mode: The switch is in safe-mode
  * @authorized: Whether the switch is authorized by user or policy
  * @work: Work used to automatically authorize a switch
  * @security_level: Switch supported security level
@@ -81,6 +110,9 @@ struct tb_switch {
 	int cap_plug_events;
 	bool is_unplugged;
 	u8 *drom;
+	struct tb_switch_nvm *nvm;
+	bool no_nvm_upgrade;
+	bool safe_mode;
 	unsigned int authorized;
 	struct work_struct work;
 	enum tb_security_level security_level;
@@ -172,6 +204,7 @@ struct tb_path {
 * @approve_switch: Approve switch
 * @add_switch_key: Add key to switch
 * @challenge_switch_key: Challenge switch using key
+ * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update
 */
 struct tb_cm_ops {
 	int (*driver_ready)(struct tb *tb);
@@ -187,6 +220,7 @@ struct tb_cm_ops {
 	int (*add_switch_key)(struct tb *tb, struct tb_switch *sw);
 	int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw,
 				    const u8 *challenge, u8 *response);
+	int (*disconnect_pcie_paths)(struct tb *tb);
 };
 
 /**
@@ -340,6 +374,7 @@ extern struct device_type tb_switch_type;
 
 int tb_domain_init(void);
 void tb_domain_exit(void);
+void tb_switch_exit(void);
 
 struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize);
 int tb_domain_add(struct tb *tb);
@@ -351,6 +386,7 @@ void tb_domain_complete(struct tb *tb);
 int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw);
 int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw);
 int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw);
+int tb_domain_disconnect_pcie_paths(struct tb *tb);
 
 static inline void tb_domain_put(struct tb *tb)
 {
@@ -359,6 +395,8 @@ static inline void tb_domain_put(struct tb *tb)
 
 struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
 				  u64 route);
+struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb,
+					    struct device *parent, u64 route);
 int tb_switch_configure(struct tb_switch *sw);
 int tb_switch_add(struct tb_switch *sw);
 void tb_switch_remove(struct tb_switch *sw);