author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:32:24 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-04 20:32:24 -0400
commit    2521129a6d2fd8a81f99cf95055eddea3df914ff (patch)
tree      f8b7879979f656669ce31cbc247b97ae702291fb /drivers/thunderbolt
parent    98a96f202203fecad65b44449077c695686ad4db (diff)
parent    16eb2bfc65ef86d3ac6420d50ddc2c48f0023cee (diff)
Merge tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc driver patches from Greg KH:
"Here's the big driver misc / char pull request for 3.17-rc1.
Lots of things in here, the thunderbolt support for Apple laptops,
some other new drivers, testing fixes, and other good things. All
have been in linux-next for a long time"
* tag 'char-misc-3.17-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (119 commits)
misc: bh1780: Introduce the use of devm_kzalloc
Lattice ECP3 FPGA: Correct endianness
drivers/misc/ti-st: Load firmware from ti-connectivity directory.
dt-bindings: extcon: Add support for SM5502 MUIC device
extcon: sm5502: Change internal hardware switch according to cable type
extcon: sm5502: Detect cable state after completing platform booting
extcon: sm5502: Add support new SM5502 extcon device driver
extcon: arizona: Get MICVDD against extcon device
extcon: Remove unnecessary OOM messages
misc: vexpress: Fix sparse non static symbol warnings
mei: drop unused hw dependent fw status functions
misc: bh1770glc: Use managed functions
pcmcia: remove DEFINE_PCI_DEVICE_TABLE usage
misc: remove DEFINE_PCI_DEVICE_TABLE usage
ipack: Replace DEFINE_PCI_DEVICE_TABLE macro use
drivers/char/dsp56k.c: drop check for negativity of unsigned parameter
mei: fix return value on disconnect timeout
mei: don't schedule suspend in pm idle
mei: start disconnect request timer consistently
mei: reset client connection state on timeout
...
Diffstat (limited to 'drivers/thunderbolt')
-rw-r--r--  drivers/thunderbolt/Kconfig        13
-rw-r--r--  drivers/thunderbolt/Makefile        3
-rw-r--r--  drivers/thunderbolt/cap.c         116
-rw-r--r--  drivers/thunderbolt/ctl.c         731
-rw-r--r--  drivers/thunderbolt/ctl.h          75
-rw-r--r--  drivers/thunderbolt/eeprom.c      449
-rw-r--r--  drivers/thunderbolt/nhi.c         675
-rw-r--r--  drivers/thunderbolt/nhi.h         114
-rw-r--r--  drivers/thunderbolt/nhi_regs.h    101
-rw-r--r--  drivers/thunderbolt/path.c        215
-rw-r--r--  drivers/thunderbolt/switch.c      507
-rw-r--r--  drivers/thunderbolt/tb.c          436
-rw-r--r--  drivers/thunderbolt/tb.h          271
-rw-r--r--  drivers/thunderbolt/tb_regs.h     213
-rw-r--r--  drivers/thunderbolt/tunnel_pci.c  232
-rw-r--r--  drivers/thunderbolt/tunnel_pci.h   30
16 files changed, 4181 insertions, 0 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
new file mode 100644
index 000000000000..c121acc15bfe
--- /dev/null
+++ b/drivers/thunderbolt/Kconfig
@@ -0,0 +1,13 @@
1 | menuconfig THUNDERBOLT | ||
2 | tristate "Thunderbolt support for Apple devices" | ||
3 | depends on PCI | ||
4 | select CRC32 | ||
5 | help | ||
6 | Cactus Ridge Thunderbolt Controller driver | ||
7 | This driver is required if you want to hotplug Thunderbolt devices on | ||
8 | Apple hardware. | ||
9 | |||
10 | Device chaining is currently not supported. | ||
11 | |||
12 | To compile this driver as a module, choose M here. The module will be | ||
13 | called thunderbolt. | ||
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
new file mode 100644
index 000000000000..5d1053cdfa54
--- /dev/null
+++ b/drivers/thunderbolt/Makefile
@@ -0,0 +1,3 @@
1 | obj-${CONFIG_THUNDERBOLT} := thunderbolt.o | ||
2 | thunderbolt-objs := nhi.o ctl.o tb.o switch.o cap.o path.o tunnel_pci.o eeprom.o | ||
3 | |||
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
new file mode 100644
index 000000000000..a7b47e7cddbd
--- /dev/null
+++ b/drivers/thunderbolt/cap.c
@@ -0,0 +1,116 @@
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - capabilities lookup | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | ||
9 | |||
10 | #include "tb.h" | ||
11 | |||
12 | |||
13 | struct tb_cap_any { | ||
14 | union { | ||
15 | struct tb_cap_basic basic; | ||
16 | struct tb_cap_extended_short extended_short; | ||
17 | struct tb_cap_extended_long extended_long; | ||
18 | }; | ||
19 | } __packed; | ||
20 | |||
21 | static bool tb_cap_is_basic(struct tb_cap_any *cap) | ||
22 | { | ||
23 | /* basic.cap is u8. This checks only the lower 8 bits of cap. */ | ||
24 | return cap->basic.cap != 5; | ||
25 | } | ||
26 | |||
27 | static bool tb_cap_is_long(struct tb_cap_any *cap) | ||
28 | { | ||
29 | return !tb_cap_is_basic(cap) | ||
30 | && cap->extended_short.next == 0 | ||
31 | && cap->extended_short.length == 0; | ||
32 | } | ||
33 | |||
34 | static enum tb_cap tb_cap(struct tb_cap_any *cap) | ||
35 | { | ||
36 | if (tb_cap_is_basic(cap)) | ||
37 | return cap->basic.cap; | ||
38 | else | ||
39 | /* extended_short/long have cap at the same offset. */ | ||
40 | return cap->extended_short.cap; | ||
41 | } | ||
42 | |||
43 | static u32 tb_cap_next(struct tb_cap_any *cap, u32 offset) | ||
44 | { | ||
45 | int next; | ||
46 | if (offset == 1) { | ||
47 | /* | ||
48 | * The first pointer is part of the switch header and always | ||
49 | * a simple pointer. | ||
50 | */ | ||
51 | next = cap->basic.next; | ||
52 | } else { | ||
53 | /* | ||
54 | * Somehow Intel decided to use 3 different types of capability | ||
55 | * headers. It is not like anyone could have predicted that | ||
56 | * single byte offsets are not enough... | ||
57 | */ | ||
58 | if (tb_cap_is_basic(cap)) | ||
59 | next = cap->basic.next; | ||
60 | else if (!tb_cap_is_long(cap)) | ||
61 | next = cap->extended_short.next; | ||
62 | else | ||
63 | next = cap->extended_long.next; | ||
64 | } | ||
65 | /* | ||
66 | * "Hey, we could terminate some capability lists with a null offset | ||
67 | * and others with a pointer to the last element." - "Great idea!" | ||
68 | */ | ||
69 | if (next == offset) | ||
70 | return 0; | ||
71 | return next; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * tb_find_cap() - find a capability | ||
76 | * | ||
77 | * Return: Returns a positive offset if the capability was found and 0 if not. | ||
78 | * Returns an error code on failure. | ||
79 | */ | ||
80 | int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap) | ||
81 | { | ||
82 | u32 offset = 1; | ||
83 | struct tb_cap_any header; | ||
84 | int res; | ||
85 | int retries = 10; | ||
86 | while (retries--) { | ||
87 | res = tb_port_read(port, &header, space, offset, 1); | ||
88 | if (res) { | ||
89 | /* Intel needs some help with linked lists. */ | ||
90 | if (space == TB_CFG_PORT && offset == 0xa | ||
91 | && port->config.type == TB_TYPE_DP_HDMI_OUT) { | ||
92 | offset = 0x39; | ||
93 | continue; | ||
94 | } | ||
95 | return res; | ||
96 | } | ||
97 | if (offset != 1) { | ||
98 | if (tb_cap(&header) == cap) | ||
99 | return offset; | ||
100 | if (tb_cap_is_long(&header)) { | ||
101 | /* tb_cap_extended_long is 2 dwords */ | ||
102 | res = tb_port_read(port, &header, space, | ||
103 | offset, 2); | ||
104 | if (res) | ||
105 | return res; | ||
106 | } | ||
107 | } | ||
108 | offset = tb_cap_next(&header, offset); | ||
109 | if (!offset) | ||
110 | return 0; | ||
111 | } | ||
112 | tb_port_WARN(port, | ||
113 | "run out of retries while looking for cap %#x in config space %d, last offset: %#x\n", | ||
114 | cap, space, offset); | ||
115 | return -EIO; | ||
116 | } | ||
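Editor's illustration, not part of this patch: the docstring above gives tb_find_cap() a three-way return convention (positive offset when found, 0 when absent, negative errno on access failure). A minimal caller sketch follows; the capability constant and config space used here are assumptions chosen only for the example.

static int example_lookup_cap(struct tb_port *port)
{
        /* TB_CAP_PLUG_EVENTS is assumed to be one of the enum tb_cap values */
        int res = tb_find_cap(port, TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS);

        if (res < 0)
                return res;     /* config space access failed */
        if (res == 0)
                return -ENOSYS; /* capability not present */
        return res;             /* dword offset of the capability header */
}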
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
new file mode 100644
index 000000000000..799634b382c6
--- /dev/null
+++ b/drivers/thunderbolt/ctl.c
@@ -0,0 +1,731 @@
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - control channel and configuration commands | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/crc32.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/pci.h> | ||
10 | #include <linux/dmapool.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | #include <linux/kfifo.h> | ||
13 | |||
14 | #include "ctl.h" | ||
15 | |||
16 | |||
17 | struct ctl_pkg { | ||
18 | struct tb_ctl *ctl; | ||
19 | void *buffer; | ||
20 | struct ring_frame frame; | ||
21 | }; | ||
22 | |||
23 | #define TB_CTL_RX_PKG_COUNT 10 | ||
24 | |||
25 | /** | ||
26 | * struct tb_ctl - thunderbolt control channel | ||
27 | */ | ||
28 | struct tb_ctl { | ||
29 | struct tb_nhi *nhi; | ||
30 | struct tb_ring *tx; | ||
31 | struct tb_ring *rx; | ||
32 | |||
33 | struct dma_pool *frame_pool; | ||
34 | struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT]; | ||
35 | DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16); | ||
36 | struct completion response_ready; | ||
37 | |||
38 | hotplug_cb callback; | ||
39 | void *callback_data; | ||
40 | }; | ||
41 | |||
42 | |||
43 | #define tb_ctl_WARN(ctl, format, arg...) \ | ||
44 | dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg) | ||
45 | |||
46 | #define tb_ctl_err(ctl, format, arg...) \ | ||
47 | dev_err(&(ctl)->nhi->pdev->dev, format, ## arg) | ||
48 | |||
49 | #define tb_ctl_warn(ctl, format, arg...) \ | ||
50 | dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg) | ||
51 | |||
52 | #define tb_ctl_info(ctl, format, arg...) \ | ||
53 | dev_info(&(ctl)->nhi->pdev->dev, format, ## arg) | ||
54 | |||
55 | |||
56 | /* configuration packets definitions */ | ||
57 | |||
58 | enum tb_cfg_pkg_type { | ||
59 | TB_CFG_PKG_READ = 1, | ||
60 | TB_CFG_PKG_WRITE = 2, | ||
61 | TB_CFG_PKG_ERROR = 3, | ||
62 | TB_CFG_PKG_NOTIFY_ACK = 4, | ||
63 | TB_CFG_PKG_EVENT = 5, | ||
64 | TB_CFG_PKG_XDOMAIN_REQ = 6, | ||
65 | TB_CFG_PKG_XDOMAIN_RESP = 7, | ||
66 | TB_CFG_PKG_OVERRIDE = 8, | ||
67 | TB_CFG_PKG_RESET = 9, | ||
68 | TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd, | ||
69 | }; | ||
70 | |||
71 | /* common header */ | ||
72 | struct tb_cfg_header { | ||
73 | u32 route_hi:22; | ||
74 | u32 unknown:10; /* highest order bit is set on replies */ | ||
75 | u32 route_lo; | ||
76 | } __packed; | ||
77 | |||
78 | /* additional header for read/write packets */ | ||
79 | struct tb_cfg_address { | ||
80 | u32 offset:13; /* in dwords */ | ||
81 | u32 length:6; /* in dwords */ | ||
82 | u32 port:6; | ||
83 | enum tb_cfg_space space:2; | ||
84 | u32 seq:2; /* sequence number */ | ||
85 | u32 zero:3; | ||
86 | } __packed; | ||
87 | |||
88 | /* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */ | ||
89 | struct cfg_read_pkg { | ||
90 | struct tb_cfg_header header; | ||
91 | struct tb_cfg_address addr; | ||
92 | } __packed; | ||
93 | |||
94 | /* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */ | ||
95 | struct cfg_write_pkg { | ||
96 | struct tb_cfg_header header; | ||
97 | struct tb_cfg_address addr; | ||
98 | u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */ | ||
99 | } __packed; | ||
100 | |||
101 | /* TB_CFG_PKG_ERROR */ | ||
102 | struct cfg_error_pkg { | ||
103 | struct tb_cfg_header header; | ||
104 | enum tb_cfg_error error:4; | ||
105 | u32 zero1:4; | ||
106 | u32 port:6; | ||
107 | u32 zero2:2; /* Both should be zero, still they are different fields. */ | ||
108 | u32 zero3:16; | ||
109 | } __packed; | ||
110 | |||
111 | /* TB_CFG_PKG_EVENT */ | ||
112 | struct cfg_event_pkg { | ||
113 | struct tb_cfg_header header; | ||
114 | u32 port:6; | ||
115 | u32 zero:25; | ||
116 | bool unplug:1; | ||
117 | } __packed; | ||
118 | |||
119 | /* TB_CFG_PKG_RESET */ | ||
120 | struct cfg_reset_pkg { | ||
121 | struct tb_cfg_header header; | ||
122 | } __packed; | ||
123 | |||
124 | /* TB_CFG_PKG_PREPARE_TO_SLEEP */ | ||
125 | struct cfg_pts_pkg { | ||
126 | struct tb_cfg_header header; | ||
127 | u32 data; | ||
128 | } __packed; | ||
129 | |||
130 | |||
131 | /* utility functions */ | ||
132 | |||
133 | static u64 get_route(struct tb_cfg_header header) | ||
134 | { | ||
135 | return (u64) header.route_hi << 32 | header.route_lo; | ||
136 | } | ||
137 | |||
138 | static struct tb_cfg_header make_header(u64 route) | ||
139 | { | ||
140 | struct tb_cfg_header header = { | ||
141 | .route_hi = route >> 32, | ||
142 | .route_lo = route, | ||
143 | }; | ||
144 | /* check for overflow, route_hi is not 32 bits! */ | ||
145 | WARN_ON(get_route(header) != route); | ||
146 | return header; | ||
147 | } | ||
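Editor's illustration, not part of this patch: the WARN_ON above only holds while the route fits into route_hi (22 bits) plus route_lo (32 bits), i.e. 54 bits in total. A small sketch of the round trip:

static void example_route_round_trip(void)
{
        /* route_hi = 0x3, route_lo = 0x1; fits easily in 54 bits */
        u64 route = 0x0000000300000001ULL;
        struct tb_cfg_header header = make_header(route);

        WARN_ON(get_route(header) != route); /* does not fire */
}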
148 | |||
149 | static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type, | ||
150 | u64 route) | ||
151 | { | ||
152 | struct tb_cfg_header *header = pkg->buffer; | ||
153 | |||
154 | /* check frame, TODO: frame flags */ | ||
155 | if (WARN(len != pkg->frame.size, | ||
156 | "wrong framesize (expected %#x, got %#x)\n", | ||
157 | len, pkg->frame.size)) | ||
158 | return -EIO; | ||
159 | if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n", | ||
160 | type, pkg->frame.eof)) | ||
161 | return -EIO; | ||
162 | if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n", | ||
163 | pkg->frame.sof)) | ||
164 | return -EIO; | ||
165 | |||
166 | /* check header */ | ||
167 | if (WARN(header->unknown != 1 << 9, | ||
168 | "header->unknown is %#x\n", header->unknown)) | ||
169 | return -EIO; | ||
170 | if (WARN(route != get_route(*header), | ||
171 | "wrong route (expected %llx, got %llx)", | ||
172 | route, get_route(*header))) | ||
173 | return -EIO; | ||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int check_config_address(struct tb_cfg_address addr, | ||
178 | enum tb_cfg_space space, u32 offset, | ||
179 | u32 length) | ||
180 | { | ||
181 | if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero)) | ||
182 | return -EIO; | ||
183 | if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n", | ||
184 | space, addr.space)) | ||
185 | return -EIO; | ||
186 | if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n", | ||
187 | offset, addr.offset)) | ||
188 | return -EIO; | ||
189 | if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n", | ||
190 | length, addr.length)) | ||
191 | return -EIO; | ||
192 | if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq)) | ||
193 | return -EIO; | ||
194 | /* | ||
195 | * We cannot check addr->port as it is set to the upstream port of the | ||
196 | * sender. | ||
197 | */ | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static struct tb_cfg_result decode_error(struct ctl_pkg *response) | ||
202 | { | ||
203 | struct cfg_error_pkg *pkg = response->buffer; | ||
204 | struct tb_cfg_result res = { 0 }; | ||
205 | res.response_route = get_route(pkg->header); | ||
206 | res.response_port = 0; | ||
207 | res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR, | ||
208 | get_route(pkg->header)); | ||
209 | if (res.err) | ||
210 | return res; | ||
211 | |||
212 | WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1); | ||
213 | WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2); | ||
214 | WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3); | ||
215 | res.err = 1; | ||
216 | res.tb_error = pkg->error; | ||
217 | res.response_port = pkg->port; | ||
218 | return res; | ||
219 | |||
220 | } | ||
221 | |||
222 | static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len, | ||
223 | enum tb_cfg_pkg_type type, u64 route) | ||
224 | { | ||
225 | struct tb_cfg_header *header = pkg->buffer; | ||
226 | struct tb_cfg_result res = { 0 }; | ||
227 | |||
228 | if (pkg->frame.eof == TB_CFG_PKG_ERROR) | ||
229 | return decode_error(pkg); | ||
230 | |||
231 | res.response_port = 0; /* will be updated later for cfg_read/write */ | ||
232 | res.response_route = get_route(*header); | ||
233 | res.err = check_header(pkg, len, type, route); | ||
234 | return res; | ||
235 | } | ||
236 | |||
237 | static void tb_cfg_print_error(struct tb_ctl *ctl, | ||
238 | const struct tb_cfg_result *res) | ||
239 | { | ||
240 | WARN_ON(res->err != 1); | ||
241 | switch (res->tb_error) { | ||
242 | case TB_CFG_ERROR_PORT_NOT_CONNECTED: | ||
243 | /* Port is not connected. This can happen during surprise | ||
244 | * removal. Do not warn. */ | ||
245 | return; | ||
246 | case TB_CFG_ERROR_INVALID_CONFIG_SPACE: | ||
247 | /* | ||
248 | * Invalid cfg_space/offset/length combination in | ||
249 | * cfg_read/cfg_write. | ||
250 | */ | ||
251 | tb_ctl_WARN(ctl, | ||
252 | "CFG_ERROR(%llx:%x): Invalid config space of offset\n", | ||
253 | res->response_route, res->response_port); | ||
254 | return; | ||
255 | case TB_CFG_ERROR_NO_SUCH_PORT: | ||
256 | /* | ||
257 | * - The route contains a non-existent port. | ||
258 | * - The route contains a non-PHY port (e.g. PCIe). | ||
259 | * - The port in cfg_read/cfg_write does not exist. | ||
260 | */ | ||
261 | tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n", | ||
262 | res->response_route, res->response_port); | ||
263 | return; | ||
264 | case TB_CFG_ERROR_LOOP: | ||
265 | tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", | ||
266 | res->response_route, res->response_port); | ||
267 | return; | ||
268 | default: | ||
269 | /* 5,6,7,9 and 11 are also valid error codes */ | ||
270 | tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", | ||
271 | res->response_route, res->response_port); | ||
272 | return; | ||
273 | } | ||
274 | } | ||
275 | |||
276 | static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len) | ||
277 | { | ||
278 | int i; | ||
279 | for (i = 0; i < len; i++) | ||
280 | dst[i] = cpu_to_be32(src[i]); | ||
281 | } | ||
282 | |||
283 | static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len) | ||
284 | { | ||
285 | int i; | ||
286 | for (i = 0; i < len; i++) | ||
287 | dst[i] = be32_to_cpu(src[i]); | ||
288 | } | ||
289 | |||
290 | static __be32 tb_crc(void *data, size_t len) | ||
291 | { | ||
292 | return cpu_to_be32(~__crc32c_le(~0, data, len)); | ||
293 | } | ||
294 | |||
295 | static void tb_ctl_pkg_free(struct ctl_pkg *pkg) | ||
296 | { | ||
297 | if (pkg) { | ||
298 | dma_pool_free(pkg->ctl->frame_pool, | ||
299 | pkg->buffer, pkg->frame.buffer_phy); | ||
300 | kfree(pkg); | ||
301 | } | ||
302 | } | ||
303 | |||
304 | static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl) | ||
305 | { | ||
306 | struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL); | ||
307 | if (!pkg) | ||
308 | return NULL; | ||
309 | pkg->ctl = ctl; | ||
310 | pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL, | ||
311 | &pkg->frame.buffer_phy); | ||
312 | if (!pkg->buffer) { | ||
313 | kfree(pkg); | ||
314 | return NULL; | ||
315 | } | ||
316 | return pkg; | ||
317 | } | ||
318 | |||
319 | |||
320 | /* RX/TX handling */ | ||
321 | |||
322 | static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame, | ||
323 | bool canceled) | ||
324 | { | ||
325 | struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); | ||
326 | tb_ctl_pkg_free(pkg); | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * tb_ctl_tx() - transmit a packet on the control channel | ||
331 | * | ||
332 | * len must be a multiple of four. | ||
333 | * | ||
334 | * Return: Returns 0 on success or an error code on failure. | ||
335 | */ | ||
336 | static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len, | ||
337 | enum tb_cfg_pkg_type type) | ||
338 | { | ||
339 | int res; | ||
340 | struct ctl_pkg *pkg; | ||
341 | if (len % 4 != 0) { /* required for le->be conversion */ | ||
342 | tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len); | ||
343 | return -EINVAL; | ||
344 | } | ||
345 | if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */ | ||
346 | tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n", | ||
347 | len, TB_FRAME_SIZE - 4); | ||
348 | return -EINVAL; | ||
349 | } | ||
350 | pkg = tb_ctl_pkg_alloc(ctl); | ||
351 | if (!pkg) | ||
352 | return -ENOMEM; | ||
353 | pkg->frame.callback = tb_ctl_tx_callback; | ||
354 | pkg->frame.size = len + 4; | ||
355 | pkg->frame.sof = type; | ||
356 | pkg->frame.eof = type; | ||
357 | cpu_to_be32_array(pkg->buffer, data, len / 4); | ||
358 | *(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len); | ||
359 | |||
360 | res = ring_tx(ctl->tx, &pkg->frame); | ||
361 | if (res) /* ring is stopped */ | ||
362 | tb_ctl_pkg_free(pkg); | ||
363 | return res; | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback | ||
368 | */ | ||
369 | static void tb_ctl_handle_plug_event(struct tb_ctl *ctl, | ||
370 | struct ctl_pkg *response) | ||
371 | { | ||
372 | struct cfg_event_pkg *pkg = response->buffer; | ||
373 | u64 route = get_route(pkg->header); | ||
374 | |||
375 | if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) { | ||
376 | tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n"); | ||
377 | return; | ||
378 | } | ||
379 | |||
380 | if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT)) | ||
381 | tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n", | ||
382 | route, pkg->port); | ||
383 | WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero); | ||
384 | ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug); | ||
385 | } | ||
386 | |||
387 | static void tb_ctl_rx_submit(struct ctl_pkg *pkg) | ||
388 | { | ||
389 | ring_rx(pkg->ctl->rx, &pkg->frame); /* | ||
390 | * We ignore failures during stop. | ||
391 | * All rx packets are referenced | ||
392 | * from ctl->rx_packets, so we do | ||
393 | * not lose them. | ||
394 | */ | ||
395 | } | ||
396 | |||
397 | static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, | ||
398 | bool canceled) | ||
399 | { | ||
400 | struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame); | ||
401 | |||
402 | if (canceled) | ||
403 | return; /* | ||
404 | * ring is stopped, packet is referenced from | ||
405 | * ctl->rx_packets. | ||
406 | */ | ||
407 | |||
408 | if (frame->size < 4 || frame->size % 4 != 0) { | ||
409 | tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n", | ||
410 | frame->size); | ||
411 | goto rx; | ||
412 | } | ||
413 | |||
414 | frame->size -= 4; /* remove checksum */ | ||
415 | if (*(__be32 *) (pkg->buffer + frame->size) | ||
416 | != tb_crc(pkg->buffer, frame->size)) { | ||
417 | tb_ctl_err(pkg->ctl, | ||
418 | "RX: checksum mismatch, dropping packet\n"); | ||
419 | goto rx; | ||
420 | } | ||
421 | be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4); | ||
422 | |||
423 | if (frame->eof == TB_CFG_PKG_EVENT) { | ||
424 | tb_ctl_handle_plug_event(pkg->ctl, pkg); | ||
425 | goto rx; | ||
426 | } | ||
427 | if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) { | ||
428 | tb_ctl_err(pkg->ctl, "RX: fifo is full\n"); | ||
429 | goto rx; | ||
430 | } | ||
431 | complete(&pkg->ctl->response_ready); | ||
432 | return; | ||
433 | rx: | ||
434 | tb_ctl_rx_submit(pkg); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * tb_ctl_rx() - receive a packet from the control channel | ||
439 | */ | ||
440 | static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer, | ||
441 | size_t length, int timeout_msec, | ||
442 | u64 route, enum tb_cfg_pkg_type type) | ||
443 | { | ||
444 | struct tb_cfg_result res; | ||
445 | struct ctl_pkg *pkg; | ||
446 | |||
447 | if (!wait_for_completion_timeout(&ctl->response_ready, | ||
448 | msecs_to_jiffies(timeout_msec))) { | ||
449 | tb_ctl_WARN(ctl, "RX: timeout\n"); | ||
450 | return (struct tb_cfg_result) { .err = -ETIMEDOUT }; | ||
451 | } | ||
452 | if (!kfifo_get(&ctl->response_fifo, &pkg)) { | ||
453 | tb_ctl_WARN(ctl, "empty kfifo\n"); | ||
454 | return (struct tb_cfg_result) { .err = -EIO }; | ||
455 | } | ||
456 | |||
457 | res = parse_header(pkg, length, type, route); | ||
458 | if (!res.err) | ||
459 | memcpy(buffer, pkg->buffer, length); | ||
460 | tb_ctl_rx_submit(pkg); | ||
461 | return res; | ||
462 | } | ||
463 | |||
464 | |||
465 | /* public interface, alloc/start/stop/free */ | ||
466 | |||
467 | /** | ||
468 | * tb_ctl_alloc() - allocate a control channel | ||
469 | * | ||
470 | * cb will be invoked once for every hot plug event. | ||
471 | * | ||
472 | * Return: Returns a pointer on success or NULL on failure. | ||
473 | */ | ||
474 | struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data) | ||
475 | { | ||
476 | int i; | ||
477 | struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); | ||
478 | if (!ctl) | ||
479 | return NULL; | ||
480 | ctl->nhi = nhi; | ||
481 | ctl->callback = cb; | ||
482 | ctl->callback_data = cb_data; | ||
483 | |||
484 | init_completion(&ctl->response_ready); | ||
485 | INIT_KFIFO(ctl->response_fifo); | ||
486 | ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev, | ||
487 | TB_FRAME_SIZE, 4, 0); | ||
488 | if (!ctl->frame_pool) | ||
489 | goto err; | ||
490 | |||
491 | ctl->tx = ring_alloc_tx(nhi, 0, 10); | ||
492 | if (!ctl->tx) | ||
493 | goto err; | ||
494 | |||
495 | ctl->rx = ring_alloc_rx(nhi, 0, 10); | ||
496 | if (!ctl->rx) | ||
497 | goto err; | ||
498 | |||
499 | for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) { | ||
500 | ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); | ||
501 | if (!ctl->rx_packets[i]) | ||
502 | goto err; | ||
503 | ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback; | ||
504 | } | ||
505 | |||
506 | tb_ctl_info(ctl, "control channel created\n"); | ||
507 | return ctl; | ||
508 | err: | ||
509 | tb_ctl_free(ctl); | ||
510 | return NULL; | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * tb_ctl_free() - free a control channel | ||
515 | * | ||
516 | * Must be called after tb_ctl_stop. | ||
517 | * | ||
518 | * Must NOT be called from ctl->callback. | ||
519 | */ | ||
520 | void tb_ctl_free(struct tb_ctl *ctl) | ||
521 | { | ||
522 | int i; | ||
523 | if (ctl->rx) | ||
524 | ring_free(ctl->rx); | ||
525 | if (ctl->tx) | ||
526 | ring_free(ctl->tx); | ||
527 | |||
528 | /* free RX packets */ | ||
529 | for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) | ||
530 | tb_ctl_pkg_free(ctl->rx_packets[i]); | ||
531 | |||
532 | |||
533 | if (ctl->frame_pool) | ||
534 | dma_pool_destroy(ctl->frame_pool); | ||
535 | kfree(ctl); | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * tb_ctl_start() - start/resume the control channel | ||
540 | */ | ||
541 | void tb_ctl_start(struct tb_ctl *ctl) | ||
542 | { | ||
543 | int i; | ||
544 | tb_ctl_info(ctl, "control channel starting...\n"); | ||
545 | ring_start(ctl->tx); /* is used to ack hotplug packets, start first */ | ||
546 | ring_start(ctl->rx); | ||
547 | for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) | ||
548 | tb_ctl_rx_submit(ctl->rx_packets[i]); | ||
549 | } | ||
550 | |||
551 | /** | ||
552 | * tb_ctl_stop() - pause the control channel | ||
553 | * | ||
554 | * All invocations of ctl->callback will have finished after this method | ||
555 | * returns. | ||
556 | * | ||
557 | * Must NOT be called from ctl->callback. | ||
558 | */ | ||
559 | void tb_ctl_stop(struct tb_ctl *ctl) | ||
560 | { | ||
561 | ring_stop(ctl->rx); | ||
562 | ring_stop(ctl->tx); | ||
563 | |||
564 | if (!kfifo_is_empty(&ctl->response_fifo)) | ||
565 | tb_ctl_WARN(ctl, "dangling response in response_fifo\n"); | ||
566 | kfifo_reset(&ctl->response_fifo); | ||
567 | tb_ctl_info(ctl, "control channel stopped\n"); | ||
568 | } | ||
569 | |||
570 | /* public interface, commands */ | ||
571 | |||
572 | /** | ||
573 | * tb_cfg_error() - send error packet | ||
574 | * | ||
575 | * Return: Returns 0 on success or an error code on failure. | ||
576 | */ | ||
577 | int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, | ||
578 | enum tb_cfg_error error) | ||
579 | { | ||
580 | struct cfg_error_pkg pkg = { | ||
581 | .header = make_header(route), | ||
582 | .port = port, | ||
583 | .error = error, | ||
584 | }; | ||
585 | tb_ctl_info(ctl, "resetting error on %llx:%x.\n", route, port); | ||
586 | return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR); | ||
587 | } | ||
588 | |||
589 | /** | ||
590 | * tb_cfg_reset() - send a reset packet and wait for a response | ||
591 | * | ||
592 | * If the switch at route is incorrectly configured then we will not receive a | ||
593 | * reply (even though the switch will reset). The caller should check for | ||
594 | * -ETIMEDOUT and attempt to reconfigure the switch. | ||
595 | */ | ||
596 | struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, | ||
597 | int timeout_msec) | ||
598 | { | ||
599 | int err; | ||
600 | struct cfg_reset_pkg request = { .header = make_header(route) }; | ||
601 | struct tb_cfg_header reply; | ||
602 | |||
603 | err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET); | ||
604 | if (err) | ||
605 | return (struct tb_cfg_result) { .err = err }; | ||
606 | |||
607 | return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, | ||
608 | TB_CFG_PKG_RESET); | ||
609 | } | ||
610 | |||
611 | /** | ||
612 | * tb_cfg_read_raw() - read from config space into buffer | ||
613 | * | ||
614 | * Offset and length are in dwords. | ||
615 | */ | ||
616 | struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, | ||
617 | u64 route, u32 port, enum tb_cfg_space space, | ||
618 | u32 offset, u32 length, int timeout_msec) | ||
619 | { | ||
620 | struct tb_cfg_result res = { 0 }; | ||
621 | struct cfg_read_pkg request = { | ||
622 | .header = make_header(route), | ||
623 | .addr = { | ||
624 | .port = port, | ||
625 | .space = space, | ||
626 | .offset = offset, | ||
627 | .length = length, | ||
628 | }, | ||
629 | }; | ||
630 | struct cfg_write_pkg reply; | ||
631 | |||
632 | res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ); | ||
633 | if (res.err) | ||
634 | return res; | ||
635 | |||
636 | res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route, | ||
637 | TB_CFG_PKG_READ); | ||
638 | if (res.err) | ||
639 | return res; | ||
640 | |||
641 | res.response_port = reply.addr.port; | ||
642 | res.err = check_config_address(reply.addr, space, offset, length); | ||
643 | if (!res.err) | ||
644 | memcpy(buffer, &reply.data, 4 * length); | ||
645 | return res; | ||
646 | } | ||
647 | |||
648 | /** | ||
649 | * tb_cfg_write_raw() - write from buffer into config space | ||
650 | * | ||
651 | * Offset and length are in dwords. | ||
652 | */ | ||
653 | struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer, | ||
654 | u64 route, u32 port, enum tb_cfg_space space, | ||
655 | u32 offset, u32 length, int timeout_msec) | ||
656 | { | ||
657 | struct tb_cfg_result res = { 0 }; | ||
658 | struct cfg_write_pkg request = { | ||
659 | .header = make_header(route), | ||
660 | .addr = { | ||
661 | .port = port, | ||
662 | .space = space, | ||
663 | .offset = offset, | ||
664 | .length = length, | ||
665 | }, | ||
666 | }; | ||
667 | struct cfg_read_pkg reply; | ||
668 | |||
669 | memcpy(&request.data, buffer, length * 4); | ||
670 | |||
671 | res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE); | ||
672 | if (res.err) | ||
673 | return res; | ||
674 | |||
675 | res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, | ||
676 | TB_CFG_PKG_WRITE); | ||
677 | if (res.err) | ||
678 | return res; | ||
679 | |||
680 | res.response_port = reply.addr.port; | ||
681 | res.err = check_config_address(reply.addr, space, offset, length); | ||
682 | return res; | ||
683 | } | ||
684 | |||
685 | int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, | ||
686 | enum tb_cfg_space space, u32 offset, u32 length) | ||
687 | { | ||
688 | struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, | ||
689 | space, offset, length, TB_CFG_DEFAULT_TIMEOUT); | ||
690 | if (res.err == 1) { | ||
691 | tb_cfg_print_error(ctl, &res); | ||
692 | return -EIO; | ||
693 | } | ||
694 | WARN(res.err, "tb_cfg_read: %d\n", res.err); | ||
695 | return res.err; | ||
696 | } | ||
697 | |||
698 | int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, | ||
699 | enum tb_cfg_space space, u32 offset, u32 length) | ||
700 | { | ||
701 | struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, | ||
702 | space, offset, length, TB_CFG_DEFAULT_TIMEOUT); | ||
703 | if (res.err == 1) { | ||
704 | tb_cfg_print_error(ctl, &res); | ||
705 | return -EIO; | ||
706 | } | ||
707 | WARN(res.err, "tb_cfg_write: %d\n", res.err); | ||
708 | return res.err; | ||
709 | } | ||
710 | |||
711 | /** | ||
712 | * tb_cfg_get_upstream_port() - get upstream port number of switch at route | ||
713 | * | ||
714 | * Reads the first dword from the switch's TB_CFG_SWITCH config area and | ||
715 | * returns the port number from which the reply originated. | ||
716 | * | ||
717 | * Return: Returns the upstream port number on success or an error code on | ||
718 | * failure. | ||
719 | */ | ||
720 | int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) | ||
721 | { | ||
722 | u32 dummy; | ||
723 | struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, | ||
724 | TB_CFG_SWITCH, 0, 1, | ||
725 | TB_CFG_DEFAULT_TIMEOUT); | ||
726 | if (res.err == 1) | ||
727 | return -EIO; | ||
728 | if (res.err) | ||
729 | return res.err; | ||
730 | return res.response_port; | ||
731 | } | ||
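Editor's illustration, not part of this patch: a sketch of the intended lifecycle of the control channel as described by the docstrings above (allocate with a hotplug callback, start, issue config transactions, stop before freeing). The surrounding NHI setup is assumed to exist elsewhere.

static void example_hotplug(void *data, u64 route, u8 port, bool unplug)
{
        /* invoked once for every acknowledged TB_CFG_PKG_EVENT */
}

static int example_ctl_session(struct tb_nhi *nhi)
{
        u32 dword;
        int res;
        struct tb_ctl *ctl = tb_ctl_alloc(nhi, example_hotplug, NULL);
        if (!ctl)
                return -ENOMEM;
        tb_ctl_start(ctl);

        /* read one dword from the switch config space of the root switch */
        res = tb_cfg_read(ctl, &dword, 0, 0, TB_CFG_SWITCH, 0, 1);

        tb_ctl_stop(ctl);       /* must come before tb_ctl_free() */
        tb_ctl_free(ctl);
        return res;
}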
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
new file mode 100644
index 000000000000..ba87d6e731dd
--- /dev/null
+++ b/drivers/thunderbolt/ctl.h
@@ -0,0 +1,75 @@
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - control channel and configuration commands | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef _TB_CFG | ||
8 | #define _TB_CFG | ||
9 | |||
10 | #include "nhi.h" | ||
11 | |||
12 | /* control channel */ | ||
13 | struct tb_ctl; | ||
14 | |||
15 | typedef void (*hotplug_cb)(void *data, u64 route, u8 port, bool unplug); | ||
16 | |||
17 | struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data); | ||
18 | void tb_ctl_start(struct tb_ctl *ctl); | ||
19 | void tb_ctl_stop(struct tb_ctl *ctl); | ||
20 | void tb_ctl_free(struct tb_ctl *ctl); | ||
21 | |||
22 | /* configuration commands */ | ||
23 | |||
24 | #define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */ | ||
25 | |||
26 | enum tb_cfg_space { | ||
27 | TB_CFG_HOPS = 0, | ||
28 | TB_CFG_PORT = 1, | ||
29 | TB_CFG_SWITCH = 2, | ||
30 | TB_CFG_COUNTERS = 3, | ||
31 | }; | ||
32 | |||
33 | enum tb_cfg_error { | ||
34 | TB_CFG_ERROR_PORT_NOT_CONNECTED = 0, | ||
35 | TB_CFG_ERROR_INVALID_CONFIG_SPACE = 2, | ||
36 | TB_CFG_ERROR_NO_SUCH_PORT = 4, | ||
37 | TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */ | ||
38 | TB_CFG_ERROR_LOOP = 8, | ||
39 | }; | ||
40 | |||
41 | struct tb_cfg_result { | ||
42 | u64 response_route; | ||
43 | u32 response_port; /* | ||
44 | * If err = 1 then this is the port that sent the | ||
45 | * error. | ||
46 | * If err = 0 and if this was a cfg_read/write then | ||
47 | * this is the upstream port of the responding | ||
48 | * switch. | ||
49 | * Otherwise the field is set to zero. | ||
50 | */ | ||
51 | int err; /* negative errors, 0 for success, 1 for tb errors */ | ||
52 | enum tb_cfg_error tb_error; /* valid if err == 1 */ | ||
53 | }; | ||
54 | |||
55 | |||
56 | int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, | ||
57 | enum tb_cfg_error error); | ||
58 | struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, | ||
59 | int timeout_msec); | ||
60 | struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, | ||
61 | u64 route, u32 port, | ||
62 | enum tb_cfg_space space, u32 offset, | ||
63 | u32 length, int timeout_msec); | ||
64 | struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer, | ||
65 | u64 route, u32 port, | ||
66 | enum tb_cfg_space space, u32 offset, | ||
67 | u32 length, int timeout_msec); | ||
68 | int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, | ||
69 | enum tb_cfg_space space, u32 offset, u32 length); | ||
70 | int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, | ||
71 | enum tb_cfg_space space, u32 offset, u32 length); | ||
72 | int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route); | ||
73 | |||
74 | |||
75 | #endif | ||
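Editor's illustration, not part of this patch: the err convention of struct tb_cfg_result (negative errno, 0 for success, 1 for a thunderbolt error with details in tb_error) is interpreted by callers of the *_raw variants roughly as follows.

static int example_handle_result(struct tb_cfg_result res)
{
        if (res.err < 0)
                return res.err; /* transport failure, e.g. -ETIMEDOUT */
        if (res.err == 1)
                return -EIO;    /* thunderbolt error, see res.tb_error */
        return 0;               /* success */
}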
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
new file mode 100644
index 000000000000..0dde34e3a7c5
--- /dev/null
+++ b/drivers/thunderbolt/eeprom.c
@@ -0,0 +1,449 @@
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - eeprom access | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/crc32.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include "tb.h" | ||
10 | |||
11 | /** | ||
12 | * tb_eeprom_ctl_write() - write control word | ||
13 | */ | ||
14 | static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) | ||
15 | { | ||
16 | return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1); | ||
17 | } | ||
18 | |||
19 | /** | ||
20 | * tb_eeprom_ctl_read() - read control word | ||
21 | */ | ||
22 | static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) | ||
23 | { | ||
24 | return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1); | ||
25 | } | ||
26 | |||
27 | enum tb_eeprom_transfer { | ||
28 | TB_EEPROM_IN, | ||
29 | TB_EEPROM_OUT, | ||
30 | }; | ||
31 | |||
32 | /** | ||
33 | * tb_eeprom_active - enable/disable eeprom access | ||
34 | * | ||
35 | * WARNING: Always disable access after usage. Otherwise the controller will | ||
36 | * fail to reprobe. | ||
37 | */ | ||
38 | static int tb_eeprom_active(struct tb_switch *sw, bool enable) | ||
39 | { | ||
40 | struct tb_eeprom_ctl ctl; | ||
41 | int res = tb_eeprom_ctl_read(sw, &ctl); | ||
42 | if (res) | ||
43 | return res; | ||
44 | if (enable) { | ||
45 | ctl.access_high = 1; | ||
46 | res = tb_eeprom_ctl_write(sw, &ctl); | ||
47 | if (res) | ||
48 | return res; | ||
49 | ctl.access_low = 0; | ||
50 | return tb_eeprom_ctl_write(sw, &ctl); | ||
51 | } else { | ||
52 | ctl.access_low = 1; | ||
53 | res = tb_eeprom_ctl_write(sw, &ctl); | ||
54 | if (res) | ||
55 | return res; | ||
56 | ctl.access_high = 0; | ||
57 | return tb_eeprom_ctl_write(sw, &ctl); | ||
58 | } | ||
59 | } | ||
60 | |||
61 | /** | ||
62 | * tb_eeprom_transfer - transfer one bit | ||
63 | * | ||
64 | * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in. | ||
65 | * If TB_EEPROM_OUT is passed, then ctl->data_out will be written. | ||
66 | */ | ||
67 | static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, | ||
68 | enum tb_eeprom_transfer direction) | ||
69 | { | ||
70 | int res; | ||
71 | if (direction == TB_EEPROM_OUT) { | ||
72 | res = tb_eeprom_ctl_write(sw, ctl); | ||
73 | if (res) | ||
74 | return res; | ||
75 | } | ||
76 | ctl->clock = 1; | ||
77 | res = tb_eeprom_ctl_write(sw, ctl); | ||
78 | if (res) | ||
79 | return res; | ||
80 | if (direction == TB_EEPROM_IN) { | ||
81 | res = tb_eeprom_ctl_read(sw, ctl); | ||
82 | if (res) | ||
83 | return res; | ||
84 | } | ||
85 | ctl->clock = 0; | ||
86 | return tb_eeprom_ctl_write(sw, ctl); | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * tb_eeprom_out - write one byte to the bus | ||
91 | */ | ||
92 | static int tb_eeprom_out(struct tb_switch *sw, u8 val) | ||
93 | { | ||
94 | struct tb_eeprom_ctl ctl; | ||
95 | int i; | ||
96 | int res = tb_eeprom_ctl_read(sw, &ctl); | ||
97 | if (res) | ||
98 | return res; | ||
99 | for (i = 0; i < 8; i++) { | ||
100 | ctl.data_out = val & 0x80; | ||
101 | res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT); | ||
102 | if (res) | ||
103 | return res; | ||
104 | val <<= 1; | ||
105 | } | ||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | /** | ||
110 | * tb_eeprom_in - read one byte from the bus | ||
111 | */ | ||
112 | static int tb_eeprom_in(struct tb_switch *sw, u8 *val) | ||
113 | { | ||
114 | struct tb_eeprom_ctl ctl; | ||
115 | int i; | ||
116 | int res = tb_eeprom_ctl_read(sw, &ctl); | ||
117 | if (res) | ||
118 | return res; | ||
119 | *val = 0; | ||
120 | for (i = 0; i < 8; i++) { | ||
121 | *val <<= 1; | ||
122 | res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN); | ||
123 | if (res) | ||
124 | return res; | ||
125 | *val |= ctl.data_in; | ||
126 | } | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * tb_eeprom_read_n - read count bytes from offset into val | ||
132 | */ | ||
133 | static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, | ||
134 | size_t count) | ||
135 | { | ||
136 | int i, res; | ||
137 | res = tb_eeprom_active(sw, true); | ||
138 | if (res) | ||
139 | return res; | ||
140 | res = tb_eeprom_out(sw, 3); | ||
141 | if (res) | ||
142 | return res; | ||
143 | res = tb_eeprom_out(sw, offset >> 8); | ||
144 | if (res) | ||
145 | return res; | ||
146 | res = tb_eeprom_out(sw, offset); | ||
147 | if (res) | ||
148 | return res; | ||
149 | for (i = 0; i < count; i++) { | ||
150 | res = tb_eeprom_in(sw, val + i); | ||
151 | if (res) | ||
152 | return res; | ||
153 | } | ||
154 | return tb_eeprom_active(sw, false); | ||
155 | } | ||
156 | |||
157 | static u8 tb_crc8(u8 *data, int len) | ||
158 | { | ||
159 | int i, j; | ||
160 | u8 val = 0xff; | ||
161 | for (i = 0; i < len; i++) { | ||
162 | val ^= data[i]; | ||
163 | for (j = 0; j < 8; j++) | ||
164 | val = (val << 1) ^ ((val & 0x80) ? 7 : 0); | ||
165 | } | ||
166 | return val; | ||
167 | } | ||
168 | |||
169 | static u32 tb_crc32(void *data, size_t len) | ||
170 | { | ||
171 | return ~__crc32c_le(~0, data, len); | ||
172 | } | ||
173 | |||
174 | #define TB_DROM_DATA_START 13 | ||
175 | struct tb_drom_header { | ||
176 | /* BYTE 0 */ | ||
177 | u8 uid_crc8; /* checksum for uid */ | ||
178 | /* BYTES 1-8 */ | ||
179 | u64 uid; | ||
180 | /* BYTES 9-12 */ | ||
181 | u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */ | ||
182 | /* BYTE 13 */ | ||
183 | u8 device_rom_revision; /* should be <= 1 */ | ||
184 | u16 data_len:10; | ||
185 | u8 __unknown1:6; | ||
186 | /* BYTES 16-21 */ | ||
187 | u16 vendor_id; | ||
188 | u16 model_id; | ||
189 | u8 model_rev; | ||
190 | u8 eeprom_rev; | ||
191 | } __packed; | ||
192 | |||
193 | enum tb_drom_entry_type { | ||
194 | /* force unsigned to prevent "one-bit signed bitfield" warning */ | ||
195 | TB_DROM_ENTRY_GENERIC = 0U, | ||
196 | TB_DROM_ENTRY_PORT, | ||
197 | }; | ||
198 | |||
199 | struct tb_drom_entry_header { | ||
200 | u8 len; | ||
201 | u8 index:6; | ||
202 | bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */ | ||
203 | enum tb_drom_entry_type type:1; | ||
204 | } __packed; | ||
205 | |||
206 | struct tb_drom_entry_port { | ||
207 | /* BYTES 0-1 */ | ||
208 | struct tb_drom_entry_header header; | ||
209 | /* BYTE 2 */ | ||
210 | u8 dual_link_port_rid:4; | ||
211 | u8 link_nr:1; | ||
212 | u8 unknown1:2; | ||
213 | bool has_dual_link_port:1; | ||
214 | |||
215 | /* BYTE 3 */ | ||
216 | u8 dual_link_port_nr:6; | ||
217 | u8 unknown2:2; | ||
218 | |||
219 | /* BYTES 4 - 5 TODO decode */ | ||
220 | u8 micro2:4; | ||
221 | u8 micro1:4; | ||
222 | u8 micro3; | ||
223 | |||
224 | /* BYTES 5-6, TODO: verify (find hardware that has these set) */ | ||
225 | u8 peer_port_rid:4; | ||
226 | u8 unknown3:3; | ||
227 | bool has_peer_port:1; | ||
228 | u8 peer_port_nr:6; | ||
229 | u8 unknown4:2; | ||
230 | } __packed; | ||
231 | |||
232 | |||
233 | /** | ||
234 | * tb_eeprom_get_drom_offset - get drom offset within eeprom | ||
235 | */ | ||
236 | static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) | ||
237 | { | ||
238 | struct tb_cap_plug_events cap; | ||
239 | int res; | ||
240 | if (!sw->cap_plug_events) { | ||
241 | tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); | ||
242 | return -ENOSYS; | ||
243 | } | ||
244 | res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, | ||
245 | sizeof(cap) / 4); | ||
246 | if (res) | ||
247 | return res; | ||
248 | |||
249 | if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { | ||
250 | tb_sw_warn(sw, "no NVM\n"); | ||
251 | return -ENOSYS; | ||
252 | } | ||
253 | |||
254 | if (cap.drom_offset > 0xffff) { | ||
255 | tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", | ||
256 | cap.drom_offset); | ||
257 | return -ENXIO; | ||
258 | } | ||
259 | *offset = cap.drom_offset; | ||
260 | return 0; | ||
261 | } | ||
262 | |||
263 | /** | ||
264 | * tb_drom_read_uid_only - read uid directly from drom | ||
265 | * | ||
266 | * Does not use the cached copy in sw->drom. Used during resume to check switch | ||
267 | * identity. | ||
268 | */ | ||
269 | int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid) | ||
270 | { | ||
271 | u8 data[9]; | ||
272 | u16 drom_offset; | ||
273 | u8 crc; | ||
274 | int res = tb_eeprom_get_drom_offset(sw, &drom_offset); | ||
275 | if (res) | ||
276 | return res; | ||
277 | |||
278 | /* read uid */ | ||
279 | res = tb_eeprom_read_n(sw, drom_offset, data, 9); | ||
280 | if (res) | ||
281 | return res; | ||
282 | |||
283 | crc = tb_crc8(data + 1, 8); | ||
284 | if (crc != data[0]) { | ||
285 | tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n", | ||
286 | data[0], crc); | ||
287 | return -EIO; | ||
288 | } | ||
289 | |||
290 | *uid = *(u64 *)(data+1); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | static void tb_drom_parse_port_entry(struct tb_port *port, | ||
295 | struct tb_drom_entry_port *entry) | ||
296 | { | ||
297 | port->link_nr = entry->link_nr; | ||
298 | if (entry->has_dual_link_port) | ||
299 | port->dual_link_port = | ||
300 | &port->sw->ports[entry->dual_link_port_nr]; | ||
301 | } | ||
302 | |||
303 | static int tb_drom_parse_entry(struct tb_switch *sw, | ||
304 | struct tb_drom_entry_header *header) | ||
305 | { | ||
306 | struct tb_port *port; | ||
307 | int res; | ||
308 | enum tb_port_type type; | ||
309 | |||
310 | if (header->type != TB_DROM_ENTRY_PORT) | ||
311 | return 0; | ||
312 | |||
313 | port = &sw->ports[header->index]; | ||
314 | port->disabled = header->port_disabled; | ||
315 | if (port->disabled) | ||
316 | return 0; | ||
317 | |||
318 | res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1); | ||
319 | if (res) | ||
320 | return res; | ||
321 | type &= 0xffffff; | ||
322 | |||
323 | if (type == TB_TYPE_PORT) { | ||
324 | struct tb_drom_entry_port *entry = (void *) header; | ||
325 | if (header->len != sizeof(*entry)) { | ||
326 | tb_sw_warn(sw, | ||
327 | "port entry has size %#x (expected %#zx)\n", | ||
328 | header->len, sizeof(struct tb_drom_entry_port)); | ||
329 | return -EIO; | ||
330 | } | ||
331 | tb_drom_parse_port_entry(port, entry); | ||
332 | } | ||
333 | return 0; | ||
334 | } | ||
335 | |||
336 | /** | ||
337 | * tb_drom_parse_entries - parse the linked list of drom entries | ||
338 | * | ||
339 | * Drom must have been copied to sw->drom. | ||
340 | */ | ||
341 | static int tb_drom_parse_entries(struct tb_switch *sw) | ||
342 | { | ||
343 | struct tb_drom_header *header = (void *) sw->drom; | ||
344 | u16 pos = sizeof(*header); | ||
345 | u16 drom_size = header->data_len + TB_DROM_DATA_START; | ||
346 | |||
347 | while (pos < drom_size) { | ||
348 | struct tb_drom_entry_header *entry = (void *) (sw->drom + pos); | ||
349 | if (pos + 1 == drom_size || pos + entry->len > drom_size | ||
350 | || !entry->len) { | ||
351 | tb_sw_warn(sw, "drom buffer overrun, aborting\n"); | ||
352 | return -EIO; | ||
353 | } | ||
354 | |||
355 | tb_drom_parse_entry(sw, entry); | ||
356 | |||
357 | pos += entry->len; | ||
358 | } | ||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /** | ||
363 | * tb_drom_read - copy drom to sw->drom and parse it | ||
364 | */ | ||
365 | int tb_drom_read(struct tb_switch *sw) | ||
366 | { | ||
367 | u16 drom_offset; | ||
368 | u16 size; | ||
369 | u32 crc; | ||
370 | struct tb_drom_header *header; | ||
371 | int res; | ||
372 | if (sw->drom) | ||
373 | return 0; | ||
374 | |||
375 | if (tb_route(sw) == 0) { | ||
376 | /* | ||
377 | * The root switch contains only a dummy drom (header only, | ||
378 | * no entries). Hardcode the configuration here. | ||
379 | */ | ||
380 | tb_drom_read_uid_only(sw, &sw->uid); | ||
381 | |||
382 | sw->ports[1].link_nr = 0; | ||
383 | sw->ports[2].link_nr = 1; | ||
384 | sw->ports[1].dual_link_port = &sw->ports[2]; | ||
385 | sw->ports[2].dual_link_port = &sw->ports[1]; | ||
386 | |||
387 | sw->ports[3].link_nr = 0; | ||
388 | sw->ports[4].link_nr = 1; | ||
389 | sw->ports[3].dual_link_port = &sw->ports[4]; | ||
390 | sw->ports[4].dual_link_port = &sw->ports[3]; | ||
391 | return 0; | ||
392 | } | ||
393 | |||
394 | res = tb_eeprom_get_drom_offset(sw, &drom_offset); | ||
395 | if (res) | ||
396 | return res; | ||
397 | |||
398 | res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2); | ||
399 | if (res) | ||
400 | return res; | ||
401 | size &= 0x3ff; | ||
402 | size += TB_DROM_DATA_START; | ||
403 | tb_sw_info(sw, "reading drom (length: %#x)\n", size); | ||
404 | if (size < sizeof(*header)) { | ||
405 | tb_sw_warn(sw, "drom too small, aborting\n"); | ||
406 | return -EIO; | ||
407 | } | ||
408 | |||
409 | sw->drom = kzalloc(size, GFP_KERNEL); | ||
410 | if (!sw->drom) | ||
411 | return -ENOMEM; | ||
412 | res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size); | ||
413 | if (res) | ||
414 | goto err; | ||
415 | |||
416 | header = (void *) sw->drom; | ||
417 | |||
418 | if (header->data_len + TB_DROM_DATA_START != size) { | ||
419 | tb_sw_warn(sw, "drom size mismatch, aborting\n"); | ||
420 | goto err; | ||
421 | } | ||
422 | |||
423 | crc = tb_crc8((u8 *) &header->uid, 8); | ||
424 | if (crc != header->uid_crc8) { | ||
425 | tb_sw_warn(sw, | ||
426 | "drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n", | ||
427 | header->uid_crc8, crc); | ||
428 | goto err; | ||
429 | } | ||
430 | sw->uid = header->uid; | ||
431 | |||
432 | crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); | ||
433 | if (crc != header->data_crc32) { | ||
434 | tb_sw_warn(sw, | ||
435 | "drom data crc32 mismatch (expected: %#x, got: %#x), aborting\n", | ||
436 | header->data_crc32, crc); | ||
437 | goto err; | ||
438 | } | ||
439 | |||
440 | if (header->device_rom_revision > 1) | ||
441 | tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n", | ||
442 | header->device_rom_revision); | ||
443 | |||
444 | return tb_drom_parse_entries(sw); | ||
445 | err: | ||
446 | kfree(sw->drom); | ||
447 | return -EIO; | ||
448 | |||
449 | } | ||
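Editor's illustration, not part of this patch: the DROM integrity scheme used by tb_drom_read() above, restated as a standalone check. Byte 0 is a crc8 over the eight uid bytes; bytes 9-12 hold a crc32c over data_len bytes starting at TB_DROM_DATA_START (byte 13).

static int example_verify_drom(u8 *drom)
{
        struct tb_drom_header *header = (void *) drom;

        if (tb_crc8((u8 *) &header->uid, 8) != header->uid_crc8)
                return -EIO;    /* uid checksum mismatch */
        if (tb_crc32(drom + TB_DROM_DATA_START, header->data_len)
                        != header->data_crc32)
                return -EIO;    /* data checksum mismatch */
        return 0;
}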
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
new file mode 100644
index 000000000000..c68fe1222c16
--- /dev/null
+++ b/drivers/thunderbolt/nhi.c
@@ -0,0 +1,675 @@
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - NHI driver | ||
3 | * | ||
4 | * The NHI (native host interface) is the pci device that allows us to send and | ||
5 | * receive frames from the thunderbolt bus. | ||
6 | * | ||
7 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
8 | */ | ||
9 | |||
10 | #include <linux/pm_runtime.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/pci.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/dmi.h> | ||
17 | |||
18 | #include "nhi.h" | ||
19 | #include "nhi_regs.h" | ||
20 | #include "tb.h" | ||
21 | |||
22 | #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") | ||
23 | |||
24 | |||
25 | static int ring_interrupt_index(struct tb_ring *ring) | ||
26 | { | ||
27 | int bit = ring->hop; | ||
28 | if (!ring->is_tx) | ||
29 | bit += ring->nhi->hop_count; | ||
30 | return bit; | ||
31 | } | ||
32 | |||
33 | /** | ||
34 | * ring_interrupt_active() - activate/deactivate interrupts for a single ring | ||
35 | * | ||
36 | * ring->nhi->lock must be held. | ||
37 | */ | ||
38 | static void ring_interrupt_active(struct tb_ring *ring, bool active) | ||
39 | { | ||
40 | int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32; | ||
41 | int bit = ring_interrupt_index(ring) & 31; | ||
42 | int mask = 1 << bit; | ||
43 | u32 old, new; | ||
44 | old = ioread32(ring->nhi->iobase + reg); | ||
45 | if (active) | ||
46 | new = old | mask; | ||
47 | else | ||
48 | new = old & ~mask; | ||
49 | |||
50 | dev_info(&ring->nhi->pdev->dev, | ||
51 | "%s interrupt at register %#x bit %d (%#x -> %#x)\n", | ||
52 | active ? "enabling" : "disabling", reg, bit, old, new); | ||
53 | |||
54 | if (new == old) | ||
55 | dev_WARN(&ring->nhi->pdev->dev, | ||
56 | "interrupt for %s %d is already %s\n", | ||
57 | RING_TYPE(ring), ring->hop, | ||
58 | active ? "enabled" : "disabled"); | ||
59 | iowrite32(new, ring->nhi->iobase + reg); | ||
60 | } | ||
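Editor's illustration, not part of this patch: a worked example of the interrupt index arithmetic above. With an assumed hop_count of 12, the RX ring for hop 2 gets index 2 + 12 = 14, so it is controlled by bit 14 (14 & 31) of the register at REG_RING_INTERRUPT_BASE + 14 / 32, i.e. the first interrupt register.

static int example_rx_interrupt_bit(struct tb_ring *ring)
{
        /* for an RX ring on hop 2 with hop_count 12: (2 + 12) & 31 == 14 */
        return ring_interrupt_index(ring) & 31;
}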
61 | |||
62 | /** | ||
63 | * nhi_disable_interrupts() - disable interrupts for all rings | ||
64 | * | ||
65 | * Use only during init and shutdown. | ||
66 | */ | ||
67 | static void nhi_disable_interrupts(struct tb_nhi *nhi) | ||
68 | { | ||
69 | int i = 0; | ||
70 | /* disable interrupts */ | ||
71 | for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) | ||
72 | iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); | ||
73 | |||
74 | /* clear interrupt status bits */ | ||
75 | for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) | ||
76 | ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); | ||
77 | } | ||
78 | |||
79 | /* ring helper methods */ | ||
80 | |||
81 | static void __iomem *ring_desc_base(struct tb_ring *ring) | ||
82 | { | ||
83 | void __iomem *io = ring->nhi->iobase; | ||
84 | io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; | ||
85 | io += ring->hop * 16; | ||
86 | return io; | ||
87 | } | ||
88 | |||
89 | static void __iomem *ring_options_base(struct tb_ring *ring) | ||
90 | { | ||
91 | void __iomem *io = ring->nhi->iobase; | ||
92 | io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; | ||
93 | io += ring->hop * 32; | ||
94 | return io; | ||
95 | } | ||
96 | |||
97 | static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) | ||
98 | { | ||
99 | iowrite16(value, ring_desc_base(ring) + offset); | ||
100 | } | ||
101 | |||
102 | static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) | ||
103 | { | ||
104 | iowrite32(value, ring_desc_base(ring) + offset); | ||
105 | } | ||
106 | |||
107 | static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) | ||
108 | { | ||
109 | iowrite32(value, ring_desc_base(ring) + offset); | ||
110 | iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); | ||
111 | } | ||
112 | |||
113 | static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) | ||
114 | { | ||
115 | iowrite32(value, ring_options_base(ring) + offset); | ||
116 | } | ||
117 | |||
118 | static bool ring_full(struct tb_ring *ring) | ||
119 | { | ||
120 | return ((ring->head + 1) % ring->size) == ring->tail; | ||
121 | } | ||
122 | |||
123 | static bool ring_empty(struct tb_ring *ring) | ||
124 | { | ||
125 | return ring->head == ring->tail; | ||
126 | } | ||
127 | |||
128 | /** | ||
129 | * ring_write_descriptors() - post frames from ring->queue to the controller | ||
130 | * | ||
131 | * ring->lock is held. | ||
132 | */ | ||
133 | static void ring_write_descriptors(struct tb_ring *ring) | ||
134 | { | ||
135 | struct ring_frame *frame, *n; | ||
136 | struct ring_desc *descriptor; | ||
137 | list_for_each_entry_safe(frame, n, &ring->queue, list) { | ||
138 | if (ring_full(ring)) | ||
139 | break; | ||
140 | list_move_tail(&frame->list, &ring->in_flight); | ||
141 | descriptor = &ring->descriptors[ring->head]; | ||
142 | descriptor->phys = frame->buffer_phy; | ||
143 | descriptor->time = 0; | ||
144 | descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; | ||
145 | if (ring->is_tx) { | ||
146 | descriptor->length = frame->size; | ||
147 | descriptor->eof = frame->eof; | ||
148 | descriptor->sof = frame->sof; | ||
149 | } | ||
150 | ring->head = (ring->head + 1) % ring->size; | ||
151 | ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | /** | ||
156 | * ring_work() - progress completed frames | ||
157 | * | ||
158 | * If the ring is shutting down then all frames are marked as canceled and | ||
159 | * their callbacks are invoked. | ||
160 | * | ||
161 | * Otherwise we collect all completed frames from the ring buffer, write new | ||
162 | * frames to the ring buffer and invoke the callbacks for the completed frames. | ||
163 | */ | ||
164 | static void ring_work(struct work_struct *work) | ||
165 | { | ||
166 | struct tb_ring *ring = container_of(work, typeof(*ring), work); | ||
167 | struct ring_frame *frame; | ||
168 | bool canceled = false; | ||
169 | LIST_HEAD(done); | ||
170 | mutex_lock(&ring->lock); | ||
171 | |||
172 | if (!ring->running) { | ||
173 | /* Move all frames to done and mark them as canceled. */ | ||
174 | list_splice_tail_init(&ring->in_flight, &done); | ||
175 | list_splice_tail_init(&ring->queue, &done); | ||
176 | canceled = true; | ||
177 | goto invoke_callback; | ||
178 | } | ||
179 | |||
180 | while (!ring_empty(ring)) { | ||
181 | if (!(ring->descriptors[ring->tail].flags | ||
182 | & RING_DESC_COMPLETED)) | ||
183 | break; | ||
184 | frame = list_first_entry(&ring->in_flight, typeof(*frame), | ||
185 | list); | ||
186 | list_move_tail(&frame->list, &done); | ||
187 | if (!ring->is_tx) { | ||
188 | frame->size = ring->descriptors[ring->tail].length; | ||
189 | frame->eof = ring->descriptors[ring->tail].eof; | ||
190 | frame->sof = ring->descriptors[ring->tail].sof; | ||
191 | frame->flags = ring->descriptors[ring->tail].flags; | ||
192 | if (frame->sof != 0) | ||
193 | dev_WARN(&ring->nhi->pdev->dev, | ||
194 | "%s %d got unexpected SOF: %#x\n", | ||
195 | RING_TYPE(ring), ring->hop, | ||
196 | frame->sof); | ||
197 | /* | ||
198 | * known flags: | ||
199 | * raw not enabled, interrupt not set: 0x2=0010 | ||
200 | * raw enabled: 0xa=1010 | ||
201 | * raw not enabled: 0xb=1011 | ||
202 | * partial frame (>MAX_FRAME_SIZE): 0xe=1110 | ||
203 | */ | ||
204 | if (frame->flags != 0xa) | ||
205 | dev_WARN(&ring->nhi->pdev->dev, | ||
206 | "%s %d got unexpected flags: %#x\n", | ||
207 | RING_TYPE(ring), ring->hop, | ||
208 | frame->flags); | ||
209 | } | ||
210 | ring->tail = (ring->tail + 1) % ring->size; | ||
211 | } | ||
212 | ring_write_descriptors(ring); | ||
213 | |||
214 | invoke_callback: | ||
215 | mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */ | ||
216 | while (!list_empty(&done)) { | ||
217 | frame = list_first_entry(&done, typeof(*frame), list); | ||
218 | /* | ||
219 | * The callback may reenqueue or delete frame. | ||
220 | * Do not hold on to it. | ||
221 | */ | ||
222 | list_del_init(&frame->list); | ||
223 | frame->callback(ring, frame, canceled); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) | ||
228 | { | ||
229 | int ret = 0; | ||
230 | mutex_lock(&ring->lock); | ||
231 | if (ring->running) { | ||
232 | list_add_tail(&frame->list, &ring->queue); | ||
233 | ring_write_descriptors(ring); | ||
234 | } else { | ||
235 | ret = -ESHUTDOWN; | ||
236 | } | ||
237 | mutex_unlock(&ring->lock); | ||
238 | return ret; | ||
239 | } | ||
240 | |||
241 | static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size, | ||
242 | bool transmit) | ||
243 | { | ||
244 | struct tb_ring *ring = NULL; | ||
245 | dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", | ||
246 | transmit ? "TX" : "RX", hop, size); | ||
247 | |||
248 | mutex_lock(&nhi->lock); | ||
249 | if (hop >= nhi->hop_count) { | ||
250 | dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop); | ||
251 | goto err; | ||
252 | } | ||
253 | if (transmit && nhi->tx_rings[hop]) { | ||
254 | dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop); | ||
255 | goto err; | ||
256 | } else if (!transmit && nhi->rx_rings[hop]) { | ||
257 | dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop); | ||
258 | goto err; | ||
259 | } | ||
260 | ring = kzalloc(sizeof(*ring), GFP_KERNEL); | ||
261 | if (!ring) | ||
262 | goto err; | ||
263 | |||
264 | mutex_init(&ring->lock); | ||
265 | INIT_LIST_HEAD(&ring->queue); | ||
266 | INIT_LIST_HEAD(&ring->in_flight); | ||
267 | INIT_WORK(&ring->work, ring_work); | ||
268 | |||
269 | ring->nhi = nhi; | ||
270 | ring->hop = hop; | ||
271 | ring->is_tx = transmit; | ||
272 | ring->size = size; | ||
273 | ring->head = 0; | ||
274 | ring->tail = 0; | ||
275 | ring->running = false; | ||
276 | ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, | ||
277 | size * sizeof(*ring->descriptors), | ||
278 | &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); | ||
279 | if (!ring->descriptors) | ||
280 | goto err; | ||
281 | |||
282 | if (transmit) | ||
283 | nhi->tx_rings[hop] = ring; | ||
284 | else | ||
285 | nhi->rx_rings[hop] = ring; | ||
286 | mutex_unlock(&nhi->lock); | ||
287 | return ring; | ||
288 | |||
289 | err: | ||
290 | if (ring) | ||
291 | mutex_destroy(&ring->lock); | ||
292 | kfree(ring); | ||
293 | mutex_unlock(&nhi->lock); | ||
294 | return NULL; | ||
295 | } | ||
296 | |||
297 | struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size) | ||
298 | { | ||
299 | return ring_alloc(nhi, hop, size, true); | ||
300 | } | ||
301 | |||
302 | struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size) | ||
303 | { | ||
304 | return ring_alloc(nhi, hop, size, false); | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * ring_start() - enable a ring | ||
309 | * | ||
310 | * Must not be invoked in parallel with ring_stop(). | ||
311 | */ | ||
312 | void ring_start(struct tb_ring *ring) | ||
313 | { | ||
314 | mutex_lock(&ring->nhi->lock); | ||
315 | mutex_lock(&ring->lock); | ||
316 | if (ring->running) { | ||
317 | dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); | ||
318 | goto err; | ||
319 | } | ||
320 | dev_info(&ring->nhi->pdev->dev, "starting %s %d\n", | ||
321 | RING_TYPE(ring), ring->hop); | ||
322 | |||
323 | ring_iowrite64desc(ring, ring->descriptors_dma, 0); | ||
324 | if (ring->is_tx) { | ||
325 | ring_iowrite32desc(ring, ring->size, 12); | ||
326 | ring_iowrite32options(ring, 0, 4); /* time related? */ | ||
327 | ring_iowrite32options(ring, | ||
328 | RING_FLAG_ENABLE | RING_FLAG_RAW, 0); | ||
329 | } else { | ||
330 | ring_iowrite32desc(ring, | ||
331 | (TB_FRAME_SIZE << 16) | ring->size, 12); | ||
332 | ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */ | ||
333 | ring_iowrite32options(ring, | ||
334 | RING_FLAG_ENABLE | RING_FLAG_RAW, 0); | ||
335 | } | ||
336 | ring_interrupt_active(ring, true); | ||
337 | ring->running = true; | ||
338 | err: | ||
339 | mutex_unlock(&ring->lock); | ||
340 | mutex_unlock(&ring->nhi->lock); | ||
341 | } | ||
342 | |||
343 | |||
344 | /** | ||
345 | * ring_stop() - shutdown a ring | ||
346 | * | ||
347 | * Must not be invoked from a callback. | ||
348 | * | ||
349 | * This method will disable the ring. Further calls to ring_tx/ring_rx will | ||
350 | * return -ESHUTDOWN until ring_start() has been called again. | ||
351 | * | ||
352 | * All enqueued frames will be canceled and their callbacks will be executed | ||
353 | * with frame->canceled set to true (on the callback thread). This method | ||
354 | * returns only after all callback invocations have finished. | ||
355 | */ | ||
356 | void ring_stop(struct tb_ring *ring) | ||
357 | { | ||
358 | mutex_lock(&ring->nhi->lock); | ||
359 | mutex_lock(&ring->lock); | ||
360 | dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n", | ||
361 | RING_TYPE(ring), ring->hop); | ||
362 | if (!ring->running) { | ||
363 | dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", | ||
364 | RING_TYPE(ring), ring->hop); | ||
365 | goto err; | ||
366 | } | ||
367 | ring_interrupt_active(ring, false); | ||
368 | |||
369 | ring_iowrite32options(ring, 0, 0); | ||
370 | ring_iowrite64desc(ring, 0, 0); | ||
371 | ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); | ||
372 | ring_iowrite32desc(ring, 0, 12); | ||
373 | ring->head = 0; | ||
374 | ring->tail = 0; | ||
375 | ring->running = false; | ||
376 | |||
377 | err: | ||
378 | mutex_unlock(&ring->lock); | ||
379 | mutex_unlock(&ring->nhi->lock); | ||
380 | |||
381 | /* | ||
382 | * schedule ring->work to invoke callbacks on all remaining frames. | ||
383 | */ | ||
384 | schedule_work(&ring->work); | ||
385 | flush_work(&ring->work); | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * ring_free() - free ring | ||
390 | * | ||
391 | * When this method returns all invocations of ring->callback will have | ||
392 | * finished. | ||
393 | * | ||
394 | * Ring must be stopped. | ||
395 | * | ||
396 | * Must NOT be called from ring_frame->callback! | ||
397 | */ | ||
398 | void ring_free(struct tb_ring *ring) | ||
399 | { | ||
400 | mutex_lock(&ring->nhi->lock); | ||
401 | /* | ||
402 | * Dissociate the ring from the NHI. This also ensures that | ||
403 | * nhi_interrupt_work cannot reschedule ring->work. | ||
404 | */ | ||
405 | if (ring->is_tx) | ||
406 | ring->nhi->tx_rings[ring->hop] = NULL; | ||
407 | else | ||
408 | ring->nhi->rx_rings[ring->hop] = NULL; | ||
409 | |||
410 | if (ring->running) { | ||
411 | dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", | ||
412 | RING_TYPE(ring), ring->hop); | ||
413 | } | ||
414 | |||
415 | dma_free_coherent(&ring->nhi->pdev->dev, | ||
416 | ring->size * sizeof(*ring->descriptors), | ||
417 | ring->descriptors, ring->descriptors_dma); | ||
418 | |||
419 | ring->descriptors = NULL; | ||
420 | ring->descriptors_dma = 0; | ||
421 | |||
422 | |||
423 | dev_info(&ring->nhi->pdev->dev, | ||
424 | "freeing %s %d\n", | ||
425 | RING_TYPE(ring), | ||
426 | ring->hop); | ||
427 | |||
428 | mutex_unlock(&ring->nhi->lock); | ||
429 | /* | ||
430 | * ring->work can no longer be scheduled (it is scheduled only by | ||
431 | * nhi_interrupt_work and ring_stop). Wait for it to finish before | ||
432 | * freeing the ring. | ||
433 | */ | ||
434 | flush_work(&ring->work); | ||
435 | mutex_destroy(&ring->lock); | ||
436 | kfree(ring); | ||
437 | } | ||
438 | |||
439 | static void nhi_interrupt_work(struct work_struct *work) | ||
440 | { | ||
441 | struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); | ||
442 | int value = 0; /* Suppress uninitialized usage warning. */ | ||
443 | int bit; | ||
444 | int hop = -1; | ||
445 | int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */ | ||
446 | struct tb_ring *ring; | ||
447 | |||
448 | mutex_lock(&nhi->lock); | ||
449 | |||
450 | /* | ||
451 | * Starting at REG_RING_NOTIFY_BASE there are three status bitfields | ||
452 | * (TX, RX, RX overflow). We iterate over the bits and read a new | ||
453 | * dword as required. The registers are cleared on read. | ||
454 | */ | ||
455 | for (bit = 0; bit < 3 * nhi->hop_count; bit++) { | ||
456 | if (bit % 32 == 0) | ||
457 | value = ioread32(nhi->iobase | ||
458 | + REG_RING_NOTIFY_BASE | ||
459 | + 4 * (bit / 32)); | ||
460 | if (++hop == nhi->hop_count) { | ||
461 | hop = 0; | ||
462 | type++; | ||
463 | } | ||
464 | if ((value & (1 << (bit % 32))) == 0) | ||
465 | continue; | ||
466 | if (type == 2) { | ||
467 | dev_warn(&nhi->pdev->dev, | ||
468 | "RX overflow for ring %d\n", | ||
469 | hop); | ||
470 | continue; | ||
471 | } | ||
472 | if (type == 0) | ||
473 | ring = nhi->tx_rings[hop]; | ||
474 | else | ||
475 | ring = nhi->rx_rings[hop]; | ||
476 | if (ring == NULL) { | ||
477 | dev_warn(&nhi->pdev->dev, | ||
478 | "got interrupt for inactive %s ring %d\n", | ||
479 | type ? "RX" : "TX", | ||
480 | hop); | ||
481 | continue; | ||
482 | } | ||
483 | /* we do not check ring->running, this is done in ring->work */ | ||
484 | schedule_work(&ring->work); | ||
485 | } | ||
486 | mutex_unlock(&nhi->lock); | ||
487 | } | ||
488 | |||
489 | static irqreturn_t nhi_msi(int irq, void *data) | ||
490 | { | ||
491 | struct tb_nhi *nhi = data; | ||
492 | schedule_work(&nhi->interrupt_work); | ||
493 | return IRQ_HANDLED; | ||
494 | } | ||
495 | |||
496 | static int nhi_suspend_noirq(struct device *dev) | ||
497 | { | ||
498 | struct pci_dev *pdev = to_pci_dev(dev); | ||
499 | struct tb *tb = pci_get_drvdata(pdev); | ||
500 | thunderbolt_suspend(tb); | ||
501 | return 0; | ||
502 | } | ||
503 | |||
504 | static int nhi_resume_noirq(struct device *dev) | ||
505 | { | ||
506 | struct pci_dev *pdev = to_pci_dev(dev); | ||
507 | struct tb *tb = pci_get_drvdata(pdev); | ||
508 | thunderbolt_resume(tb); | ||
509 | return 0; | ||
510 | } | ||
511 | |||
512 | static void nhi_shutdown(struct tb_nhi *nhi) | ||
513 | { | ||
514 | int i; | ||
515 | dev_info(&nhi->pdev->dev, "shutdown\n"); | ||
516 | |||
517 | for (i = 0; i < nhi->hop_count; i++) { | ||
518 | if (nhi->tx_rings[i]) | ||
519 | dev_WARN(&nhi->pdev->dev, | ||
520 | "TX ring %d is still active\n", i); | ||
521 | if (nhi->rx_rings[i]) | ||
522 | dev_WARN(&nhi->pdev->dev, | ||
523 | "RX ring %d is still active\n", i); | ||
524 | } | ||
525 | nhi_disable_interrupts(nhi); | ||
526 | /* | ||
527 | * We have to release the irq before calling flush_work. Otherwise an | ||
528 | * already executing IRQ handler could call schedule_work again. | ||
529 | */ | ||
530 | devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); | ||
531 | flush_work(&nhi->interrupt_work); | ||
532 | mutex_destroy(&nhi->lock); | ||
533 | } | ||
534 | |||
535 | static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) | ||
536 | { | ||
537 | struct tb_nhi *nhi; | ||
538 | struct tb *tb; | ||
539 | int res; | ||
540 | |||
541 | res = pcim_enable_device(pdev); | ||
542 | if (res) { | ||
543 | dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); | ||
544 | return res; | ||
545 | } | ||
546 | |||
547 | res = pci_enable_msi(pdev); | ||
548 | if (res) { | ||
549 | dev_err(&pdev->dev, "cannot enable MSI, aborting\n"); | ||
550 | return res; | ||
551 | } | ||
552 | |||
553 | res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); | ||
554 | if (res) { | ||
555 | dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); | ||
556 | return res; | ||
557 | } | ||
558 | |||
559 | nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); | ||
560 | if (!nhi) | ||
561 | return -ENOMEM; | ||
562 | |||
563 | nhi->pdev = pdev; | ||
564 | /* cannot fail - table is allocated in pcim_iomap_regions */ | ||
565 | nhi->iobase = pcim_iomap_table(pdev)[0]; | ||
566 | nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; | ||
567 | if (nhi->hop_count != 12) | ||
568 | dev_warn(&pdev->dev, "unexpected hop count: %d\n", | ||
569 | nhi->hop_count); | ||
570 | INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); | ||
571 | |||
572 | nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, | ||
573 | sizeof(*nhi->tx_rings), GFP_KERNEL); | ||
574 | nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, | ||
575 | sizeof(*nhi->rx_rings), GFP_KERNEL); | ||
576 | if (!nhi->tx_rings || !nhi->rx_rings) | ||
577 | return -ENOMEM; | ||
578 | |||
579 | nhi_disable_interrupts(nhi); /* In case someone left them on. */ | ||
580 | res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi, | ||
581 | IRQF_NO_SUSPEND, /* must work during _noirq */ | ||
582 | "thunderbolt", nhi); | ||
583 | if (res) { | ||
584 | dev_err(&pdev->dev, "request_irq failed, aborting\n"); | ||
585 | return res; | ||
586 | } | ||
587 | |||
588 | mutex_init(&nhi->lock); | ||
589 | |||
590 | pci_set_master(pdev); | ||
591 | |||
592 | /* magic value - clock related? */ | ||
593 | iowrite32(3906250 / 10000, nhi->iobase + 0x38c00); | ||
594 | |||
595 | dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); | ||
596 | tb = thunderbolt_alloc_and_start(nhi); | ||
597 | if (!tb) { | ||
598 | /* | ||
599 | * At this point the RX/TX rings might already have been | ||
600 | * activated. Do a proper shutdown. | ||
601 | */ | ||
602 | nhi_shutdown(nhi); | ||
603 | return -EIO; | ||
604 | } | ||
605 | pci_set_drvdata(pdev, tb); | ||
606 | |||
607 | return 0; | ||
608 | } | ||
609 | |||
610 | static void nhi_remove(struct pci_dev *pdev) | ||
611 | { | ||
612 | struct tb *tb = pci_get_drvdata(pdev); | ||
613 | struct tb_nhi *nhi = tb->nhi; | ||
614 | thunderbolt_shutdown_and_free(tb); | ||
615 | nhi_shutdown(nhi); | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable | ||
620 | * the tunnels asap. A corresponding pci quirk blocks the downstream bridges | ||
621 | * resume_noirq until we are done. | ||
622 | */ | ||
623 | static const struct dev_pm_ops nhi_pm_ops = { | ||
624 | .suspend_noirq = nhi_suspend_noirq, | ||
625 | .resume_noirq = nhi_resume_noirq, | ||
626 | .freeze_noirq = nhi_suspend_noirq, /* | ||
627 | * we just disable hotplug, the | ||
628 | * pci-tunnels stay alive. | ||
629 | */ | ||
630 | .restore_noirq = nhi_resume_noirq, | ||
631 | }; | ||
632 | |||
633 | static struct pci_device_id nhi_ids[] = { | ||
634 | /* | ||
635 | * We have to specify class, the TB bridges use the same device and | ||
636 | * vendor (sub)id. | ||
637 | */ | ||
638 | { | ||
639 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | ||
640 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x1547, | ||
641 | .subvendor = 0x2222, .subdevice = 0x1111, | ||
642 | }, | ||
643 | { | ||
644 | .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, | ||
645 | .vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c, | ||
646 | .subvendor = 0x2222, .subdevice = 0x1111, | ||
647 | }, | ||
648 | { 0,} | ||
649 | }; | ||
650 | |||
651 | MODULE_DEVICE_TABLE(pci, nhi_ids); | ||
652 | MODULE_LICENSE("GPL"); | ||
653 | |||
654 | static struct pci_driver nhi_driver = { | ||
655 | .name = "thunderbolt", | ||
656 | .id_table = nhi_ids, | ||
657 | .probe = nhi_probe, | ||
658 | .remove = nhi_remove, | ||
659 | .driver.pm = &nhi_pm_ops, | ||
660 | }; | ||
661 | |||
662 | static int __init nhi_init(void) | ||
663 | { | ||
664 | if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc.")) | ||
665 | return -ENOSYS; | ||
666 | return pci_register_driver(&nhi_driver); | ||
667 | } | ||
668 | |||
669 | static void __exit nhi_unload(void) | ||
670 | { | ||
671 | pci_unregister_driver(&nhi_driver); | ||
672 | } | ||
673 | |||
674 | module_init(nhi_init); | ||
675 | module_exit(nhi_unload); | ||
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h new file mode 100644 index 000000000000..317242939b31 --- /dev/null +++ b/drivers/thunderbolt/nhi.h | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - NHI driver | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef DSL3510_H_ | ||
8 | #define DSL3510_H_ | ||
9 | |||
10 | #include <linux/mutex.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | |||
13 | /** | ||
14 | * struct tb_nhi - thunderbolt native host interface | ||
15 | */ | ||
16 | struct tb_nhi { | ||
17 | struct mutex lock; /* | ||
18 | * Must be held during ring creation/destruction. | ||
19 | * Is acquired by interrupt_work when dispatching | ||
20 | * interrupts to individual rings. | ||
21 | */ | ||
22 | struct pci_dev *pdev; | ||
23 | void __iomem *iobase; | ||
24 | struct tb_ring **tx_rings; | ||
25 | struct tb_ring **rx_rings; | ||
26 | struct work_struct interrupt_work; | ||
27 | u32 hop_count; /* Number of rings (end point hops) supported by NHI. */ | ||
28 | }; | ||
29 | |||
30 | /** | ||
31 | * struct tb_ring - thunderbolt TX or RX ring associated with a NHI | ||
32 | */ | ||
33 | struct tb_ring { | ||
34 | struct mutex lock; /* must be acquired after nhi->lock */ | ||
35 | struct tb_nhi *nhi; | ||
36 | int size; | ||
37 | int hop; | ||
38 | int head; /* write next descriptor here */ | ||
39 | int tail; /* complete next descriptor here */ | ||
40 | struct ring_desc *descriptors; | ||
41 | dma_addr_t descriptors_dma; | ||
42 | struct list_head queue; | ||
43 | struct list_head in_flight; | ||
44 | struct work_struct work; | ||
45 | bool is_tx:1; /* rx otherwise */ | ||
46 | bool running:1; | ||
47 | }; | ||
48 | |||
49 | struct ring_frame; | ||
50 | typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled); | ||
51 | |||
52 | /** | ||
53 | * struct ring_frame - for use with ring_rx/ring_tx | ||
54 | */ | ||
55 | struct ring_frame { | ||
56 | dma_addr_t buffer_phy; | ||
57 | ring_cb callback; | ||
58 | struct list_head list; | ||
59 | u32 size:12; /* TX: in, RX: out*/ | ||
60 | u32 flags:12; /* RX: out */ | ||
61 | u32 eof:4; /* TX:in, RX: out */ | ||
62 | u32 sof:4; /* TX:in, RX: out */ | ||
63 | }; | ||
64 | |||
65 | #define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */ | ||
66 | |||
67 | struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size); | ||
68 | struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size); | ||
69 | void ring_start(struct tb_ring *ring); | ||
70 | void ring_stop(struct tb_ring *ring); | ||
71 | void ring_free(struct tb_ring *ring); | ||
72 | |||
73 | int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame); | ||
74 | |||
75 | /** | ||
76 | * ring_rx() - enqueue a frame on an RX ring | ||
77 | * | ||
78 | * frame->buffer, frame->buffer_phy and frame->callback have to be set. The | ||
79 | * buffer must contain at least TB_FRAME_SIZE bytes. | ||
80 | * | ||
81 | * frame->callback will be invoked with frame->size, frame->flags, frame->eof, | ||
82 | * frame->sof set once the frame has been received. | ||
83 | * | ||
84 | * If ring_stop is called after the packet has been enqueued frame->callback | ||
85 | * will be called with canceled set to true. | ||
86 | * | ||
87 | * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise. | ||
88 | */ | ||
89 | static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame) | ||
90 | { | ||
91 | WARN_ON(ring->is_tx); | ||
92 | return __ring_enqueue(ring, frame); | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * ring_tx() - enqueue a frame on a TX ring | ||
97 | * | ||
98 | * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof | ||
99 | * and frame->sof have to be set. | ||
100 | * | ||
101 | * frame->callback will be invoked once the frame has been transmitted. | ||
102 | * | ||
103 | * If ring_stop is called after the packet has been enqueued frame->callback | ||
104 | * will be called with canceled set to true. | ||
105 | * | ||
106 | * Return: Returns -ESHUTDOWN if ring_stop has been called. Zero otherwise. | ||
107 | */ | ||
108 | static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame) | ||
109 | { | ||
110 | WARN_ON(!ring->is_tx); | ||
111 | return __ring_enqueue(ring, frame); | ||
112 | } | ||
113 | |||
114 | #endif | ||
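
[Editorial aside, not part of the patch: a minimal sketch of how the ring API declared above would be driven by a consumer such as ctl.c. The helper names demo_rx_callback()/demo_start_rx(), the hop number, the ring size and the buf_out plumbing are placeholder assumptions; only ring_alloc_rx(), ring_start(), ring_rx(), ring_free(), TB_FRAME_SIZE and struct ring_frame come from the header above.]

    #include <linux/errno.h>
    #include <linux/pci.h>
    #include <linux/dma-mapping.h>
    #include "nhi.h"

    /* Invoked from ring_work() once the frame completes or is canceled. */
    static void demo_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
                                 bool canceled)
    {
            if (canceled)
                    return; /* ring_stop() was called, do not re-enqueue */
            /* frame->size/eof/sof/flags were filled in by ring_work() */
            ring_rx(ring, frame); /* hand the buffer back to the controller */
    }

    static int demo_start_rx(struct tb_nhi *nhi, struct ring_frame *frame,
                             void **buf_out)
    {
            struct tb_ring *ring;

            ring = ring_alloc_rx(nhi, 0 /* hop, placeholder */, 8 /* descriptors */);
            if (!ring)
                    return -ENOMEM;

            /* One receive buffer; the CPU address is returned to the caller. */
            *buf_out = dma_alloc_coherent(&nhi->pdev->dev, TB_FRAME_SIZE,
                                          &frame->buffer_phy, GFP_KERNEL);
            if (!*buf_out) {
                    ring_free(ring);
                    return -ENOMEM;
            }

            frame->callback = demo_rx_callback;
            ring_start(ring);
            return ring_rx(ring, frame); /* -ESHUTDOWN if the ring was stopped */
    }
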
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h new file mode 100644 index 000000000000..86b996c702a0 --- /dev/null +++ b/drivers/thunderbolt/nhi_regs.h | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - NHI registers | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef DSL3510_REGS_H_ | ||
8 | #define DSL3510_REGS_H_ | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | |||
12 | enum ring_flags { | ||
13 | RING_FLAG_ISOCH_ENABLE = 1 << 27, /* TX only? */ | ||
14 | RING_FLAG_E2E_FLOW_CONTROL = 1 << 28, | ||
15 | RING_FLAG_PCI_NO_SNOOP = 1 << 29, | ||
16 | RING_FLAG_RAW = 1 << 30, /* ignore EOF/SOF mask, include checksum */ | ||
17 | RING_FLAG_ENABLE = 1 << 31, | ||
18 | }; | ||
19 | |||
20 | enum ring_desc_flags { | ||
21 | RING_DESC_ISOCH = 0x1, /* TX only? */ | ||
22 | RING_DESC_COMPLETED = 0x2, /* set by NHI */ | ||
23 | RING_DESC_POSTED = 0x4, /* always set this */ | ||
24 | RING_DESC_INTERRUPT = 0x8, /* request an interrupt on completion */ | ||
25 | }; | ||
26 | |||
27 | /** | ||
28 | * struct ring_desc - TX/RX ring entry | ||
29 | * | ||
30 | * For TX set length/eof/sof. | ||
31 | * For RX length/eof/sof are set by the NHI. | ||
32 | */ | ||
33 | struct ring_desc { | ||
34 | u64 phys; | ||
35 | u32 length:12; | ||
36 | u32 eof:4; | ||
37 | u32 sof:4; | ||
38 | enum ring_desc_flags flags:12; | ||
39 | u32 time; /* write zero */ | ||
40 | } __packed; | ||
41 | |||
42 | /* NHI registers in bar 0 */ | ||
43 | |||
44 | /* | ||
45 | * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT) | ||
46 | * 00: physical pointer to an array of struct ring_desc | ||
47 | * 08: ring tail (set by NHI) | ||
48 | * 10: ring head (index of first non-posted descriptor) | ||
49 | * 12: descriptor count | ||
50 | */ | ||
51 | #define REG_TX_RING_BASE 0x00000 | ||
52 | |||
53 | /* | ||
54 | * 16 bytes per entry, one entry for every hop (REG_HOP_COUNT) | ||
55 | * 00: physical pointer to an array of struct ring_desc | ||
56 | * 08: ring head (index of first non-posted descriptor) | ||
57 | * 10: ring tail (set by NHI) | ||
58 | * 12: descriptor count | ||
59 | * 14: max frame sizes (anything larger than 0x100 has no effect) | ||
60 | */ | ||
61 | #define REG_RX_RING_BASE 0x08000 | ||
62 | |||
63 | /* | ||
64 | * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT) | ||
65 | * 00: enum ring_flags | ||
66 | * 04: isoch time stamp ?? (write 0) | ||
67 | * ..: unknown | ||
68 | */ | ||
69 | #define REG_TX_OPTIONS_BASE 0x19800 | ||
70 | |||
71 | /* | ||
72 | * 32 bytes per entry, one entry for every hop (REG_HOP_COUNT) | ||
73 | * 00: enum ring_flags | ||
74 | * If RING_FLAG_E2E_FLOW_CONTROL is set then bits 13-23 must be set to | ||
75 | * the corresponding TX hop id. | ||
76 | * 04: EOF/SOF mask (ignored for RING_FLAG_RAW rings) | ||
77 | * ..: unknown | ||
78 | */ | ||
79 | #define REG_RX_OPTIONS_BASE 0x29800 | ||
80 | |||
81 | /* | ||
82 | * three bitfields: tx, rx, rx overflow | ||
83 | * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are | ||
84 | * cleared on read. New interrupts are fired only after ALL registers have been | ||
85 | * read (even those containing only disabled rings). | ||
86 | */ | ||
87 | #define REG_RING_NOTIFY_BASE 0x37800 | ||
88 | #define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32) | ||
89 | |||
90 | /* | ||
91 | * two bitfields: rx, tx | ||
92 | * Both bitfields contains one bit for every hop (REG_HOP_COUNT). To | ||
93 | * enable/disable interrupts set/clear the corresponding bits. | ||
94 | */ | ||
95 | #define REG_RING_INTERRUPT_BASE 0x38200 | ||
96 | #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32) | ||
97 | |||
98 | /* The last 11 bits contain the number of hops supported by the NHI port. */ | ||
99 | #define REG_HOP_COUNT 0x39640 | ||
100 | |||
101 | #endif | ||
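
[Editorial aside, not part of the patch: the status area behind REG_RING_NOTIFY_BASE is laid out as hop_count TX-complete bits, then hop_count RX-complete bits, then hop_count RX-overflow bits, which is the order nhi_interrupt_work() walks them in nhi.c. A throwaway sketch of the dword/bit arithmetic, assuming struct tb_nhi from nhi.h; the demo_* helpers are illustrative only.]

    #include "nhi.h"
    #include "nhi_regs.h"

    /* type: 0 = TX complete, 1 = RX complete, 2 = RX overflow */
    static u32 demo_notify_reg(const struct tb_nhi *nhi, int type, int hop)
    {
            int bit = type * nhi->hop_count + hop;

            /* offset of the dword that nhi_interrupt_work() reads for this bit */
            return REG_RING_NOTIFY_BASE + 4 * (bit / 32);
    }

    static u32 demo_notify_mask(const struct tb_nhi *nhi, int type, int hop)
    {
            int bit = type * nhi->hop_count + hop;

            return 1U << (bit % 32);
    }
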
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c new file mode 100644 index 000000000000..8fcf8a7b6c22 --- /dev/null +++ b/drivers/thunderbolt/path.c | |||
@@ -0,0 +1,215 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - path/tunnel functionality | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | ||
9 | |||
10 | #include "tb.h" | ||
11 | |||
12 | |||
13 | static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop) | ||
14 | { | ||
15 | tb_port_info(port, " Hop through port %d to hop %d (%s)\n", | ||
16 | hop->out_port, hop->next_hop, | ||
17 | hop->enable ? "enabled" : "disabled"); | ||
18 | tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n", | ||
19 | hop->weight, hop->priority, | ||
20 | hop->initial_credits, hop->drop_packages); | ||
21 | tb_port_info(port, " Counter enabled: %d Counter index: %d\n", | ||
22 | hop->counter_enable, hop->counter); | ||
23 | tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n", | ||
24 | hop->ingress_fc, hop->egress_fc, | ||
25 | hop->ingress_shared_buffer, hop->egress_shared_buffer); | ||
26 | tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n", | ||
27 | hop->unknown1, hop->unknown2, hop->unknown3); | ||
28 | } | ||
29 | |||
30 | /** | ||
31 | * tb_path_alloc() - allocate a thunderbolt path | ||
32 | * | ||
33 | * Return: Returns a tb_path on success or NULL on failure. | ||
34 | */ | ||
35 | struct tb_path *tb_path_alloc(struct tb *tb, int num_hops) | ||
36 | { | ||
37 | struct tb_path *path = kzalloc(sizeof(*path), GFP_KERNEL); | ||
38 | if (!path) | ||
39 | return NULL; | ||
40 | path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); | ||
41 | if (!path->hops) { | ||
42 | kfree(path); | ||
43 | return NULL; | ||
44 | } | ||
45 | path->tb = tb; | ||
46 | path->path_length = num_hops; | ||
47 | return path; | ||
48 | } | ||
49 | |||
50 | /** | ||
51 | * tb_path_free() - free a deactivated path | ||
52 | */ | ||
53 | void tb_path_free(struct tb_path *path) | ||
54 | { | ||
55 | if (path->activated) { | ||
56 | tb_WARN(path->tb, "trying to free an activated path\n"); | ||
57 | return; | ||
58 | } | ||
59 | kfree(path->hops); | ||
60 | kfree(path); | ||
61 | } | ||
62 | |||
63 | static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop) | ||
64 | { | ||
65 | int i, res; | ||
66 | for (i = first_hop; i < path->path_length; i++) { | ||
67 | res = tb_port_add_nfc_credits(path->hops[i].in_port, | ||
68 | -path->nfc_credits); | ||
69 | if (res) | ||
70 | tb_port_warn(path->hops[i].in_port, | ||
71 | "nfc credits deallocation failed for hop %d\n", | ||
72 | i); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop) | ||
77 | { | ||
78 | int i, res; | ||
79 | struct tb_regs_hop hop = { }; | ||
80 | for (i = first_hop; i < path->path_length; i++) { | ||
81 | res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, | ||
82 | 2 * path->hops[i].in_hop_index, 2); | ||
83 | if (res) | ||
84 | tb_port_warn(path->hops[i].in_port, | ||
85 | "hop deactivation failed for hop %d, index %d\n", | ||
86 | i, path->hops[i].in_hop_index); | ||
87 | } | ||
88 | } | ||
89 | |||
90 | void tb_path_deactivate(struct tb_path *path) | ||
91 | { | ||
92 | if (!path->activated) { | ||
93 | tb_WARN(path->tb, "trying to deactivate an inactive path\n"); | ||
94 | return; | ||
95 | } | ||
96 | tb_info(path->tb, | ||
97 | "deactivating path from %llx:%x to %llx:%x\n", | ||
98 | tb_route(path->hops[0].in_port->sw), | ||
99 | path->hops[0].in_port->port, | ||
100 | tb_route(path->hops[path->path_length - 1].out_port->sw), | ||
101 | path->hops[path->path_length - 1].out_port->port); | ||
102 | __tb_path_deactivate_hops(path, 0); | ||
103 | __tb_path_deallocate_nfc(path, 0); | ||
104 | path->activated = false; | ||
105 | } | ||
106 | |||
107 | /** | ||
108 | * tb_path_activate() - activate a path | ||
109 | * | ||
110 | * Activate a path starting with the last hop and iterating backwards. The | ||
111 | * caller must fill path->hops before calling tb_path_activate(). | ||
112 | * | ||
113 | * Return: Returns 0 on success or an error code on failure. | ||
114 | */ | ||
115 | int tb_path_activate(struct tb_path *path) | ||
116 | { | ||
117 | int i, res; | ||
118 | enum tb_path_port out_mask, in_mask; | ||
119 | if (path->activated) { | ||
120 | tb_WARN(path->tb, "trying to activate already activated path\n"); | ||
121 | return -EINVAL; | ||
122 | } | ||
123 | |||
124 | tb_info(path->tb, | ||
125 | "activating path from %llx:%x to %llx:%x\n", | ||
126 | tb_route(path->hops[0].in_port->sw), | ||
127 | path->hops[0].in_port->port, | ||
128 | tb_route(path->hops[path->path_length - 1].out_port->sw), | ||
129 | path->hops[path->path_length - 1].out_port->port); | ||
130 | |||
131 | /* Clear counters. */ | ||
132 | for (i = path->path_length - 1; i >= 0; i--) { | ||
133 | if (path->hops[i].in_counter_index == -1) | ||
134 | continue; | ||
135 | res = tb_port_clear_counter(path->hops[i].in_port, | ||
136 | path->hops[i].in_counter_index); | ||
137 | if (res) | ||
138 | goto err; | ||
139 | } | ||
140 | |||
141 | /* Add non flow controlled credits. */ | ||
142 | for (i = path->path_length - 1; i >= 0; i--) { | ||
143 | res = tb_port_add_nfc_credits(path->hops[i].in_port, | ||
144 | path->nfc_credits); | ||
145 | if (res) { | ||
146 | __tb_path_deallocate_nfc(path, i); | ||
147 | goto err; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | /* Activate hops. */ | ||
152 | for (i = path->path_length - 1; i >= 0; i--) { | ||
153 | struct tb_regs_hop hop; | ||
154 | |||
155 | /* dword 0 */ | ||
156 | hop.next_hop = path->hops[i].next_hop_index; | ||
157 | hop.out_port = path->hops[i].out_port->port; | ||
158 | /* TODO: figure out why these are good values */ | ||
159 | hop.initial_credits = (i == path->path_length - 1) ? 16 : 7; | ||
160 | hop.unknown1 = 0; | ||
161 | hop.enable = 1; | ||
162 | |||
163 | /* dword 1 */ | ||
164 | out_mask = (i == path->path_length - 1) ? | ||
165 | TB_PATH_DESTINATION : TB_PATH_INTERNAL; | ||
166 | in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL; | ||
167 | hop.weight = path->weight; | ||
168 | hop.unknown2 = 0; | ||
169 | hop.priority = path->priority; | ||
170 | hop.drop_packages = path->drop_packages; | ||
171 | hop.counter = path->hops[i].in_counter_index; | ||
172 | hop.counter_enable = path->hops[i].in_counter_index != -1; | ||
173 | hop.ingress_fc = path->ingress_fc_enable & in_mask; | ||
174 | hop.egress_fc = path->egress_fc_enable & out_mask; | ||
175 | hop.ingress_shared_buffer = path->ingress_shared_buffer | ||
176 | & in_mask; | ||
177 | hop.egress_shared_buffer = path->egress_shared_buffer | ||
178 | & out_mask; | ||
179 | hop.unknown3 = 0; | ||
180 | |||
181 | tb_port_info(path->hops[i].in_port, "Writing hop %d, index %d", | ||
182 | i, path->hops[i].in_hop_index); | ||
183 | tb_dump_hop(path->hops[i].in_port, &hop); | ||
184 | res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS, | ||
185 | 2 * path->hops[i].in_hop_index, 2); | ||
186 | if (res) { | ||
187 | __tb_path_deactivate_hops(path, i); | ||
188 | __tb_path_deallocate_nfc(path, 0); | ||
189 | goto err; | ||
190 | } | ||
191 | } | ||
192 | path->activated = true; | ||
193 | tb_info(path->tb, "path activation complete\n"); | ||
194 | return 0; | ||
195 | err: | ||
196 | tb_WARN(path->tb, "path activation failed\n"); | ||
197 | return res; | ||
198 | } | ||
199 | |||
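
[Editorial aside, not part of the patch: a caller such as tunnel_pci.c fills path->hops and the flow-control fields before invoking tb_path_activate(). A reduced single-hop sketch of that calling convention follows; the hop indices, priority/weight values and the demo_* helper name are placeholders, while the field names and enum tb_path_port values come from tb.h as used above.]

    #include "tb.h"

    static struct tb_path *demo_single_hop_path(struct tb *tb,
                                                struct tb_port *in,
                                                struct tb_port *out)
    {
            struct tb_path *path = tb_path_alloc(tb, 1);

            if (!path)
                    return NULL;

            path->hops[0].in_port = in;
            path->hops[0].in_hop_index = 8;         /* placeholder hop id */
            path->hops[0].in_counter_index = -1;    /* no counter */
            path->hops[0].out_port = out;
            path->hops[0].next_hop_index = 8;       /* placeholder hop id */

            path->ingress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL |
                                      TB_PATH_DESTINATION;
            path->egress_fc_enable = path->ingress_fc_enable;
            path->priority = 3;                     /* placeholder */
            path->weight = 1;                       /* placeholder */
            /* remaining fields were zeroed by tb_path_alloc() */

            if (tb_path_activate(path)) {
                    tb_path_free(path);             /* path is still inactive */
                    return NULL;
            }
            return path;
    }
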
200 | /** | ||
201 | * tb_path_is_invalid() - check whether any ports on the path are invalid | ||
202 | * | ||
203 | * Return: Returns true if the path is invalid, false otherwise. | ||
204 | */ | ||
205 | bool tb_path_is_invalid(struct tb_path *path) | ||
206 | { | ||
207 | int i = 0; | ||
208 | for (i = 0; i < path->path_length; i++) { | ||
209 | if (path->hops[i].in_port->sw->is_unplugged) | ||
210 | return true; | ||
211 | if (path->hops[i].out_port->sw->is_unplugged) | ||
212 | return true; | ||
213 | } | ||
214 | return false; | ||
215 | } | ||
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c new file mode 100644 index 000000000000..aeb982969629 --- /dev/null +++ b/drivers/thunderbolt/switch.c | |||
@@ -0,0 +1,507 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - switch/port utility functions | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/delay.h> | ||
8 | #include <linux/slab.h> | ||
9 | |||
10 | #include "tb.h" | ||
11 | |||
12 | /* port utility functions */ | ||
13 | |||
14 | static const char *tb_port_type(struct tb_regs_port_header *port) | ||
15 | { | ||
16 | switch (port->type >> 16) { | ||
17 | case 0: | ||
18 | switch ((u8) port->type) { | ||
19 | case 0: | ||
20 | return "Inactive"; | ||
21 | case 1: | ||
22 | return "Port"; | ||
23 | case 2: | ||
24 | return "NHI"; | ||
25 | default: | ||
26 | return "unknown"; | ||
27 | } | ||
28 | case 0x2: | ||
29 | return "Ethernet"; | ||
30 | case 0x8: | ||
31 | return "SATA"; | ||
32 | case 0xe: | ||
33 | return "DP/HDMI"; | ||
34 | case 0x10: | ||
35 | return "PCIe"; | ||
36 | case 0x20: | ||
37 | return "USB"; | ||
38 | default: | ||
39 | return "unknown"; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) | ||
44 | { | ||
45 | tb_info(tb, | ||
46 | " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", | ||
47 | port->port_number, port->vendor_id, port->device_id, | ||
48 | port->revision, port->thunderbolt_version, tb_port_type(port), | ||
49 | port->type); | ||
50 | tb_info(tb, " Max hop id (in/out): %d/%d\n", | ||
51 | port->max_in_hop_id, port->max_out_hop_id); | ||
52 | tb_info(tb, " Max counters: %d\n", port->max_counters); | ||
53 | tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits); | ||
54 | } | ||
55 | |||
56 | /** | ||
57 | * tb_port_state() - get connectedness state of a port | ||
58 | * | ||
59 | * The port must have a TB_CAP_PHY (i.e. it should be a real port). | ||
60 | * | ||
61 | * Return: Returns an enum tb_port_state on success or an error code on failure. | ||
62 | */ | ||
63 | static int tb_port_state(struct tb_port *port) | ||
64 | { | ||
65 | struct tb_cap_phy phy; | ||
66 | int res; | ||
67 | if (port->cap_phy == 0) { | ||
68 | tb_port_WARN(port, "does not have a PHY\n"); | ||
69 | return -EINVAL; | ||
70 | } | ||
71 | res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2); | ||
72 | if (res) | ||
73 | return res; | ||
74 | return phy.state; | ||
75 | } | ||
76 | |||
77 | /** | ||
78 | * tb_wait_for_port() - wait for a port to become ready | ||
79 | * | ||
80 | * Wait up to 1 second for a port to reach state TB_PORT_UP. If | ||
81 | * wait_if_unplugged is set then we also wait if the port is in state | ||
82 | * TB_PORT_UNPLUGGED (it takes a while for the device to be registered after | ||
83 | * switch resume). Otherwise we only wait if a device is registered but the link | ||
84 | * has not yet been established. | ||
85 | * | ||
86 | * Return: Returns an error code on failure. Returns 0 if the port is not | ||
87 | * connected or failed to reach state TB_PORT_UP within one second. Returns 1 | ||
88 | * if the port is connected and in state TB_PORT_UP. | ||
89 | */ | ||
90 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) | ||
91 | { | ||
92 | int retries = 10; | ||
93 | int state; | ||
94 | if (!port->cap_phy) { | ||
95 | tb_port_WARN(port, "does not have PHY\n"); | ||
96 | return -EINVAL; | ||
97 | } | ||
98 | if (tb_is_upstream_port(port)) { | ||
99 | tb_port_WARN(port, "is the upstream port\n"); | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | |||
103 | while (retries--) { | ||
104 | state = tb_port_state(port); | ||
105 | if (state < 0) | ||
106 | return state; | ||
107 | if (state == TB_PORT_DISABLED) { | ||
108 | tb_port_info(port, "is disabled (state: 0)\n"); | ||
109 | return 0; | ||
110 | } | ||
111 | if (state == TB_PORT_UNPLUGGED) { | ||
112 | if (wait_if_unplugged) { | ||
113 | /* used during resume */ | ||
114 | tb_port_info(port, | ||
115 | "is unplugged (state: 7), retrying...\n"); | ||
116 | msleep(100); | ||
117 | continue; | ||
118 | } | ||
119 | tb_port_info(port, "is unplugged (state: 7)\n"); | ||
120 | return 0; | ||
121 | } | ||
122 | if (state == TB_PORT_UP) { | ||
123 | tb_port_info(port, | ||
124 | "is connected, link is up (state: 2)\n"); | ||
125 | return 1; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * After plug-in the state is TB_PORT_CONNECTING. Give it some | ||
130 | * time. | ||
131 | */ | ||
132 | tb_port_info(port, | ||
133 | "is connected, link is not up (state: %d), retrying...\n", | ||
134 | state); | ||
135 | msleep(100); | ||
136 | } | ||
137 | tb_port_warn(port, | ||
138 | "failed to reach state TB_PORT_UP. Ignoring port...\n"); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port | ||
144 | * | ||
145 | * Change the number of NFC credits allocated to @port by @credits. To remove | ||
146 | * NFC credits pass a negative amount of credits. | ||
147 | * | ||
148 | * Return: Returns 0 on success or an error code on failure. | ||
149 | */ | ||
150 | int tb_port_add_nfc_credits(struct tb_port *port, int credits) | ||
151 | { | ||
152 | if (credits == 0) | ||
153 | return 0; | ||
154 | tb_port_info(port, | ||
155 | "adding %#x NFC credits (%#x -> %#x)", | ||
156 | credits, | ||
157 | port->config.nfc_credits, | ||
158 | port->config.nfc_credits + credits); | ||
159 | port->config.nfc_credits += credits; | ||
160 | return tb_port_write(port, &port->config.nfc_credits, | ||
161 | TB_CFG_PORT, 4, 1); | ||
162 | } | ||
163 | |||
164 | /** | ||
165 | * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER | ||
166 | * | ||
167 | * Return: Returns 0 on success or an error code on failure. | ||
168 | */ | ||
169 | int tb_port_clear_counter(struct tb_port *port, int counter) | ||
170 | { | ||
171 | u32 zero[3] = { 0, 0, 0 }; | ||
172 | tb_port_info(port, "clearing counter %d\n", counter); | ||
173 | return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); | ||
174 | } | ||
175 | |||
176 | /** | ||
177 | * tb_init_port() - initialize a port | ||
178 | * | ||
179 | * This is a helper method for tb_switch_alloc. Does not check or initialize | ||
180 | * any downstream switches. | ||
181 | * | ||
182 | * Return: Returns 0 on success or an error code on failure. | ||
183 | */ | ||
184 | static int tb_init_port(struct tb_port *port) | ||
185 | { | ||
186 | int res; | ||
187 | int cap; | ||
188 | |||
189 | res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8); | ||
190 | if (res) | ||
191 | return res; | ||
192 | |||
193 | /* Port 0 is the switch itself and has no PHY. */ | ||
194 | if (port->config.type == TB_TYPE_PORT && port->port != 0) { | ||
195 | cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PHY); | ||
196 | |||
197 | if (cap > 0) | ||
198 | port->cap_phy = cap; | ||
199 | else | ||
200 | tb_port_WARN(port, "non switch port without a PHY\n"); | ||
201 | } | ||
202 | |||
203 | tb_dump_port(port->sw->tb, &port->config); | ||
204 | |||
205 | /* TODO: Read dual link port, DP port and more from EEPROM. */ | ||
206 | return 0; | ||
207 | |||
208 | } | ||
209 | |||
210 | /* switch utility functions */ | ||
211 | |||
212 | static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) | ||
213 | { | ||
214 | tb_info(tb, | ||
215 | " Switch: %x:%x (Revision: %d, TB Version: %d)\n", | ||
216 | sw->vendor_id, sw->device_id, sw->revision, | ||
217 | sw->thunderbolt_version); | ||
218 | tb_info(tb, " Max Port Number: %d\n", sw->max_port_number); | ||
219 | tb_info(tb, " Config:\n"); | ||
220 | tb_info(tb, | ||
221 | " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", | ||
222 | sw->upstream_port_number, sw->depth, | ||
223 | (((u64) sw->route_hi) << 32) | sw->route_lo, | ||
224 | sw->enabled, sw->plug_events_delay); | ||
225 | tb_info(tb, | ||
226 | " unknown1: %#x unknown4: %#x\n", | ||
227 | sw->__unknown1, sw->__unknown4); | ||
228 | } | ||
229 | |||
230 | /** | ||
231 | * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET | ||
232 | * | ||
233 | * Return: Returns 0 on success or an error code on failure. | ||
234 | */ | ||
235 | int tb_switch_reset(struct tb *tb, u64 route) | ||
236 | { | ||
237 | struct tb_cfg_result res; | ||
238 | struct tb_regs_switch_header header = { | ||
239 | header.route_hi = route >> 32, | ||
240 | header.route_lo = route, | ||
241 | header.enabled = true, | ||
242 | }; | ||
243 | tb_info(tb, "resetting switch at %llx\n", route); | ||
244 | res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, | ||
245 | 0, 2, 2, 2); | ||
246 | if (res.err) | ||
247 | return res.err; | ||
248 | res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); | ||
249 | if (res.err > 0) | ||
250 | return -EIO; | ||
251 | return res.err; | ||
252 | } | ||
253 | |||
254 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route) | ||
255 | { | ||
256 | u8 next_port = route; /* | ||
257 | * Routes use a stride of 8 bits, | ||
258 | * even though a port index has 6 bits at most. | ||
259 | */ | ||
260 | if (route == 0) | ||
261 | return sw; | ||
262 | if (next_port > sw->config.max_port_number) | ||
263 | return NULL; | ||
264 | if (tb_is_upstream_port(&sw->ports[next_port])) | ||
265 | return NULL; | ||
266 | if (!sw->ports[next_port].remote) | ||
267 | return NULL; | ||
268 | return get_switch_at_route(sw->ports[next_port].remote->sw, | ||
269 | route >> TB_ROUTE_SHIFT); | ||
270 | } | ||
271 | |||
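
[Editorial aside, not part of the patch: to make the route-string convention above concrete, each level consumes the lowest byte, so for route 0x0305 starting at the root switch get_switch_at_route() follows port 5 first and then port 3 on the switch behind it. A throwaway sketch of the length calculation, assuming TB_ROUTE_SHIFT is the 8-bit stride referenced above; the driver's own tb_route_length() in tb.h serves this purpose.]

    #include "tb.h"

    /* Count how many switch-to-switch hops a route string encodes. */
    static int demo_route_depth(u64 route)
    {
            int depth = 0;

            while (route) {
                    depth++;
                    route >>= TB_ROUTE_SHIFT; /* drop one 8-bit port number */
            }
            return depth;
    }

    /* demo_route_depth(0) == 0 (root switch), demo_route_depth(0x0305) == 2 */
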
272 | /** | ||
273 | * tb_plug_events_active() - enable/disable plug events on a switch | ||
274 | * | ||
275 | * Also configures a sane plug_events_delay of 255ms. | ||
276 | * | ||
277 | * Return: Returns 0 on success or an error code on failure. | ||
278 | */ | ||
279 | static int tb_plug_events_active(struct tb_switch *sw, bool active) | ||
280 | { | ||
281 | u32 data; | ||
282 | int res; | ||
283 | |||
284 | sw->config.plug_events_delay = 0xff; | ||
285 | res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); | ||
286 | if (res) | ||
287 | return res; | ||
288 | |||
289 | res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); | ||
290 | if (res) | ||
291 | return res; | ||
292 | |||
293 | if (active) { | ||
294 | data = data & 0xFFFFFF83; | ||
295 | switch (sw->config.device_id) { | ||
296 | case 0x1513: | ||
297 | case 0x151a: | ||
298 | case 0x1549: | ||
299 | break; | ||
300 | default: | ||
301 | data |= 4; | ||
302 | } | ||
303 | } else { | ||
304 | data = data | 0x7c; | ||
305 | } | ||
306 | return tb_sw_write(sw, &data, TB_CFG_SWITCH, | ||
307 | sw->cap_plug_events + 1, 1); | ||
308 | } | ||
309 | |||
310 | |||
311 | /** | ||
312 | * tb_switch_free() - free a tb_switch and all downstream switches | ||
313 | */ | ||
314 | void tb_switch_free(struct tb_switch *sw) | ||
315 | { | ||
316 | int i; | ||
317 | /* port 0 is the switch itself and never has a remote */ | ||
318 | for (i = 1; i <= sw->config.max_port_number; i++) { | ||
319 | if (tb_is_upstream_port(&sw->ports[i])) | ||
320 | continue; | ||
321 | if (sw->ports[i].remote) | ||
322 | tb_switch_free(sw->ports[i].remote->sw); | ||
323 | sw->ports[i].remote = NULL; | ||
324 | } | ||
325 | |||
326 | if (!sw->is_unplugged) | ||
327 | tb_plug_events_active(sw, false); | ||
328 | |||
329 | kfree(sw->ports); | ||
330 | kfree(sw->drom); | ||
331 | kfree(sw); | ||
332 | } | ||
333 | |||
334 | /** | ||
335 | * tb_switch_alloc() - allocate and initialize a switch | ||
336 | * | ||
337 | * Return: Returns NULL on failure. | ||
338 | */ | ||
339 | struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route) | ||
340 | { | ||
341 | int i; | ||
342 | int cap; | ||
343 | struct tb_switch *sw; | ||
344 | int upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); | ||
345 | if (upstream_port < 0) | ||
346 | return NULL; | ||
347 | |||
348 | sw = kzalloc(sizeof(*sw), GFP_KERNEL); | ||
349 | if (!sw) | ||
350 | return NULL; | ||
351 | |||
352 | sw->tb = tb; | ||
353 | if (tb_cfg_read(tb->ctl, &sw->config, route, 0, 2, 0, 5)) | ||
354 | goto err; | ||
355 | tb_info(tb, | ||
356 | "initializing Switch at %#llx (depth: %d, up port: %d)\n", | ||
357 | route, tb_route_length(route), upstream_port); | ||
358 | tb_info(tb, "old switch config:\n"); | ||
359 | tb_dump_switch(tb, &sw->config); | ||
360 | |||
361 | /* configure switch */ | ||
362 | sw->config.upstream_port_number = upstream_port; | ||
363 | sw->config.depth = tb_route_length(route); | ||
364 | sw->config.route_lo = route; | ||
365 | sw->config.route_hi = route >> 32; | ||
366 | sw->config.enabled = 1; | ||
367 | /* from here on we may use the tb_sw_* functions & macros */ | ||
368 | |||
369 | if (sw->config.vendor_id != 0x8086) | ||
370 | tb_sw_warn(sw, "unknown switch vendor id %#x\n", | ||
371 | sw->config.vendor_id); | ||
372 | |||
373 | if (sw->config.device_id != 0x1547 && sw->config.device_id != 0x1549) | ||
374 | tb_sw_warn(sw, "unsupported switch device id %#x\n", | ||
375 | sw->config.device_id); | ||
376 | |||
377 | /* upload configuration */ | ||
378 | if (tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3)) | ||
379 | goto err; | ||
380 | |||
381 | /* initialize ports */ | ||
382 | sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), | ||
383 | GFP_KERNEL); | ||
384 | if (!sw->ports) | ||
385 | goto err; | ||
386 | |||
387 | for (i = 0; i <= sw->config.max_port_number; i++) { | ||
388 | /* minimum setup for tb_find_cap and tb_drom_read to work */ | ||
389 | sw->ports[i].sw = sw; | ||
390 | sw->ports[i].port = i; | ||
391 | } | ||
392 | |||
393 | cap = tb_find_cap(&sw->ports[0], TB_CFG_SWITCH, TB_CAP_PLUG_EVENTS); | ||
394 | if (cap < 0) { | ||
395 | tb_sw_warn(sw, "cannot find TB_CAP_PLUG_EVENTS, aborting\n"); | ||
396 | goto err; | ||
397 | } | ||
398 | sw->cap_plug_events = cap; | ||
399 | |||
400 | /* read drom */ | ||
401 | if (tb_drom_read(sw)) | ||
402 | tb_sw_warn(sw, "tb_eeprom_read_rom failed, continuing\n"); | ||
403 | tb_sw_info(sw, "uid: %#llx\n", sw->uid); | ||
404 | |||
405 | for (i = 0; i <= sw->config.max_port_number; i++) { | ||
406 | if (sw->ports[i].disabled) { | ||
407 | tb_port_info(&sw->ports[i], "disabled by eeprom\n"); | ||
408 | continue; | ||
409 | } | ||
410 | if (tb_init_port(&sw->ports[i])) | ||
411 | goto err; | ||
412 | } | ||
413 | |||
414 | /* TODO: I2C, IECS, link controller */ | ||
415 | |||
416 | if (tb_plug_events_active(sw, true)) | ||
417 | goto err; | ||
418 | |||
419 | return sw; | ||
420 | err: | ||
421 | kfree(sw->ports); | ||
422 | kfree(sw->drom); | ||
423 | kfree(sw); | ||
424 | return NULL; | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * tb_sw_set_unpplugged() - set is_unplugged on switch and downstream switches | ||
429 | */ | ||
430 | void tb_sw_set_unpplugged(struct tb_switch *sw) | ||
431 | { | ||
432 | int i; | ||
433 | if (sw == sw->tb->root_switch) { | ||
434 | tb_sw_WARN(sw, "cannot unplug root switch\n"); | ||
435 | return; | ||
436 | } | ||
437 | if (sw->is_unplugged) { | ||
438 | tb_sw_WARN(sw, "is_unplugged already set\n"); | ||
439 | return; | ||
440 | } | ||
441 | sw->is_unplugged = true; | ||
442 | for (i = 0; i <= sw->config.max_port_number; i++) { | ||
443 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) | ||
444 | tb_sw_set_unpplugged(sw->ports[i].remote->sw); | ||
445 | } | ||
446 | } | ||
447 | |||
448 | int tb_switch_resume(struct tb_switch *sw) | ||
449 | { | ||
450 | int i, err; | ||
451 | u64 uid; | ||
452 | tb_sw_info(sw, "resuming switch\n"); | ||
453 | |||
454 | err = tb_drom_read_uid_only(sw, &uid); | ||
455 | if (err) { | ||
456 | tb_sw_warn(sw, "uid read failed\n"); | ||
457 | return err; | ||
458 | } | ||
459 | if (sw->uid != uid) { | ||
460 | tb_sw_info(sw, | ||
461 | "changed while suspended (uid %#llx -> %#llx)\n", | ||
462 | sw->uid, uid); | ||
463 | return -ENODEV; | ||
464 | } | ||
465 | |||
466 | /* upload configuration */ | ||
467 | err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3); | ||
468 | if (err) | ||
469 | return err; | ||
470 | |||
471 | err = tb_plug_events_active(sw, true); | ||
472 | if (err) | ||
473 | return err; | ||
474 | |||
475 | /* check for surviving downstream switches */ | ||
476 | for (i = 1; i <= sw->config.max_port_number; i++) { | ||
477 | struct tb_port *port = &sw->ports[i]; | ||
478 | if (tb_is_upstream_port(port)) | ||
479 | continue; | ||
480 | if (!port->remote) | ||
481 | continue; | ||
482 | if (tb_wait_for_port(port, true) <= 0 | ||
483 | || tb_switch_resume(port->remote->sw)) { | ||
484 | tb_port_warn(port, | ||
485 | "lost during suspend, disconnecting\n"); | ||
486 | tb_sw_set_unpplugged(port->remote->sw); | ||
487 | } | ||
488 | } | ||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | void tb_switch_suspend(struct tb_switch *sw) | ||
493 | { | ||
494 | int i, err; | ||
495 | err = tb_plug_events_active(sw, false); | ||
496 | if (err) | ||
497 | return; | ||
498 | |||
499 | for (i = 1; i <= sw->config.max_port_number; i++) { | ||
500 | if (!tb_is_upstream_port(&sw->ports[i]) && sw->ports[i].remote) | ||
501 | tb_switch_suspend(sw->ports[i].remote->sw); | ||
502 | } | ||
503 | /* | ||
504 | * TODO: invoke tb_cfg_prepare_to_sleep here? does not seem to have any | ||
505 | * effect? | ||
506 | */ | ||
507 | } | ||
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c new file mode 100644 index 000000000000..d2c3fe346e91 --- /dev/null +++ b/drivers/thunderbolt/tb.c | |||
@@ -0,0 +1,436 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - bus logic (NHI independent) | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/slab.h> | ||
8 | #include <linux/errno.h> | ||
9 | #include <linux/delay.h> | ||
10 | |||
11 | #include "tb.h" | ||
12 | #include "tb_regs.h" | ||
13 | #include "tunnel_pci.h" | ||
14 | |||
15 | |||
16 | /* enumeration & hot plug handling */ | ||
17 | |||
18 | |||
19 | static void tb_scan_port(struct tb_port *port); | ||
20 | |||
21 | /** | ||
22 | * tb_scan_switch() - scan for and initialize downstream switches | ||
23 | */ | ||
24 | static void tb_scan_switch(struct tb_switch *sw) | ||
25 | { | ||
26 | int i; | ||
27 | for (i = 1; i <= sw->config.max_port_number; i++) | ||
28 | tb_scan_port(&sw->ports[i]); | ||
29 | } | ||
30 | |||
31 | /** | ||
32 | * tb_scan_port() - check for and initialize switches below port | ||
33 | */ | ||
34 | static void tb_scan_port(struct tb_port *port) | ||
35 | { | ||
36 | struct tb_switch *sw; | ||
37 | if (tb_is_upstream_port(port)) | ||
38 | return; | ||
39 | if (port->config.type != TB_TYPE_PORT) | ||
40 | return; | ||
41 | if (port->dual_link_port && port->link_nr) | ||
42 | return; /* | ||
43 | * Downstream switch is reachable through two ports. | ||
44 | * Only scan on the primary port (link_nr == 0). | ||
45 | */ | ||
46 | if (tb_wait_for_port(port, false) <= 0) | ||
47 | return; | ||
48 | if (port->remote) { | ||
49 | tb_port_WARN(port, "port already has a remote!\n"); | ||
50 | return; | ||
51 | } | ||
52 | sw = tb_switch_alloc(port->sw->tb, tb_downstream_route(port)); | ||
53 | if (!sw) | ||
54 | return; | ||
55 | port->remote = tb_upstream_port(sw); | ||
56 | tb_upstream_port(sw)->remote = port; | ||
57 | tb_scan_switch(sw); | ||
58 | } | ||
59 | |||
60 | /** | ||
61 | * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away | ||
62 | */ | ||
63 | static void tb_free_invalid_tunnels(struct tb *tb) | ||
64 | { | ||
65 | struct tb_pci_tunnel *tunnel; | ||
66 | struct tb_pci_tunnel *n; | ||
67 | list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) | ||
68 | { | ||
69 | if (tb_pci_is_invalid(tunnel)) { | ||
70 | tb_pci_deactivate(tunnel); | ||
71 | tb_pci_free(tunnel); | ||
72 | } | ||
73 | } | ||
74 | } | ||
75 | |||
76 | /** | ||
77 | * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches | ||
78 | */ | ||
79 | static void tb_free_unplugged_children(struct tb_switch *sw) | ||
80 | { | ||
81 | int i; | ||
82 | for (i = 1; i <= sw->config.max_port_number; i++) { | ||
83 | struct tb_port *port = &sw->ports[i]; | ||
84 | if (tb_is_upstream_port(port)) | ||
85 | continue; | ||
86 | if (!port->remote) | ||
87 | continue; | ||
88 | if (port->remote->sw->is_unplugged) { | ||
89 | tb_switch_free(port->remote->sw); | ||
90 | port->remote = NULL; | ||
91 | } else { | ||
92 | tb_free_unplugged_children(port->remote->sw); | ||
93 | } | ||
94 | } | ||
95 | } | ||
96 | |||
97 | |||
98 | /** | ||
99 | * find_pci_up_port() - return the first PCIe up port on @sw or NULL | ||
100 | */ | ||
101 | static struct tb_port *tb_find_pci_up_port(struct tb_switch *sw) | ||
102 | { | ||
103 | int i; | ||
104 | for (i = 1; i <= sw->config.max_port_number; i++) | ||
105 | if (sw->ports[i].config.type == TB_TYPE_PCIE_UP) | ||
106 | return &sw->ports[i]; | ||
107 | return NULL; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * find_unused_down_port() - return the first inactive PCIe down port on @sw | ||
112 | */ | ||
113 | static struct tb_port *tb_find_unused_down_port(struct tb_switch *sw) | ||
114 | { | ||
115 | int i; | ||
116 | int cap; | ||
117 | int res; | ||
118 | int data; | ||
119 | for (i = 1; i <= sw->config.max_port_number; i++) { | ||
120 | if (tb_is_upstream_port(&sw->ports[i])) | ||
121 | continue; | ||
122 | if (sw->ports[i].config.type != TB_TYPE_PCIE_DOWN) | ||
123 | continue; | ||
124 | cap = tb_find_cap(&sw->ports[i], TB_CFG_PORT, TB_CAP_PCIE); | ||
125 | if (cap <= 0) | ||
126 | continue; | ||
127 | res = tb_port_read(&sw->ports[i], &data, TB_CFG_PORT, cap, 1); | ||
128 | if (res < 0) | ||
129 | continue; | ||
130 | if (data & 0x80000000) | ||
131 | continue; | ||
132 | return &sw->ports[i]; | ||
133 | } | ||
134 | return NULL; | ||
135 | } | ||
136 | |||
137 | /** | ||
138 | * tb_activate_pcie_devices() - scan for and activate PCIe devices | ||
139 | * | ||
140 | * This method is somewhat ad hoc. For now it only supports one device | ||
141 | * per port and only devices at depth 1. | ||
142 | */ | ||
143 | static void tb_activate_pcie_devices(struct tb *tb) | ||
144 | { | ||
145 | int i; | ||
146 | int cap; | ||
147 | u32 data; | ||
148 | struct tb_switch *sw; | ||
149 | struct tb_port *up_port; | ||
150 | struct tb_port *down_port; | ||
151 | struct tb_pci_tunnel *tunnel; | ||
152 | /* scan for pcie devices at depth 1 */ | ||
153 | for (i = 1; i <= tb->root_switch->config.max_port_number; i++) { | ||
154 | if (tb_is_upstream_port(&tb->root_switch->ports[i])) | ||
155 | continue; | ||
156 | if (tb->root_switch->ports[i].config.type != TB_TYPE_PORT) | ||
157 | continue; | ||
158 | if (!tb->root_switch->ports[i].remote) | ||
159 | continue; | ||
160 | sw = tb->root_switch->ports[i].remote->sw; | ||
161 | up_port = tb_find_pci_up_port(sw); | ||
162 | if (!up_port) { | ||
163 | tb_sw_info(sw, "no PCIe devices found, aborting\n"); | ||
164 | continue; | ||
165 | } | ||
166 | |||
167 | /* check whether port is already activated */ | ||
168 | cap = tb_find_cap(up_port, TB_CFG_PORT, TB_CAP_PCIE); | ||
169 | if (cap <= 0) | ||
170 | continue; | ||
171 | if (tb_port_read(up_port, &data, TB_CFG_PORT, cap, 1)) | ||
172 | continue; | ||
173 | if (data & 0x80000000) { | ||
174 | tb_port_info(up_port, | ||
175 | "PCIe port already activated, aborting\n"); | ||
176 | continue; | ||
177 | } | ||
178 | |||
179 | down_port = tb_find_unused_down_port(tb->root_switch); | ||
180 | if (!down_port) { | ||
181 | tb_port_info(up_port, | ||
182 | "All PCIe down ports are occupied, aborting\n"); | ||
183 | continue; | ||
184 | } | ||
185 | tunnel = tb_pci_alloc(tb, up_port, down_port); | ||
186 | if (!tunnel) { | ||
187 | tb_port_info(up_port, | ||
188 | "PCIe tunnel allocation failed, aborting\n"); | ||
189 | continue; | ||
190 | } | ||
191 | |||
192 | if (tb_pci_activate(tunnel)) { | ||
193 | tb_port_info(up_port, | ||
194 | "PCIe tunnel activation failed, aborting\n"); | ||
195 | tb_pci_free(tunnel); | ||
196 | } | ||
197 | |||
198 | } | ||
199 | } | ||
200 | |||
201 | /* hotplug handling */ | ||
202 | |||
203 | struct tb_hotplug_event { | ||
204 | struct work_struct work; | ||
205 | struct tb *tb; | ||
206 | u64 route; | ||
207 | u8 port; | ||
208 | bool unplug; | ||
209 | }; | ||
210 | |||
211 | /** | ||
212 | * tb_handle_hotplug() - handle hotplug event | ||
213 | * | ||
214 | * Executes on tb->wq. | ||
215 | */ | ||
216 | static void tb_handle_hotplug(struct work_struct *work) | ||
217 | { | ||
218 | struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work); | ||
219 | struct tb *tb = ev->tb; | ||
220 | struct tb_switch *sw; | ||
221 | struct tb_port *port; | ||
222 | mutex_lock(&tb->lock); | ||
223 | if (!tb->hotplug_active) | ||
224 | goto out; /* during init, suspend or shutdown */ | ||
225 | |||
226 | sw = get_switch_at_route(tb->root_switch, ev->route); | ||
227 | if (!sw) { | ||
228 | tb_warn(tb, | ||
229 | "hotplug event from non existent switch %llx:%x (unplug: %d)\n", | ||
230 | ev->route, ev->port, ev->unplug); | ||
231 | goto out; | ||
232 | } | ||
233 | if (ev->port > sw->config.max_port_number) { | ||
234 | tb_warn(tb, | ||
235 | "hotplug event from non existent port %llx:%x (unplug: %d)\n", | ||
236 | ev->route, ev->port, ev->unplug); | ||
237 | goto out; | ||
238 | } | ||
239 | port = &sw->ports[ev->port]; | ||
240 | if (tb_is_upstream_port(port)) { | ||
241 | tb_warn(tb, | ||
242 | "hotplug event for upstream port %llx:%x (unplug: %d)\n", | ||
243 | ev->route, ev->port, ev->unplug); | ||
244 | goto out; | ||
245 | } | ||
246 | if (ev->unplug) { | ||
247 | if (port->remote) { | ||
248 | tb_port_info(port, "unplugged\n"); | ||
249 | tb_sw_set_unpplugged(port->remote->sw); | ||
250 | tb_free_invalid_tunnels(tb); | ||
251 | tb_switch_free(port->remote->sw); | ||
252 | port->remote = NULL; | ||
253 | } else { | ||
254 | tb_port_info(port, | ||
255 | "got unplug event for disconnected port, ignoring\n"); | ||
256 | } | ||
257 | } else if (port->remote) { | ||
258 | tb_port_info(port, | ||
259 | "got plug event for connected port, ignoring\n"); | ||
260 | } else { | ||
261 | tb_port_info(port, "hotplug: scanning\n"); | ||
262 | tb_scan_port(port); | ||
263 | if (!port->remote) { | ||
264 | tb_port_info(port, "hotplug: no switch found\n"); | ||
265 | } else if (port->remote->sw->config.depth > 1) { | ||
266 | tb_sw_warn(port->remote->sw, | ||
267 | "hotplug: chaining not supported\n"); | ||
268 | } else { | ||
269 | tb_sw_info(port->remote->sw, | ||
270 | "hotplug: activating pcie devices\n"); | ||
271 | tb_activate_pcie_devices(tb); | ||
272 | } | ||
273 | } | ||
274 | out: | ||
275 | mutex_unlock(&tb->lock); | ||
276 | kfree(ev); | ||
277 | } | ||
278 | |||
279 | /** | ||
280 | * tb_schedule_hotplug_handler() - callback function for the control channel | ||
281 | * | ||
282 | * Delegates to tb_handle_hotplug. | ||
283 | */ | ||
284 | static void tb_schedule_hotplug_handler(void *data, u64 route, u8 port, | ||
285 | bool unplug) | ||
286 | { | ||
287 | struct tb *tb = data; | ||
288 | struct tb_hotplug_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL); | ||
289 | if (!ev) | ||
290 | return; | ||
291 | INIT_WORK(&ev->work, tb_handle_hotplug); | ||
292 | ev->tb = tb; | ||
293 | ev->route = route; | ||
294 | ev->port = port; | ||
295 | ev->unplug = unplug; | ||
296 | queue_work(tb->wq, &ev->work); | ||
297 | } | ||
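
The callback above only packages the event and defers the real work to the driver's ordered workqueue, so that tb_handle_hotplug() can sleep and take tb->lock. The following minimal, self-contained module sketch shows the same defer-to-ordered-workqueue pattern; all demo_* names are invented for illustration and nothing here is part of the commit.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_event {
        struct work_struct work;
        int payload;
};

static struct workqueue_struct *demo_wq;

static void demo_handle_event(struct work_struct *work)
{
        struct demo_event *ev = container_of(work, typeof(*ev), work);

        pr_info("handling event %d\n", ev->payload); /* may sleep here */
        kfree(ev);
}

/* Analogue of tb_schedule_hotplug_handler(): package the event and queue it. */
static void demo_schedule_event(int payload)
{
        struct demo_event *ev = kmalloc(sizeof(*ev), GFP_KERNEL);

        if (!ev)
                return;
        INIT_WORK(&ev->work, demo_handle_event);
        ev->payload = payload;
        queue_work(demo_wq, &ev->work);
}

static int __init demo_init(void)
{
        demo_wq = alloc_ordered_workqueue("demo_events", 0);
        if (!demo_wq)
                return -ENOMEM;
        demo_schedule_event(42);
        return 0;
}

static void __exit demo_exit(void)
{
        flush_workqueue(demo_wq);
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
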
298 | |||
299 | /** | ||
300 | * thunderbolt_shutdown_and_free() - shutdown everything | ||
301 | * | ||
302 | * Free all switches and the config channel. | ||
303 | * | ||
304 | * Used in the error path of thunderbolt_alloc_and_start. | ||
305 | */ | ||
306 | void thunderbolt_shutdown_and_free(struct tb *tb) | ||
307 | { | ||
308 | struct tb_pci_tunnel *tunnel; | ||
309 | struct tb_pci_tunnel *n; | ||
310 | |||
311 | mutex_lock(&tb->lock); | ||
312 | |||
313 | /* tunnels are only present after everything has been initialized */ | ||
314 | list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) { | ||
315 | tb_pci_deactivate(tunnel); | ||
316 | tb_pci_free(tunnel); | ||
317 | } | ||
318 | |||
319 | if (tb->root_switch) | ||
320 | tb_switch_free(tb->root_switch); | ||
321 | tb->root_switch = NULL; | ||
322 | |||
323 | if (tb->ctl) { | ||
324 | tb_ctl_stop(tb->ctl); | ||
325 | tb_ctl_free(tb->ctl); | ||
326 | } | ||
327 | tb->ctl = NULL; | ||
328 | tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */ | ||
329 | |||
330 | /* allow tb_handle_hotplug to acquire the lock */ | ||
331 | mutex_unlock(&tb->lock); | ||
332 | if (tb->wq) { | ||
333 | flush_workqueue(tb->wq); | ||
334 | destroy_workqueue(tb->wq); | ||
335 | tb->wq = NULL; | ||
336 | } | ||
337 | mutex_destroy(&tb->lock); | ||
338 | kfree(tb); | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * thunderbolt_alloc_and_start() - setup the thunderbolt bus | ||
343 | * | ||
344 | * Allocates a tb_cfg control channel, initializes the root switch, enables | ||
345 | * plug events and activates pci devices. | ||
346 | * | ||
347 | * Return: Returns NULL on error. | ||
348 | */ | ||
349 | struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi) | ||
350 | { | ||
351 | struct tb *tb; | ||
352 | |||
353 | BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4); | ||
354 | BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4); | ||
355 | BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4); | ||
356 | |||
357 | tb = kzalloc(sizeof(*tb), GFP_KERNEL); | ||
358 | if (!tb) | ||
359 | return NULL; | ||
360 | |||
361 | tb->nhi = nhi; | ||
362 | mutex_init(&tb->lock); | ||
363 | mutex_lock(&tb->lock); | ||
364 | INIT_LIST_HEAD(&tb->tunnel_list); | ||
365 | |||
366 | tb->wq = alloc_ordered_workqueue("thunderbolt", 0); | ||
367 | if (!tb->wq) | ||
368 | goto err_locked; | ||
369 | |||
370 | tb->ctl = tb_ctl_alloc(tb->nhi, tb_schedule_hotplug_handler, tb); | ||
371 | if (!tb->ctl) | ||
372 | goto err_locked; | ||
373 | /* | ||
374 | * tb_schedule_hotplug_handler may be called as soon as the config | ||
375 | * channel is started. That's why we have to hold the lock here. | ||
376 | */ | ||
377 | tb_ctl_start(tb->ctl); | ||
378 | |||
379 | tb->root_switch = tb_switch_alloc(tb, 0); | ||
380 | if (!tb->root_switch) | ||
381 | goto err_locked; | ||
382 | |||
383 | /* Full scan to discover devices added before the driver was loaded. */ | ||
384 | tb_scan_switch(tb->root_switch); | ||
385 | tb_activate_pcie_devices(tb); | ||
386 | |||
387 | /* Allow tb_handle_hotplug to progress events */ | ||
388 | tb->hotplug_active = true; | ||
389 | mutex_unlock(&tb->lock); | ||
390 | return tb; | ||
391 | |||
392 | err_locked: | ||
393 | mutex_unlock(&tb->lock); | ||
394 | thunderbolt_shutdown_and_free(tb); | ||
395 | return NULL; | ||
396 | } | ||
397 | |||
398 | void thunderbolt_suspend(struct tb *tb) | ||
399 | { | ||
400 | tb_info(tb, "suspending...\n"); | ||
401 | mutex_lock(&tb->lock); | ||
402 | tb_switch_suspend(tb->root_switch); | ||
403 | tb_ctl_stop(tb->ctl); | ||
404 | tb->hotplug_active = false; /* signal tb_handle_hotplug to quit */ | ||
405 | mutex_unlock(&tb->lock); | ||
406 | tb_info(tb, "suspend finished\n"); | ||
407 | } | ||
408 | |||
409 | void thunderbolt_resume(struct tb *tb) | ||
410 | { | ||
411 | struct tb_pci_tunnel *tunnel, *n; | ||
412 | tb_info(tb, "resuming...\n"); | ||
413 | mutex_lock(&tb->lock); | ||
414 | tb_ctl_start(tb->ctl); | ||
415 | |||
416 | /* remove any pci devices the firmware might have set up */ | ||
417 | tb_switch_reset(tb, 0); | ||
418 | |||
419 | tb_switch_resume(tb->root_switch); | ||
420 | tb_free_invalid_tunnels(tb); | ||
421 | tb_free_unplugged_children(tb->root_switch); | ||
422 | list_for_each_entry_safe(tunnel, n, &tb->tunnel_list, list) | ||
423 | tb_pci_restart(tunnel); | ||
424 | if (!list_empty(&tb->tunnel_list)) { | ||
425 | /* | ||
426 | * the pcie links need some time to get going. | ||
427 | * 100ms works for me... | ||
428 | */ | ||
429 | tb_info(tb, "tunnels restarted, sleeping for 100ms\n"); | ||
430 | msleep(100); | ||
431 | } | ||
432 | /* Allow tb_handle_hotplug to progress events */ | ||
433 | tb->hotplug_active = true; | ||
434 | mutex_unlock(&tb->lock); | ||
435 | tb_info(tb, "resume finished\n"); | ||
436 | } | ||
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h new file mode 100644 index 000000000000..8b0d7cf2b6d6 --- /dev/null +++ b/drivers/thunderbolt/tb.h | |||
@@ -0,0 +1,271 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - bus logic (NHI independent) | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef TB_H_ | ||
8 | #define TB_H_ | ||
9 | |||
10 | #include <linux/pci.h> | ||
11 | |||
12 | #include "tb_regs.h" | ||
13 | #include "ctl.h" | ||
14 | |||
15 | /** | ||
16 | * struct tb_switch - a thunderbolt switch | ||
17 | */ | ||
18 | struct tb_switch { | ||
19 | struct tb_regs_switch_header config; | ||
20 | struct tb_port *ports; | ||
21 | struct tb *tb; | ||
22 | u64 uid; | ||
23 | int cap_plug_events; /* offset, zero if not found */ | ||
24 | bool is_unplugged; /* unplugged, will go away */ | ||
25 | u8 *drom; | ||
26 | }; | ||
27 | |||
28 | /** | ||
29 | * struct tb_port - a thunderbolt port, part of a tb_switch | ||
30 | */ | ||
31 | struct tb_port { | ||
32 | struct tb_regs_port_header config; | ||
33 | struct tb_switch *sw; | ||
34 | struct tb_port *remote; /* remote port, NULL if not connected */ | ||
35 | int cap_phy; /* offset, zero if not found */ | ||
36 | u8 port; /* port number on switch */ | ||
37 | bool disabled; /* disabled by eeprom */ | ||
38 | struct tb_port *dual_link_port; | ||
39 | u8 link_nr:1; | ||
40 | }; | ||
41 | |||
42 | /** | ||
43 | * struct tb_path_hop - routing information for a tb_path | ||
44 | * | ||
45 | * Hop configuration is always done on the IN port of a switch. | ||
46 | * in_port and out_port have to be on the same switch. Packets arriving on | ||
47 | * in_port with "hop" = in_hop_index will get routed to through out_port. The | ||
48 | * next hop to take (on out_port->remote) is determined by next_hop_index. | ||
49 | * | ||
50 | * in_counter_index is the index of a counter (in TB_CFG_COUNTERS) on the in | ||
51 | * port. | ||
52 | */ | ||
53 | struct tb_path_hop { | ||
54 | struct tb_port *in_port; | ||
55 | struct tb_port *out_port; | ||
56 | int in_hop_index; | ||
57 | int in_counter_index; /* write -1 to disable counters for this hop. */ | ||
58 | int next_hop_index; | ||
59 | }; | ||
60 | |||
61 | /** | ||
62 | * enum tb_path_port - path options mask | ||
63 | */ | ||
64 | enum tb_path_port { | ||
65 | TB_PATH_NONE = 0, | ||
66 | TB_PATH_SOURCE = 1, /* activate on the first hop (out of src) */ | ||
67 | TB_PATH_INTERNAL = 2, /* activate on other hops (not the first/last) */ | ||
68 | TB_PATH_DESTINATION = 4, /* activate on the last hop (into dst) */ | ||
69 | TB_PATH_ALL = 7, | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * struct tb_path - a unidirectional path between two ports | ||
74 | * | ||
75 | * A path consists of a number of hops (see tb_path_hop). To establish a PCIe | ||
76 | * tunnel two paths have to be created between the two PCIe ports. | ||
77 | * | ||
78 | */ | ||
79 | struct tb_path { | ||
80 | struct tb *tb; | ||
81 | int nfc_credits; /* non flow controlled credits */ | ||
82 | enum tb_path_port ingress_shared_buffer; | ||
83 | enum tb_path_port egress_shared_buffer; | ||
84 | enum tb_path_port ingress_fc_enable; | ||
85 | enum tb_path_port egress_fc_enable; | ||
86 | |||
87 | int priority:3; | ||
88 | int weight:4; | ||
89 | bool drop_packages; | ||
90 | bool activated; | ||
91 | struct tb_path_hop *hops; | ||
92 | int path_length; /* number of hops */ | ||
93 | }; | ||
94 | |||
95 | |||
96 | /** | ||
97 | * struct tb - main thunderbolt bus structure | ||
98 | */ | ||
99 | struct tb { | ||
100 | struct mutex lock; /* | ||
101 | * Big lock. Must be held when accessing cfg or | ||
102 | * any struct tb_switch / struct tb_port. | ||
103 | */ | ||
104 | struct tb_nhi *nhi; | ||
105 | struct tb_ctl *ctl; | ||
106 | struct workqueue_struct *wq; /* ordered workqueue for plug events */ | ||
107 | struct tb_switch *root_switch; | ||
108 | struct list_head tunnel_list; /* list of active PCIe tunnels */ | ||
109 | bool hotplug_active; /* | ||
110 | * tb_handle_hotplug will stop progressing plug | ||
111 | * events and exit if this is not set (it needs to | ||
112 | * acquire the lock one more time). Used to drain | ||
113 | * wq after cfg has been paused. | ||
114 | */ | ||
115 | |||
116 | }; | ||
117 | |||
118 | /* helper functions & macros */ | ||
119 | |||
120 | /** | ||
121 | * tb_upstream_port() - return the upstream port of a switch | ||
122 | * | ||
123 | * Every switch has an upstream port (for the root switch it is the NHI). | ||
124 | * | ||
125 | * During switch alloc/init tb_upstream_port()->remote may be NULL, even for | ||
126 | * non root switches (on the NHI port remote is always NULL). | ||
127 | * | ||
128 | * Return: Returns the upstream port of the switch. | ||
129 | */ | ||
130 | static inline struct tb_port *tb_upstream_port(struct tb_switch *sw) | ||
131 | { | ||
132 | return &sw->ports[sw->config.upstream_port_number]; | ||
133 | } | ||
134 | |||
135 | static inline u64 tb_route(struct tb_switch *sw) | ||
136 | { | ||
137 | return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; | ||
138 | } | ||
139 | |||
140 | static inline int tb_sw_read(struct tb_switch *sw, void *buffer, | ||
141 | enum tb_cfg_space space, u32 offset, u32 length) | ||
142 | { | ||
143 | return tb_cfg_read(sw->tb->ctl, | ||
144 | buffer, | ||
145 | tb_route(sw), | ||
146 | 0, | ||
147 | space, | ||
148 | offset, | ||
149 | length); | ||
150 | } | ||
151 | |||
152 | static inline int tb_sw_write(struct tb_switch *sw, void *buffer, | ||
153 | enum tb_cfg_space space, u32 offset, u32 length) | ||
154 | { | ||
155 | return tb_cfg_write(sw->tb->ctl, | ||
156 | buffer, | ||
157 | tb_route(sw), | ||
158 | 0, | ||
159 | space, | ||
160 | offset, | ||
161 | length); | ||
162 | } | ||
163 | |||
164 | static inline int tb_port_read(struct tb_port *port, void *buffer, | ||
165 | enum tb_cfg_space space, u32 offset, u32 length) | ||
166 | { | ||
167 | return tb_cfg_read(port->sw->tb->ctl, | ||
168 | buffer, | ||
169 | tb_route(port->sw), | ||
170 | port->port, | ||
171 | space, | ||
172 | offset, | ||
173 | length); | ||
174 | } | ||
175 | |||
176 | static inline int tb_port_write(struct tb_port *port, void *buffer, | ||
177 | enum tb_cfg_space space, u32 offset, u32 length) | ||
178 | { | ||
179 | return tb_cfg_write(port->sw->tb->ctl, | ||
180 | buffer, | ||
181 | tb_route(port->sw), | ||
182 | port->port, | ||
183 | space, | ||
184 | offset, | ||
185 | length); | ||
186 | } | ||
187 | |||
188 | #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg) | ||
189 | #define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg) | ||
190 | #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg) | ||
191 | #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg) | ||
192 | |||
193 | |||
194 | #define __TB_SW_PRINT(level, sw, fmt, arg...) \ | ||
195 | do { \ | ||
196 | struct tb_switch *__sw = (sw); \ | ||
197 | level(__sw->tb, "%llx: " fmt, \ | ||
198 | tb_route(__sw), ## arg); \ | ||
199 | } while (0) | ||
200 | #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) | ||
201 | #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) | ||
202 | #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) | ||
203 | |||
204 | |||
205 | #define __TB_PORT_PRINT(level, _port, fmt, arg...) \ | ||
206 | do { \ | ||
207 | struct tb_port *__port = (_port); \ | ||
208 | level(__port->sw->tb, "%llx:%x: " fmt, \ | ||
209 | tb_route(__port->sw), __port->port, ## arg); \ | ||
210 | } while (0) | ||
211 | #define tb_port_WARN(port, fmt, arg...) \ | ||
212 | __TB_PORT_PRINT(tb_WARN, port, fmt, ##arg) | ||
213 | #define tb_port_warn(port, fmt, arg...) \ | ||
214 | __TB_PORT_PRINT(tb_warn, port, fmt, ##arg) | ||
215 | #define tb_port_info(port, fmt, arg...) \ | ||
216 | __TB_PORT_PRINT(tb_info, port, fmt, ##arg) | ||
217 | |||
218 | |||
219 | struct tb *thunderbolt_alloc_and_start(struct tb_nhi *nhi); | ||
220 | void thunderbolt_shutdown_and_free(struct tb *tb); | ||
221 | void thunderbolt_suspend(struct tb *tb); | ||
222 | void thunderbolt_resume(struct tb *tb); | ||
223 | |||
224 | struct tb_switch *tb_switch_alloc(struct tb *tb, u64 route); | ||
225 | void tb_switch_free(struct tb_switch *sw); | ||
226 | void tb_switch_suspend(struct tb_switch *sw); | ||
227 | int tb_switch_resume(struct tb_switch *sw); | ||
228 | int tb_switch_reset(struct tb *tb, u64 route); | ||
229 | void tb_sw_set_unpplugged(struct tb_switch *sw); | ||
230 | struct tb_switch *get_switch_at_route(struct tb_switch *sw, u64 route); | ||
231 | |||
232 | int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); | ||
233 | int tb_port_add_nfc_credits(struct tb_port *port, int credits); | ||
234 | int tb_port_clear_counter(struct tb_port *port, int counter); | ||
235 | |||
236 | int tb_find_cap(struct tb_port *port, enum tb_cfg_space space, enum tb_cap cap); | ||
237 | |||
238 | struct tb_path *tb_path_alloc(struct tb *tb, int num_hops); | ||
239 | void tb_path_free(struct tb_path *path); | ||
240 | int tb_path_activate(struct tb_path *path); | ||
241 | void tb_path_deactivate(struct tb_path *path); | ||
242 | bool tb_path_is_invalid(struct tb_path *path); | ||
243 | |||
244 | int tb_drom_read(struct tb_switch *sw); | ||
245 | int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); | ||
246 | |||
247 | |||
248 | static inline int tb_route_length(u64 route) | ||
249 | { | ||
250 | return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT; | ||
251 | } | ||
252 | |||
253 | static inline bool tb_is_upstream_port(struct tb_port *port) | ||
254 | { | ||
255 | return port == tb_upstream_port(port->sw); | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * tb_downstream_route() - get route to downstream switch | ||
260 | * | ||
261 | * Port must not be the upstream port (otherwise a loop is created). | ||
262 | * | ||
263 | * Return: Returns a route to the switch behind @port. | ||
264 | */ | ||
265 | static inline u64 tb_downstream_route(struct tb_port *port) | ||
266 | { | ||
267 | return tb_route(port->sw) | ||
268 | | ((u64) port->port << (port->sw->config.depth * 8)); | ||
269 | } | ||
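
tb_route(), tb_route_length() and tb_downstream_route() all use the same encoding: a route is a string of 8-bit port numbers, one per hop (TB_ROUTE_SHIFT = 8), and the depth of a switch selects the byte its downstream port occupies. A standalone userspace sketch of that arithmetic, with fls64_demo() standing in for the kernel's fls64():

#include <stdint.h>
#include <stdio.h>

#define TB_ROUTE_SHIFT 8

static int fls64_demo(uint64_t x)
{
        return x ? 64 - __builtin_clzll(x) : 0;
}

static int route_length(uint64_t route)
{
        return (fls64_demo(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

static uint64_t downstream_route(uint64_t parent_route, int depth, int port)
{
        return parent_route | ((uint64_t)port << (depth * TB_ROUTE_SHIFT));
}

int main(void)
{
        uint64_t hop1 = downstream_route(0, 0, 3);    /* root switch, port 3    */
        uint64_t hop2 = downstream_route(hop1, 1, 5); /* depth-1 switch, port 5 */

        /* prints "route 503, length 2" */
        printf("route %llx, length %d\n",
               (unsigned long long)hop2, route_length(hop2));
        return 0;
}
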
270 | |||
271 | #endif | ||
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h new file mode 100644 index 000000000000..6577af75d9dc --- /dev/null +++ b/drivers/thunderbolt/tb_regs.h | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - Port/Switch config area registers | ||
3 | * | ||
4 | * Every thunderbolt device consists (logically) of a switch with multiple | ||
5 | * ports. Every port contains up to four config regions (HOPS, PORT, SWITCH, | ||
6 | * COUNTERS) which are used to configure the device. | ||
7 | * | ||
8 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
9 | */ | ||
10 | |||
11 | #ifndef _TB_REGS | ||
12 | #define _TB_REGS | ||
13 | |||
14 | #include <linux/types.h> | ||
15 | |||
16 | |||
17 | #define TB_ROUTE_SHIFT 8 /* number of bits in a port entry of a route */ | ||
18 | |||
19 | |||
20 | /* | ||
21 | * TODO: should be 63? But we do not know how to receive frames larger than 256 | ||
22 | * bytes at the frame level. (header + checksum = 16, 60*4 = 240) | ||
23 | */ | ||
24 | #define TB_MAX_CONFIG_RW_LENGTH 60 | ||
25 | |||
26 | enum tb_cap { | ||
27 | TB_CAP_PHY = 0x0001, | ||
28 | TB_CAP_TIME1 = 0x0003, | ||
29 | TB_CAP_PCIE = 0x0004, | ||
30 | TB_CAP_I2C = 0x0005, | ||
31 | TB_CAP_PLUG_EVENTS = 0x0105, /* also EEPROM */ | ||
32 | TB_CAP_TIME2 = 0x0305, | ||
33 | TB_CAL_IECS = 0x0405, | ||
34 | TB_CAP_LINK_CONTROLLER = 0x0605, /* also IECS */ | ||
35 | }; | ||
36 | |||
37 | enum tb_port_state { | ||
38 | TB_PORT_DISABLED = 0, /* tb_cap_phy.disable == 1 */ | ||
39 | TB_PORT_CONNECTING = 1, /* retry */ | ||
40 | TB_PORT_UP = 2, | ||
41 | TB_PORT_UNPLUGGED = 7, | ||
42 | }; | ||
43 | |||
44 | /* capability headers */ | ||
45 | |||
46 | struct tb_cap_basic { | ||
47 | u8 next; | ||
48 | /* enum tb_cap cap:8; prevent "narrower than values of its type" */ | ||
49 | u8 cap; /* if cap == 0x05 then we have an extended capability */ | ||
50 | } __packed; | ||
51 | |||
52 | struct tb_cap_extended_short { | ||
53 | u8 next; /* if next and length are zero then we have a long cap */ | ||
54 | enum tb_cap cap:16; | ||
55 | u8 length; | ||
56 | } __packed; | ||
57 | |||
58 | struct tb_cap_extended_long { | ||
59 | u8 zero1; | ||
60 | enum tb_cap cap:16; | ||
61 | u8 zero2; | ||
62 | u16 next; | ||
63 | u16 length; | ||
64 | } __packed; | ||
65 | |||
66 | /* capabilities */ | ||
67 | |||
68 | struct tb_cap_link_controller { | ||
69 | struct tb_cap_extended_long cap_header; | ||
70 | u32 count:4; /* number of link controllers */ | ||
71 | u32 unknown1:4; | ||
72 | u32 base_offset:8; /* | ||
73 | * offset (into this capability) of the configuration | ||
74 | * area of the first link controller | ||
75 | */ | ||
76 | u32 length:12; /* link controller configuration area length */ | ||
77 | u32 unknown2:4; /* TODO check that length is correct */ | ||
78 | } __packed; | ||
79 | |||
80 | struct tb_cap_phy { | ||
81 | struct tb_cap_basic cap_header; | ||
82 | u32 unknown1:16; | ||
83 | u32 unknown2:14; | ||
84 | bool disable:1; | ||
85 | u32 unknown3:11; | ||
86 | enum tb_port_state state:4; | ||
87 | u32 unknown4:2; | ||
88 | } __packed; | ||
89 | |||
90 | struct tb_eeprom_ctl { | ||
91 | bool clock:1; /* send pulse to transfer one bit */ | ||
92 | bool access_low:1; /* set to 0 before access */ | ||
93 | bool data_out:1; /* to eeprom */ | ||
94 | bool data_in:1; /* from eeprom */ | ||
95 | bool access_high:1; /* set to 1 before access */ | ||
96 | bool not_present:1; /* should be 0 */ | ||
97 | bool unknown1:1; | ||
98 | bool present:1; /* should be 1 */ | ||
99 | u32 unknown2:24; | ||
100 | } __packed; | ||
101 | |||
102 | struct tb_cap_plug_events { | ||
103 | struct tb_cap_extended_short cap_header; | ||
104 | u32 __unknown1:2; | ||
105 | u32 plug_events:5; | ||
106 | u32 __unknown2:25; | ||
107 | u32 __unknown3; | ||
108 | u32 __unknown4; | ||
109 | struct tb_eeprom_ctl eeprom_ctl; | ||
110 | u32 __unknown5[7]; | ||
111 | u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */ | ||
112 | } __packed; | ||
113 | |||
114 | /* device headers */ | ||
115 | |||
116 | /* Present on port 0 in TB_CFG_SWITCH at address zero. */ | ||
117 | struct tb_regs_switch_header { | ||
118 | /* DWORD 0 */ | ||
119 | u16 vendor_id; | ||
120 | u16 device_id; | ||
121 | /* DWORD 1 */ | ||
122 | u32 first_cap_offset:8; | ||
123 | u32 upstream_port_number:6; | ||
124 | u32 max_port_number:6; | ||
125 | u32 depth:3; | ||
126 | u32 __unknown1:1; | ||
127 | u32 revision:8; | ||
128 | /* DWORD 2 */ | ||
129 | u32 route_lo; | ||
130 | /* DWORD 3 */ | ||
131 | u32 route_hi:31; | ||
132 | bool enabled:1; | ||
133 | /* DWORD 4 */ | ||
134 | u32 plug_events_delay:8; /* | ||
135 | * RW, pause between plug events in | ||
136 | * milliseconds. Writing 0x00 is interpreted | ||
137 | * as 255ms. | ||
138 | */ | ||
139 | u32 __unknown4:16; | ||
140 | u32 thunderbolt_version:8; | ||
141 | } __packed; | ||
142 | |||
143 | enum tb_port_type { | ||
144 | TB_TYPE_INACTIVE = 0x000000, | ||
145 | TB_TYPE_PORT = 0x000001, | ||
146 | TB_TYPE_NHI = 0x000002, | ||
147 | /* TB_TYPE_ETHERNET = 0x020000, lower order bits are not known */ | ||
148 | /* TB_TYPE_SATA = 0x080000, lower order bits are not known */ | ||
149 | TB_TYPE_DP_HDMI_IN = 0x0e0101, | ||
150 | TB_TYPE_DP_HDMI_OUT = 0x0e0102, | ||
151 | TB_TYPE_PCIE_DOWN = 0x100101, | ||
152 | TB_TYPE_PCIE_UP = 0x100102, | ||
153 | /* TB_TYPE_USB = 0x200000, lower order bits are not known */ | ||
154 | }; | ||
155 | |||
156 | /* Present on every port in TB_CFG_PORT at address zero. */ | ||
157 | struct tb_regs_port_header { | ||
158 | /* DWORD 0 */ | ||
159 | u16 vendor_id; | ||
160 | u16 device_id; | ||
161 | /* DWORD 1 */ | ||
162 | u32 first_cap_offset:8; | ||
163 | u32 max_counters:11; | ||
164 | u32 __unknown1:5; | ||
165 | u32 revision:8; | ||
166 | /* DWORD 2 */ | ||
167 | enum tb_port_type type:24; | ||
168 | u32 thunderbolt_version:8; | ||
169 | /* DWORD 3 */ | ||
170 | u32 __unknown2:20; | ||
171 | u32 port_number:6; | ||
172 | u32 __unknown3:6; | ||
173 | /* DWORD 4 */ | ||
174 | u32 nfc_credits; | ||
175 | /* DWORD 5 */ | ||
176 | u32 max_in_hop_id:11; | ||
177 | u32 max_out_hop_id:11; | ||
178 | u32 __unknown4:10; | ||
179 | /* DWORD 6 */ | ||
180 | u32 __unknown5; | ||
181 | /* DWORD 7 */ | ||
182 | u32 __unknown6; | ||
183 | |||
184 | } __packed; | ||
185 | |||
186 | /* Hop register from TB_CFG_HOPS. 8 bytes per entry. */ | ||
187 | struct tb_regs_hop { | ||
188 | /* DWORD 0 */ | ||
189 | u32 next_hop:11; /* | ||
190 | * hop to take after sending the packet through | ||
191 | * out_port (on the incoming port of the next switch) | ||
192 | */ | ||
193 | u32 out_port:6; /* next port of the path (on the same switch) */ | ||
194 | u32 initial_credits:8; | ||
195 | u32 unknown1:6; /* set to zero */ | ||
196 | bool enable:1; | ||
197 | |||
198 | /* DWORD 1 */ | ||
199 | u32 weight:4; | ||
200 | u32 unknown2:4; /* set to zero */ | ||
201 | u32 priority:3; | ||
202 | bool drop_packages:1; | ||
203 | u32 counter:11; /* index into TB_CFG_COUNTERS on this port */ | ||
204 | bool counter_enable:1; | ||
205 | bool ingress_fc:1; | ||
206 | bool egress_fc:1; | ||
207 | bool ingress_shared_buffer:1; | ||
208 | bool egress_shared_buffer:1; | ||
209 | u32 unknown3:4; /* set to zero */ | ||
210 | } __packed; | ||
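
Config space is addressed in 32-bit dwords, so an 8-byte hop entry presumably places hop index N at dword offset 2 * N (this appears consistent with how path.c, earlier in this series, programs hop entries). A trivial standalone sketch of that offset calculation, editorial only:

#include <stdio.h>

/* Each tb_regs_hop entry is two 32-bit dwords (8 bytes). */
static unsigned int hop_entry_dword_offset(unsigned int hop_index)
{
        return 2 * hop_index;
}

int main(void)
{
        /* Hop index 8, hard-coded by tunnel_pci.c, would start at dword 16. */
        printf("%u\n", hop_entry_dword_offset(8));
        return 0;
}
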
211 | |||
212 | |||
213 | #endif | ||
diff --git a/drivers/thunderbolt/tunnel_pci.c b/drivers/thunderbolt/tunnel_pci.c new file mode 100644 index 000000000000..baf1cd370446 --- /dev/null +++ b/drivers/thunderbolt/tunnel_pci.c | |||
@@ -0,0 +1,232 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - PCIe tunnel | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/slab.h> | ||
8 | #include <linux/list.h> | ||
9 | |||
10 | #include "tunnel_pci.h" | ||
11 | #include "tb.h" | ||
12 | |||
13 | #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ | ||
14 | do { \ | ||
15 | struct tb_pci_tunnel *__tunnel = (tunnel); \ | ||
16 | level(__tunnel->tb, "%llx:%x <-> %llx:%x (PCI): " fmt, \ | ||
17 | tb_route(__tunnel->down_port->sw), \ | ||
18 | __tunnel->down_port->port, \ | ||
19 | tb_route(__tunnel->up_port->sw), \ | ||
20 | __tunnel->up_port->port, \ | ||
21 | ## arg); \ | ||
22 | } while (0) | ||
23 | |||
24 | #define tb_tunnel_WARN(tunnel, fmt, arg...) \ | ||
25 | __TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg) | ||
26 | #define tb_tunnel_warn(tunnel, fmt, arg...) \ | ||
27 | __TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg) | ||
28 | #define tb_tunnel_info(tunnel, fmt, arg...) \ | ||
29 | __TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg) | ||
30 | |||
31 | static void tb_pci_init_path(struct tb_path *path) | ||
32 | { | ||
33 | path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; | ||
34 | path->egress_shared_buffer = TB_PATH_NONE; | ||
35 | path->ingress_fc_enable = TB_PATH_ALL; | ||
36 | path->ingress_shared_buffer = TB_PATH_NONE; | ||
37 | path->priority = 3; | ||
38 | path->weight = 1; | ||
39 | path->drop_packages = 0; | ||
40 | path->nfc_credits = 0; | ||
41 | } | ||
42 | |||
43 | /** | ||
44 | * tb_pci_alloc() - allocate a pci tunnel | ||
45 | * | ||
46 | * Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and | ||
47 | * TB_TYPE_PCIE_DOWN. | ||
48 | * | ||
49 | * Currently only paths consisting of two hops are supported (that is, the | ||
50 | * ports must be on "adjacent" switches). | ||
51 | * | ||
52 | * The paths are hard-coded to use hop 8 (the only working hop id available on | ||
53 | * my thunderbolt devices). Therefore at most ONE path per device may be | ||
54 | * activated. | ||
55 | * | ||
56 | * Return: Returns a tb_pci_tunnel on success or NULL on failure. | ||
57 | */ | ||
58 | struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up, | ||
59 | struct tb_port *down) | ||
60 | { | ||
61 | struct tb_pci_tunnel *tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL); | ||
62 | if (!tunnel) | ||
63 | goto err; | ||
64 | tunnel->tb = tb; | ||
65 | tunnel->down_port = down; | ||
66 | tunnel->up_port = up; | ||
67 | INIT_LIST_HEAD(&tunnel->list); | ||
68 | tunnel->path_to_up = tb_path_alloc(up->sw->tb, 2); | ||
69 | if (!tunnel->path_to_up) | ||
70 | goto err; | ||
71 | tunnel->path_to_down = tb_path_alloc(up->sw->tb, 2); | ||
72 | if (!tunnel->path_to_down) | ||
73 | goto err; | ||
74 | tb_pci_init_path(tunnel->path_to_up); | ||
75 | tb_pci_init_path(tunnel->path_to_down); | ||
76 | |||
77 | tunnel->path_to_up->hops[0].in_port = down; | ||
78 | tunnel->path_to_up->hops[0].in_hop_index = 8; | ||
79 | tunnel->path_to_up->hops[0].in_counter_index = -1; | ||
80 | tunnel->path_to_up->hops[0].out_port = tb_upstream_port(up->sw)->remote; | ||
81 | tunnel->path_to_up->hops[0].next_hop_index = 8; | ||
82 | |||
83 | tunnel->path_to_up->hops[1].in_port = tb_upstream_port(up->sw); | ||
84 | tunnel->path_to_up->hops[1].in_hop_index = 8; | ||
85 | tunnel->path_to_up->hops[1].in_counter_index = -1; | ||
86 | tunnel->path_to_up->hops[1].out_port = up; | ||
87 | tunnel->path_to_up->hops[1].next_hop_index = 8; | ||
88 | |||
89 | tunnel->path_to_down->hops[0].in_port = up; | ||
90 | tunnel->path_to_down->hops[0].in_hop_index = 8; | ||
91 | tunnel->path_to_down->hops[0].in_counter_index = -1; | ||
92 | tunnel->path_to_down->hops[0].out_port = tb_upstream_port(up->sw); | ||
93 | tunnel->path_to_down->hops[0].next_hop_index = 8; | ||
94 | |||
95 | tunnel->path_to_down->hops[1].in_port = | ||
96 | tb_upstream_port(up->sw)->remote; | ||
97 | tunnel->path_to_down->hops[1].in_hop_index = 8; | ||
98 | tunnel->path_to_down->hops[1].in_counter_index = -1; | ||
99 | tunnel->path_to_down->hops[1].out_port = down; | ||
100 | tunnel->path_to_down->hops[1].next_hop_index = 8; | ||
101 | return tunnel; | ||
102 | |||
103 | err: | ||
104 | if (tunnel) { | ||
105 | if (tunnel->path_to_down) | ||
106 | tb_path_free(tunnel->path_to_down); | ||
107 | if (tunnel->path_to_up) | ||
108 | tb_path_free(tunnel->path_to_up); | ||
109 | kfree(tunnel); | ||
110 | } | ||
111 | return NULL; | ||
112 | } | ||
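
For orientation, the hop assignments made above form two unidirectional paths, each using hop index 8 on its in-port. The sketch below is editorial, not part of the commit; "host-side" refers to the switch that owns the PCIe down adapter.

    path_to_up   (traffic towards the device):
        host-side PCIe down adapter -> host-side port facing the device
        -> device upstream port -> device PCIe up adapter

    path_to_down (traffic towards the host):
        device PCIe up adapter -> device upstream port
        -> host-side port facing the device -> host-side PCIe down adapter
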
113 | |||
114 | /** | ||
115 | * tb_pci_free() - free a tunnel | ||
116 | * | ||
117 | * The tunnel must have been deactivated. | ||
118 | */ | ||
119 | void tb_pci_free(struct tb_pci_tunnel *tunnel) | ||
120 | { | ||
121 | if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) { | ||
122 | tb_tunnel_WARN(tunnel, "trying to free an activated tunnel\n"); | ||
123 | return; | ||
124 | } | ||
125 | tb_path_free(tunnel->path_to_up); | ||
126 | tb_path_free(tunnel->path_to_down); | ||
127 | kfree(tunnel); | ||
128 | } | ||
129 | |||
130 | /** | ||
131 | * tb_pci_is_invalid() - check whether an activated tunnel is still valid | ||
132 | */ | ||
133 | bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel) | ||
134 | { | ||
135 | WARN_ON(!tunnel->path_to_up->activated); | ||
136 | WARN_ON(!tunnel->path_to_down->activated); | ||
137 | |||
138 | return tb_path_is_invalid(tunnel->path_to_up) | ||
139 | || tb_path_is_invalid(tunnel->path_to_down); | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * tb_pci_port_active() - activate/deactivate PCI capability | ||
144 | * | ||
145 | * Return: Returns 0 on success or an error code on failure. | ||
146 | */ | ||
147 | static int tb_pci_port_active(struct tb_port *port, bool active) | ||
148 | { | ||
149 | u32 word = active ? 0x80000000 : 0x0; | ||
150 | int cap = tb_find_cap(port, TB_CFG_PORT, TB_CAP_PCIE); | ||
151 | if (cap <= 0) { | ||
152 | tb_port_warn(port, "TB_CAP_PCIE not found: %d\n", cap); | ||
153 | return cap ? cap : -ENXIO; | ||
154 | } | ||
155 | return tb_port_write(port, &word, TB_CFG_PORT, cap, 1); | ||
156 | } | ||
157 | |||
158 | /** | ||
159 | * tb_pci_restart() - activate a tunnel after a hardware reset | ||
160 | */ | ||
161 | int tb_pci_restart(struct tb_pci_tunnel *tunnel) | ||
162 | { | ||
163 | int res; | ||
164 | tunnel->path_to_up->activated = false; | ||
165 | tunnel->path_to_down->activated = false; | ||
166 | |||
167 | tb_tunnel_info(tunnel, "activating\n"); | ||
168 | |||
169 | res = tb_path_activate(tunnel->path_to_up); | ||
170 | if (res) | ||
171 | goto err; | ||
172 | res = tb_path_activate(tunnel->path_to_down); | ||
173 | if (res) | ||
174 | goto err; | ||
175 | |||
176 | res = tb_pci_port_active(tunnel->down_port, true); | ||
177 | if (res) | ||
178 | goto err; | ||
179 | |||
180 | res = tb_pci_port_active(tunnel->up_port, true); | ||
181 | if (res) | ||
182 | goto err; | ||
183 | return 0; | ||
184 | err: | ||
185 | tb_tunnel_warn(tunnel, "activation failed\n"); | ||
186 | tb_pci_deactivate(tunnel); | ||
187 | return res; | ||
188 | } | ||
189 | |||
190 | /** | ||
191 | * tb_pci_activate() - activate a tunnel | ||
192 | * | ||
193 | * Return: Returns 0 on success or an error code on failure. | ||
194 | */ | ||
195 | int tb_pci_activate(struct tb_pci_tunnel *tunnel) | ||
196 | { | ||
197 | int res; | ||
198 | if (tunnel->path_to_up->activated || tunnel->path_to_down->activated) { | ||
199 | tb_tunnel_WARN(tunnel, | ||
200 | "trying to activate an already activated tunnel\n"); | ||
201 | return -EINVAL; | ||
202 | } | ||
203 | |||
204 | res = tb_pci_restart(tunnel); | ||
205 | if (res) | ||
206 | return res; | ||
207 | |||
208 | list_add(&tunnel->list, &tunnel->tb->tunnel_list); | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | |||
213 | |||
214 | /** | ||
215 | * tb_pci_deactivate() - deactivate a tunnel | ||
216 | */ | ||
217 | void tb_pci_deactivate(struct tb_pci_tunnel *tunnel) | ||
218 | { | ||
219 | tb_tunnel_info(tunnel, "deactivating\n"); | ||
220 | /* | ||
221 | * TODO: enable reset by writing 0x04000000 to TB_CAP_PCIE + 1 on up | ||
222 | * port. Seems to have no effect? | ||
223 | */ | ||
224 | tb_pci_port_active(tunnel->up_port, false); | ||
225 | tb_pci_port_active(tunnel->down_port, false); | ||
226 | if (tunnel->path_to_down->activated) | ||
227 | tb_path_deactivate(tunnel->path_to_down); | ||
228 | if (tunnel->path_to_up->activated) | ||
229 | tb_path_deactivate(tunnel->path_to_up); | ||
230 | list_del_init(&tunnel->list); | ||
231 | } | ||
232 | |||
diff --git a/drivers/thunderbolt/tunnel_pci.h b/drivers/thunderbolt/tunnel_pci.h new file mode 100644 index 000000000000..a67f93c140fa --- /dev/null +++ b/drivers/thunderbolt/tunnel_pci.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Thunderbolt Cactus Ridge driver - PCIe tunnel | ||
3 | * | ||
4 | * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef TB_PCI_H_ | ||
8 | #define TB_PCI_H_ | ||
9 | |||
10 | #include "tb.h" | ||
11 | |||
12 | struct tb_pci_tunnel { | ||
13 | struct tb *tb; | ||
14 | struct tb_port *up_port; | ||
15 | struct tb_port *down_port; | ||
16 | struct tb_path *path_to_up; | ||
17 | struct tb_path *path_to_down; | ||
18 | struct list_head list; | ||
19 | }; | ||
20 | |||
21 | struct tb_pci_tunnel *tb_pci_alloc(struct tb *tb, struct tb_port *up, | ||
22 | struct tb_port *down); | ||
23 | void tb_pci_free(struct tb_pci_tunnel *tunnel); | ||
24 | int tb_pci_activate(struct tb_pci_tunnel *tunnel); | ||
25 | int tb_pci_restart(struct tb_pci_tunnel *tunnel); | ||
26 | void tb_pci_deactivate(struct tb_pci_tunnel *tunnel); | ||
27 | bool tb_pci_is_invalid(struct tb_pci_tunnel *tunnel); | ||
28 | |||
29 | #endif | ||
30 | |||