author     Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 20:36:12 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-08-07 20:36:12 -0400
commit     a7d7a143d0b4cb1914705884ca5c25e322dba693
tree       0ee5e9e43f0863b38a29e8abc293e80eab177d74
parent     43c40df2c7fedce640a6c39fcdf58764f6bbac5c
parent     7963e9db1b1f842fdc53309baa8714d38e9f5681
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM updates from Dave Airlie:
 "Like all good pull reqs this ends with a revert, so it must mean we
  tested it,

  [ Ed. That's _one_ way of looking at it ]

  This pull is missing nouveau. Ben has been stuck trying to track down
  a very longstanding bug that revealed itself due to some other
  changes. I've asked him to send you a direct pull request for nouveau
  once he cleans things up. I'm away until Monday so don't want to
  delay things; you can make a decision on that when he sends it. I
  have my phone so I can ack things, just not really merge much.

  It has one trivial conflict with your tree in armada_drv.c, and the
  pull request also contains some component changes that are already in
  your tree; the base tree from Russell went via Greg's tree already,
  but some stuff still shows up in here that doesn't when I merge my
  tree into yours.

  Otherwise all pretty standard graphics fare, one new driver and
  changes all over the place.

  New drivers:
   - sti kms driver for STMicroelectronics chipsets stih416 and stih407

  core:
   - lots of cleanups to the drm core
   - DP MST helper code merged
   - universal cursor planes
   - render nodes enabled by default

  panel:
   - better panel interfaces
   - new panel support
   - non-continuous clock advertising ability

  ttm:
   - shrinker fixes

  i915:
   - hopefully ditched UMS support
   - runtime pm fixes
   - psr tracking and locking - now enabled by default
   - userptr fixes
   - backlight brightness fixes
   - MST support merged
   - runtime PM for dpms
   - primary planes locking fixes
   - gen8 hw semaphore support
   - fbc fixes
   - runtime PM on S0ix sleep state hw
   - mmio base page flipping
   - lots of vlv/chv fixes
   - universal cursor planes

  radeon:
   - Hawaii fixes
   - display scaler support for non-fixed mode displays
   - new firmware format support
   - dpm on more asics by default
   - GPUVM improvements
   - uncached and wc GTT buffers
   - BOs > visible VRAM

  exynos:
   - i80 interface support
   - module auto-loading
   - ipp driver consolidated

  armada:
   - irq handling in crtc layer only
   - crtc renumbering
   - add component support
   - DT interaction changes

  tegra:
   - load as module fixes
   - eDP bpp and sync polarity fixed
   - DSI non-continuous clock mode support
   - better support for importing buffers from nouveau

  msm:
   - mdp5/apq8084 v1.3 hw enablement
   - devicetree clk changes
   - ifc6410 board working

  tda998x:
   - component support
   - DT documentation update

  vmwgfx:
   - fix compat shader namespace"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (551 commits)
  Revert "drm: drop redundant drm_file->is_master"
  drm/panel: simple: Use devm_gpiod_get_optional()
  drm/dsi: Replace upcasting macro by function
  drm/panel: ld9040: Replace upcasting macro by function
  drm/exynos: dp: Modify driver to support drm_panel
  drm/exynos: Move DP setup into commit()
  drm/panel: simple: Add AUO B133HTN01 panel support
  drm/panel: simple: Support delays in panel functions
  drm/panel: simple: Add proper definition for prepare and unprepare
  drm/panel: s6e8aa0: Add proper definition for prepare and unprepare
  drm/panel: ld9040: Add proper definition for prepare and unprepare
  drm/tegra: Add support for panel prepare and unprepare routines
  drm/exynos: dsi: Add support for panel prepare and unprepare routines
  drm/exynos: dpi: Add support for panel prepare and unprepare routines
  drm/panel: simple: Add dummy prepare and unprepare routines
  drm/panel: s6e8aa0: Add dummy prepare and unprepare routines
  drm/panel: ld9040: Add dummy prepare and unprepare routines
  drm/panel: Provide convenience wrapper for .get_modes()
  drm/panel: add .prepare() and .unprepare() functions
  drm/panel: simple: Remove simple-panel compatible
  ...
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
 drivers/gpu/drm/drm_dp_mst_topology.c | 2715 ++++++++++++++++++++++++++++++++
 1 file changed, 2715 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
new file mode 100644
index 000000000000..ac3c2738db94
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -0,0 +1,2715 @@
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
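/*
 * The loop above is a plain bitwise CRC-4 with generator polynomial
 * x^4 + x + 1 (0b10011, hence the conditional xor with 0x13), computed
 * over the first num_nibbles nibbles of the header and then augmented
 * with four zero bits.  drm_dp_encode_sideband_msg_hdr() below stores
 * the result in the low nibble of the last header byte.
 */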

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
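/*
 * Same scheme as the header CRC, but eight bits wide: a bitwise CRC-8
 * over whole bytes with generator polynomial
 * x^8 + x^7 + x^6 + x^4 + x^2 + 1 (0x1d5; the top bit is consumed by
 * the (remainder & 0x100) test, leaving the xor constant 0xd5).
 * drm_dp_crc_sideband_chunk_req() below appends this CRC as the final
 * byte of every transmitted sideband chunk.
 */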
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}
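/*
 * Worked example of the size math above: a header always carries three
 * fixed bytes (lct/lcr, broadcast/path/msg_len, somt/eomt/seqno/crc).
 * Each pair of RAD nibbles adds one byte, so lct 1 gives a 3-byte
 * header, lct 2 or 3 gives 4 bytes, lct 4 or 5 gives 5 bytes, etc.
 */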

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
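/*
 * Resulting wire layout for the common lct=1 case (no RAD bytes), as a
 * worked example of the encoder above:
 *
 *	byte 0: [ lct:4 | lcr:4 ]
 *	byte 1: [ broadcast:1 | path_msg:1 | msg_len:6 ]
 *	byte 2: [ somt:1 | eomt:1 | 0 | seqno:1 | crc4:4 ]
 *
 * The CRC-4 covers the first (2 * idx) - 1 nibbles, i.e. everything up
 * to but excluding the low nibble of the last byte, which is where the
 * CRC itself lands.
 */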

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
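/*
 * A minimal worked example of the encoder above, assuming the DP 1.2
 * sideband request opcodes (DP_ENUM_PATH_RESOURCES is 0x10 there):
 * enumerating path resources on port 1 serialises to just two bytes,
 * 0x10 (req_type) followed by 0x10 (port_number << 4), before the
 * per-chunk CRC is appended.
 */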

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;
		/* we have already got a somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
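/*
 * Reassembly summary for the builder above: a long sideband message
 * arrives as several chunks, each with its own header and a trailing
 * CRC byte.  The first chunk sets somt, the last sets eomt, and the
 * per-chunk CRC byte (the "- 1" in the copies above) is stripped as
 * each completed chunk is appended to the full message buffer.
 */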

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	vcpi->vcpi = ret;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int id)
{
	if (id == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", id);
	clear_bit(id, &mgr->payload_mask);
	mgr->proposed_vcpis[id - 1] = NULL;
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;
	mutex_lock(&mgr->qlock);
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	mutex_unlock(&mgr->qlock);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	cancel_work_sync(&mstb->mgr->work);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up(&mstb->mgr->tx_waitq);
	kfree(mstb);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}


static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		drm_dp_put_mst_branch_device(port->mstb);
		port->mstb = NULL;
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	if (!port->input) {
		port->vcpi.num_slots = 0;
		if (port->connector)
			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
		drm_dp_port_teardown_pdt(port, port->pdt);

		if (!port->input && port->vcpi.vcpi > 0)
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	}
	kfree(port);

	(*mgr->cbs->hotplug)(mgr);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
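/*
 * Worked example of the nibble packing above: the new port's number is
 * appended as nibble (parent_lct - 1), high nibble first.  Port 2 on
 * the primary branch (lct 1) yields a child with lct 2 and
 * rad[0] = 0x20; port 5 on that child (lct 2) yields lct 3 and
 * rad[0] = 0x25.  drm_dp_get_mst_branch_device() walks the same
 * nibbles back down from the primary branch.
 */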

/*
 * returns true if we need to send a link address for the new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
				   struct drm_dp_mst_port *port)
{
	int ret;
	if (port->dpcd_rev >= 0x12) {
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
		if (!port->guid_valid) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     port,
						     DP_GUID,
						     16, port->guid);
			port->guid_valid = true;
		}
	}
}

static void build_mst_prop_path(struct drm_dp_mst_port *port,
				struct drm_dp_mst_branch *mstb,
				char *proppath)
{
	int i;
	char temp[8];
	snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, 8, "-%d", port_num);
		strncat(proppath, temp, 255);
	}
	snprintf(temp, 8, "-%d", port->port_num);
	strncat(proppath, temp, 255);
}
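/*
 * Continuing the RAD example above with a hypothetical conn_base_id of
 * 10: a port with port_num 1 on the branch at rad[0] = 0x25 (port 2 on
 * the primary branch, then port 5) gets the connector property path
 * "mst:10-2-5-1".  Because the path is derived purely from topology
 * position, the same topology produces the same path on replug.
 */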

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
	memcpy(port->guid, port_msg->peer_guid, 16);

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true) {
			drm_dp_send_link_address(mstb->mgr, port->mstb);
			port->mstb->link_address_sent = true;
		}
	}

	if (created && !port->input) {
		char proppath[255];
		build_mst_prop_path(port, mstb, proppath);
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
	}

	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			dowork = true;
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				if (!port->mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					return NULL;
				}

				mstb = port->mstb;
				break;
			}
		}
	}
	kref_get(&mstb->kref);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;

	if (!mstb->link_address_sent) {
		drm_dp_send_link_address(mgr, mstb);
		mstb->link_address_sent = true;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb)
			drm_dp_check_and_send_link_address(mgr, port->mstb);
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);

	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);

}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
			WARN(1, "fail\n");

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}
	hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
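/*
 * Each branch device supports two outstanding down requests at a time,
 * distinguished by the 1-bit sideband seqno; tx_slots[0]/tx_slots[1]
 * above track which in-flight message each incoming reply seqno
 * belongs to.  A new message takes whichever slot is free, alternating
 * via last_seqno when both are.
 */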
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;


	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
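/*
 * Worked chunking example for the function above: with lct 1 the
 * header is 3 bytes, so each 48-byte chunk carries up to
 * 48 - 1 (CRC) - 3 (header) = 44 message bytes.  A 100-byte request
 * therefore goes out as 44 + 44 + 12 byte chunks, with somt set only
 * on the first chunk and eomt only on the last.
 */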

/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_upq)) {
		mgr->tx_up_in_progress = false;
		return;
	}

	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, true);
	if (ret == 1) {
		/* up txmsgs aren't put in slots - so free after we send it */
		list_del(&txmsg->next);
		kfree(txmsg);
	} else if (ret)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
	mgr->tx_up_in_progress = true;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else
		DRM_DEBUG_KMS("link address failed %d\n", ret);

	kfree(txmsg);
	return 0;
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port->port_num,
				     id,
				     pbn);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_port *port,
					int id,
					struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = 0;
	return 0;
}

static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
					int id,
					struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this, the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].start_slot != req_payload.start_slot ||
		    mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
			} else
				req_payload.payload_state = 0;

			mgr->payloads[i].start_slot = req_payload.start_slot;
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);
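/*
 * Typical driver sequence for the two-part update (a sketch; the ACT
 * trigger in the middle is source-hardware specific and not part of
 * these helpers):
 *
 *	drm_dp_update_payload_part1(mgr);
 *	... trigger ACT in the source hw and wait for it to complete ...
 *	drm_dp_update_payload_part2(mgr);
 */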

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
1673
1674#if 0 /* unused as of yet */
1675static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1676 struct drm_dp_mst_port *port,
1677 int offset, int size)
1678{
1679 int len;
1680 struct drm_dp_sideband_msg_tx *txmsg;
1681
1682 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1683 if (!txmsg)
1684 return -ENOMEM;
1685
1686 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1687 txmsg->dst = port->parent;
1688
1689 drm_dp_queue_down_tx(mgr, txmsg);
1690
1691 return 0;
1692}
1693#endif
1694
1695static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1696 struct drm_dp_mst_port *port,
1697 int offset, int size, u8 *bytes)
1698{
1699 int len;
1700 int ret;
1701 struct drm_dp_sideband_msg_tx *txmsg;
1702 struct drm_dp_mst_branch *mstb;
1703
1704 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1705 if (!mstb)
1706 return -EINVAL;
1707
1708 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1709 if (!txmsg) {
1710 ret = -ENOMEM;
1711 goto fail_put;
1712 }
1713
1714 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1715 txmsg->dst = mstb;
1716
1717 drm_dp_queue_down_tx(mgr, txmsg);
1718
1719 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1720 if (ret > 0) {
1721 if (txmsg->reply.reply_type == 1) {
1722 ret = -EINVAL;
1723 } else
1724 ret = 0;
1725 }
1726 kfree(txmsg);
1727fail_put:
1728 drm_dp_put_mst_branch_device(mstb);
1729 return ret;
1730}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 1;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
	if (!mgr->tx_up_in_progress)
		process_single_up_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
	return 0;
}
/* Return the PBN capacity of a single MTP time slot for this link rate
 * and lane count; this becomes mgr->pbn_div. */
static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
{
	switch (dp_link_bw) {
	case DP_LINK_BW_1_62:
		return 3 * dp_link_count;
	case DP_LINK_BW_2_7:
		return 5 * dp_link_count;
	case DP_LINK_BW_5_4:
		return 10 * dp_link_count;
	}
	return 0;
}
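
/*
 * Worked example for the table above: at DP_LINK_BW_5_4 with 4 lanes
 * one of the 64 MTP time slots carries 10 * 4 = 40 PBN. A stream that
 * needs 689 PBN (see drm_dp_calc_pbn_mode()) therefore occupies
 * DIV_ROUND_UP(689, 40) = 18 slots.
 */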

/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			/* don't return a short read count as success */
			ret = -EINVAL;
			goto out_unlock;
		}

		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0)
			goto out_unlock;

		/* sort out guid */
		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
		if (ret != 16) {
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
			ret = -EINVAL;
			goto out_unlock;
		}

		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
		if (!mgr->guid_valid) {
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
			mgr->guid_valid = true;
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
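
/*
 * Illustrative sketch (not built): how a driver's detect path might
 * flip MST state based on the sink's MSTM_CAP bit. The example_ name
 * is hypothetical.
 */
#if 0
static bool example_check_mst(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_aux *aux)
{
	u8 mstm_cap;

	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
		return false;

	if (mstm_cap & DP_MST_CAP) {
		drm_dp_mst_topology_mgr_set_mst(mgr, true);
		return true;
	}
	drm_dp_mst_topology_mgr_set_mst(mgr, false);
	return false;
}
#endif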

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);

/**
 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
 * @mgr: manager to resume
 *
 * This fetches the DPCD to check that the device is still there; if it
 * is, it rewrites the MSTM control bits and returns 0.
 *
 * If the device is gone this returns -1, and the driver should do a
 * full MST reprobe, in case we were undocked.
 */
int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	mutex_lock(&mgr->lock);

	if (mgr->mst_primary) {
		int sret;

		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (sret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
			ret = -1;
			goto out_unlock;
		}
		ret = 0;
	} else
		ret = -1;

out_unlock:
	mutex_unlock(&mgr->lock);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
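
/*
 * Illustrative sketch (not built): system sleep handling in a driver.
 * A -1 from the resume helper means the topology may have changed
 * (e.g. we were undocked), so the sketch tears MST down and asks
 * userspace to reprobe. example_ names are hypothetical.
 */
#if 0
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr,
			       struct drm_device *dev)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
		drm_kms_helper_hotplug_event(dev); /* force a reprobe */
	}
}
#endif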

static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
	int len;
	u8 replyblock[32];
	int replylen, origlen, curreply;
	int ret;
	struct drm_dp_sideband_msg_rx *msg;
	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;

	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;

	len = min(mgr->max_dpcd_transaction_bytes, 16);
	ret = drm_dp_dpcd_read(mgr->aux, basereg,
			       replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
		return;
	}
	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
	if (!ret) {
		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
		return;
	}
	replylen = msg->curchunk_len + msg->curchunk_hdrlen;

	origlen = replylen;
	replylen -= len;
	curreply = len;
	while (replylen > 0) {
		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
				       replyblock, len);
		if (ret != len) {
			/* don't parse a partial chunk - bail out */
			DRM_DEBUG_KMS("failed to read a chunk\n");
			return;
		}
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret)
			DRM_DEBUG_KMS("failed to build sideband msg\n");
		curreply += len;
		replylen -= len;
	}
}

static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, false);

	if (mgr->down_rep_recv.have_eomt) {
		struct drm_dp_sideband_msg_tx *txmsg;
		struct drm_dp_mst_branch *mstb;
		int slot = -1;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->down_rep_recv.initial_hdr.lct,
						    mgr->down_rep_recv.initial_hdr.rad);

		if (!mstb) {
			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		/* find the message */
		slot = mgr->down_rep_recv.initial_hdr.seqno;
		mutex_lock(&mgr->qlock);
		txmsg = mstb->tx_slots[slot];
		/* remove from slots */
		mutex_unlock(&mgr->qlock);

		if (!txmsg) {
			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
				      mstb,
				      mgr->down_rep_recv.initial_hdr.seqno,
				      mgr->down_rep_recv.initial_hdr.lct,
				      mgr->down_rep_recv.initial_hdr.rad[0],
				      mgr->down_rep_recv.msg[0]);
			drm_dp_put_mst_branch_device(mstb);
			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
		if (txmsg->reply.reply_type == 1) {
			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
		}

		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/* only drop the branch device reference once we are done
		 * touching mstb->tx_slots */
		drm_dp_put_mst_branch_device(mstb);

		wake_up(&mgr->tx_waitq);
	}
	return ret;
}

static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret = 0;

	drm_dp_get_one_sb_msg(mgr, true);

	if (mgr->up_req_recv.have_eomt) {
		struct drm_dp_sideband_msg_req_body msg;
		struct drm_dp_mst_branch *mstb;
		bool seqno;

		mstb = drm_dp_get_mst_branch_device(mgr,
						    mgr->up_req_recv.initial_hdr.lct,
						    mgr->up_req_recv.initial_hdr.rad);
		if (!mstb) {
			DRM_DEBUG_KMS("Got MST up req from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
			memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
			return 0;
		}

		seqno = mgr->up_req_recv.initial_hdr.seqno;
		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);

		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			drm_dp_update_port(mstb, &msg.u.conn_stat);
			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
			(*mgr->cbs->hotplug)(mgr);

		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
			drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
		}

		drm_dp_put_mst_branch_device(mstb);
		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
	}
	return ret;
}

/**
 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: whether the hpd interrupt was consumed or not
 *
 * This should be called from the driver when it detects a short IRQ,
 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
 * topology manager will process the sideband messages received as a result
 * of this.
 */
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
{
	int ret = 0;
	int sc;

	*handled = false;
	sc = esi[0] & 0x3f;

	if (sc != mgr->sink_count) {
		mgr->sink_count = sc;
		*handled = true;
	}

	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
		ret = drm_dp_mst_handle_down_rep(mgr);
		*handled = true;
	}

	if (esi[1] & DP_UP_REQ_MSG_RDY) {
		ret |= drm_dp_mst_handle_up_req(mgr);
		*handled = true;
	}

	drm_dp_mst_kick_tx(mgr);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
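
/*
 * Illustrative sketch (not built): a short-pulse handler feeding the
 * function above. The ESI block is re-read and the serviced bits are
 * acked until the sink stops reporting events. The example_ name and
 * the 14-byte read length are assumptions modelled on typical driver
 * usage.
 */
#if 0
static void example_mst_irq(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_aux *aux)
{
	u8 esi[16];
	bool handled;

	while (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) == 14) {
		drm_dp_mst_hpd_irq(mgr, esi, &handled);
		if (!handled)
			break;
		/* ack the event bits we just serviced */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
#endif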

/**
 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @mgr: manager for this port
 * @port: unverified pointer to a port
 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
 */
enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	enum drm_connector_status status = connector_status_disconnected;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return connector_status_disconnected;

	if (!port->ddps)
		goto out;

	switch (port->pdt) {
	case DP_PEER_DEVICE_NONE:
	case DP_PEER_DEVICE_MST_BRANCHING:
		break;

	case DP_PEER_DEVICE_SST_SINK:
		status = connector_status_connected;
		break;
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
		if (port->ldps)
			status = connector_status_connected;
		break;
	}
out:
	drm_dp_put_port(port);
	return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
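
/*
 * Illustrative sketch (not built): an MST connector's .detect hook is
 * typically a thin wrapper around the helper above. The example_
 * container type and its layout are hypothetical.
 */
#if 0
struct example_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

static enum drm_connector_status
example_connector_detect(struct drm_connector *connector, bool force)
{
	struct example_connector *conn =
		container_of(connector, struct example_connector, base);

	return drm_dp_mst_detect_port(conn->mgr, conn->port);
}
#endif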

/**
 * drm_dp_mst_get_edid() - get EDID for an MST port
 * @connector: toplevel connector to get EDID for
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This returns an EDID for the port connected to a connector. It
 * validates the pointer still exists so the caller doesn't require a
 * reference.
 */
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct edid *edid = NULL;

	/* we need to search for the port in the mgr in case it's gone */
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return NULL;

	edid = drm_get_edid(connector, &port->aux.ddc);
	drm_dp_put_port(port);
	return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
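
/*
 * Illustrative sketch (not built): a .get_modes hook built on the EDID
 * helper above, reusing the hypothetical example_connector from the
 * detect sketch.
 */
#if 0
static int example_connector_get_modes(struct drm_connector *connector)
{
	struct example_connector *conn =
		container_of(connector, struct example_connector, base);
	struct edid *edid;
	int ret;

	edid = drm_dp_mst_get_edid(connector, conn->mgr, conn->port);
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return ret;
}
#endif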

/**
 * drm_dp_find_vcpi_slots() - find slots for this PBN value
 * @mgr: manager to use
 * @pbn: payload bandwidth to convert into slots.
 *
 * Returns the number of time slots needed, or -ENOSPC if not enough
 * slots are available.
 */
int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
			   int pbn)
{
	int num_slots;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;
	return num_slots;
}
EXPORT_SYMBOL(drm_dp_find_vcpi_slots);

static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_vcpi *vcpi, int pbn)
{
	int num_slots;
	int ret;

	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);

	if (num_slots > mgr->avail_slots)
		return -ENOSPC;

	vcpi->pbn = pbn;
	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
	vcpi->num_slots = num_slots;

	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
 * @mgr: manager for this port
 * @port: port to allocate a virtual channel for.
 * @pbn: payload bandwidth number to request
 * @slots: returned number of slots for this PBN.
 */
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
{
	int ret;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return false;

	if (port->vcpi.vcpi > 0) {
		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			/* drop the validation reference before returning */
			drm_dp_put_port(port);
			return true;
		}
	}

	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
	if (ret) {
		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
		goto out;
	}
	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
	*slots = port->vcpi.num_slots;

	drm_dp_put_port(port);
	return true;
out:
	/* don't leak the validation reference on the failure path either */
	drm_dp_put_port(port);
	return false;
}
EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
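
/*
 * Illustrative sketch (not built): VCPI allocation in a driver's mode
 * set path, going from dot clock and bpp to time slots. example_
 * names are hypothetical.
 */
#if 0
static int example_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int clock_khz, int bpp)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return -ENOSPC;

	/* the driver now programs <slots> MTP slots into its own
	 * hardware and runs the two-part payload update */
	return drm_dp_update_payload_part1(mgr);
}
#endif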

/**
 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
 * @mgr: manager for this port
 * @port: unverified pointer to a port.
 *
 * This just resets the number of slots for the port's VCPI for later programming.
 */
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;
	port->vcpi.num_slots = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);

/**
 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
 * @mgr: manager for this port
 * @port: unverified port to deallocate vcpi for
 */
void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return;

	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	port->vcpi.num_slots = 0;
	port->vcpi.pbn = 0;
	port->vcpi.aligned_pbn = 0;
	port->vcpi.vcpi = 0;
	drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, struct drm_dp_payload *payload)
{
	u8 payload_alloc[3], status;
	int ret;
	int retries = 0;

	/* clear the table-updated bit (write 1 to clear) before kicking
	 * off a new allocation */
	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
			   DP_PAYLOAD_TABLE_UPDATED);

	payload_alloc[0] = id;
	payload_alloc[1] = payload->start_slot;
	payload_alloc[2] = payload->num_slots;

	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
	if (ret != 3) {
		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
		goto fail;
	}

retry:
	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
	if (ret < 0) {
		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
		goto fail;
	}

	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
		retries++;
		if (retries < 20) {
			usleep_range(10000, 20000);
			goto retry;
		}
		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
		ret = -EINVAL;
		goto fail;
	}
	ret = 0;
fail:
	return ret;
}

/**
 * drm_dp_check_act_status() - Check ACT handled status.
 * @mgr: manager to use
 *
 * Check the payload status bits in the DPCD for ACT handled completion.
 * Returns 0 once the sink has set DP_PAYLOAD_ACT_HANDLED, or -EINVAL if
 * the bit is still clear after ~3ms of polling.
 */
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
	u8 status;
	int ret;
	int count = 0;

	do {
		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
		if (ret < 0) {
			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
			goto fail;
		}

		if (status & DP_PAYLOAD_ACT_HANDLED)
			break;
		count++;
		udelay(100);
	} while (count < 30);

	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
		ret = -EINVAL;
		goto fail;
	}
	return 0;
fail:
	return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);

/**
 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
 * @clock: dot clock for the mode
 * @bpp: bpp for the mode.
 *
 * This uses the formula in the spec to calculate the PBN value for a mode.
 */
int drm_dp_calc_pbn_mode(int clock, int bpp)
{
	fixed20_12 pix_bw;
	fixed20_12 fbpp;
	fixed20_12 result;
	fixed20_12 margin, tmp;
	u32 res;

	pix_bw.full = dfixed_const(clock);
	fbpp.full = dfixed_const(bpp);
	tmp.full = dfixed_const(8);
	fbpp.full = dfixed_div(fbpp, tmp);

	result.full = dfixed_mul(pix_bw, fbpp);
	margin.full = dfixed_const(54);
	tmp.full = dfixed_const(64);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_div(result, margin);

	margin.full = dfixed_const(1006);
	tmp.full = dfixed_const(1000);
	margin.full = dfixed_div(margin, tmp);
	result.full = dfixed_mul(result, margin);

	result.full = dfixed_div(result, tmp);
	result.full = dfixed_ceil(result);
	res = dfixed_trunc(result);
	return res;
}
EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
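
/*
 * Worked example of the fixed-point math above (matching the first case
 * in test_calc_pbn_mode() below): the formula is
 * PBN = ceil(clock_kHz * bpp/8 * 64/54 * 1.006 / 1000), so a 154 MHz,
 * 30 bpp mode gives 154000 * 3.75 / 0.84375 * 1.006 / 1000 = 688.6,
 * which rounds up to 689 PBN.
 */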

static int test_calc_pbn_mode(void)
{
	int ret;

	ret = drm_dp_calc_pbn_mode(154000, 30);
	if (ret != 689)
		return -EINVAL;
	ret = drm_dp_calc_pbn_mode(234000, 30);
	if (ret != 1047)
		return -EINVAL;
	return 0;
}

/* we want to kick the TX after we've acked the up/down IRQs. */
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
{
	queue_work(system_long_wq, &mgr->tx_work);
}

static void drm_dp_mst_dump_mstb(struct seq_file *m,
				 struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	int tabs = mstb->lct;
	char prefix[10];
	int i;

	/* lct can reach 15 on a deep topology; don't overrun prefix[] */
	if (tabs > 9)
		tabs = 9;
	for (i = 0; i < tabs; i++)
		prefix[i] = '\t';
	prefix[i] = '\0';

	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
	list_for_each_entry(port, &mstb->ports, next) {
		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
		if (port->mstb)
			drm_dp_mst_dump_mstb(m, port->mstb);
	}
}

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  u8 *buf)
{
	int ret;
	int i;

	/* buf must hold at least 64 bytes */
	for (i = 0; i < 4; i++) {
		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
		if (ret != 16)
			break;
	}
	if (i == 4)
		return true;
	return false;
}

/**
 * drm_dp_mst_dump_topology(): dump topology to seq file.
 * @m: seq_file to dump output to
 * @mgr: manager to dump current topology for.
 *
 * helper to dump MST topology to a seq file for debugfs.
 */
void drm_dp_mst_dump_topology(struct seq_file *m,
			      struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
	mutex_unlock(&mgr->lock);

	/* dump VCPIs */
	mutex_lock(&mgr->payload_lock);
	seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
		} else
			seq_printf(m, "vcpi %d:unused\n", i);
	}
	for (i = 0; i < mgr->max_payloads; i++) {
		seq_printf(m, "payload %d: %d, %d, %d\n",
			   i,
			   mgr->payloads[i].payload_state,
			   mgr->payloads[i].start_slot,
			   mgr->payloads[i].num_slots);
	}
	mutex_unlock(&mgr->payload_lock);

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		u8 buf[64];
		bool bret;
		int ret;

		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
		seq_printf(m, "dpcd: ");
		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
		seq_printf(m, "faux/mst: ");
		for (i = 0; i < 2; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");
		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
		seq_printf(m, "mst ctrl: ");
		for (i = 0; i < 1; i++)
			seq_printf(m, "%02x ", buf[i]);
		seq_printf(m, "\n");

		bret = dump_dp_payload_table(mgr, buf);
		if (bret == true) {
			seq_printf(m, "payload table: ");
			for (i = 0; i < 63; i++)
				seq_printf(m, "%02x ", buf[i]);
			seq_printf(m, "\n");
		}
	}
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_dump_topology);
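
/*
 * Illustrative sketch (not built): wiring the dump helper above into a
 * debugfs file with the usual seq_file boilerplate. Stashing the mgr in
 * m->private is an assumption; the lookup is driver specific.
 */
#if 0
static int example_mst_topology_show(struct seq_file *m, void *data)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);
	return 0;
}
#endif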

static void drm_dp_tx_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

	mutex_lock(&mgr->qlock);
	if (mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

/**
 * drm_dp_mst_topology_mgr_init - initialise a topology manager
 * @mgr: manager struct to initialise
 * @dev: device providing this structure - for i2c addition.
 * @aux: DP helper aux channel to talk to this device
 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
 * @max_payloads: maximum number of payloads this GPU can source
 * @conn_base_id: the connector object ID the MST device is connected to.
 *
 * Return 0 for success, or negative error code on failure
 */
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
				 struct device *dev, struct drm_dp_aux *aux,
				 int max_dpcd_transaction_bytes,
				 int max_payloads, int conn_base_id)
{
	mutex_init(&mgr->lock);
	mutex_init(&mgr->qlock);
	mutex_init(&mgr->payload_lock);
	INIT_LIST_HEAD(&mgr->tx_msg_upq);
	INIT_LIST_HEAD(&mgr->tx_msg_downq);
	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
	init_waitqueue_head(&mgr->tx_waitq);
	mgr->dev = dev;
	mgr->aux = aux;
	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
	mgr->max_payloads = max_payloads;
	mgr->conn_base_id = conn_base_id;
	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
	if (!mgr->payloads)
		return -ENOMEM;
	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
	if (!mgr->proposed_vcpis)
		return -ENOMEM;
	set_bit(0, &mgr->payload_mask);
	/* sanity-check the fixed-point PBN arithmetic at init time */
	test_calc_pbn_mode();
	return 0;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
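
/*
 * Illustrative sketch (not built): topology manager setup at driver
 * probe time. The example_ struct, the 16-byte transaction limit and
 * the payload count are hypothetical, hardware-specific values.
 */
#if 0
struct example_dp {
	struct drm_device *dev;
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int example_mst_init(struct example_dp *dp, int conn_id)
{
	/* dp->aux must already be registered via drm_dp_aux_register() */
	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dp->dev->dev,
					    &dp->aux, 16 /* max DPCD txn */,
					    4 /* max payloads */, conn_id);
}
#endif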

/**
 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
 * @mgr: manager to destroy
 */
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->payload_lock);
	kfree(mgr->payloads);
	mgr->payloads = NULL;
	kfree(mgr->proposed_vcpis);
	mgr->proposed_vcpis = NULL;
	mutex_unlock(&mgr->payload_lock);
	mgr->dev = NULL;
	mgr->aux = NULL;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);

/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			       int num)
{
	struct drm_dp_aux *aux = adapter->algo_data;
	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	unsigned int i;
	bool reading = false;
	struct drm_dp_sideband_msg_req_body msg;
	struct drm_dp_sideband_msg_tx *txmsg = NULL;
	int ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EREMOTEIO;

	/* construct i2c msg */
	/* see if last msg is a read */
	if (msgs[num - 1].flags & I2C_M_RD)
		reading = true;

	if (!reading) {
		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
		ret = -EIO;
		goto out;
	}

	msg.req_type = DP_REMOTE_I2C_READ;
	msg.u.i2c_read.num_transactions = num - 1;
	msg.u.i2c_read.port_number = port->port_num;
	for (i = 0; i < num - 1; i++) {
		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
	}
	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto out;
	}

	txmsg->dst = mstb;
	drm_dp_encode_sideband_req(&msg, txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
			ret = -EREMOTEIO;
			goto out;
		}
		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
			ret = -EIO;
			goto out;
		}
		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
		ret = num;
	}
out:
	kfree(txmsg);
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
	       I2C_FUNC_10BIT_ADDR;
}

static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};

/**
 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
 * @aux: DisplayPort AUX channel
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
{
	aux->ddc.algo = &drm_dp_mst_i2c_algo;
	aux->ddc.algo_data = aux;
	aux->ddc.retries = 3;

	aux->ddc.class = I2C_CLASS_DDC;
	aux->ddc.owner = THIS_MODULE;
	aux->ddc.dev.parent = aux->dev;
	aux->ddc.dev.of_node = aux->dev->of_node;

	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
		sizeof(aux->ddc.name));

	return i2c_add_adapter(&aux->ddc);
}

/**
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	i2c_del_adapter(&aux->ddc);
}