1 /*
2 * Copyright © 2014 Red Hat
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/init.h>
26 #include <linux/errno.h>
27 #include <linux/sched.h>
28 #include <linux/seq_file.h>
29 #include <linux/i2c.h>
30 #include <drm/drm_dp_mst_helper.h>
31 #include <drm/drmP.h>
32
33 #include <drm/drm_fixed.h>
34
35 /**
36 * DOC: dp mst helper
37 *
38 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
39 * protocol. The helpers contain a topology manager and bandwidth manager.
40 * The helpers encapsulate the sending and receiving of sideband msgs.
41 */
42 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
43 char *buf);
44 static int test_calc_pbn_mode(void);
45
46 static void drm_dp_put_port(struct drm_dp_mst_port *port);
47
48 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
49 int id,
50 struct drm_dp_payload *payload);
51
52 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
53 struct drm_dp_mst_port *port,
54 int offset, int size, u8 *bytes);
55
56 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
57 struct drm_dp_mst_branch *mstb);
58 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
59 struct drm_dp_mst_branch *mstb,
60 struct drm_dp_mst_port *port);
61 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
62 u8 *guid);
63
64 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
65 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
66 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
67 /* sideband msg handling */
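/*
 * The sideband header CRC below is a 4-bit CRC computed bit-serially over the
 * header nibbles (divisor 0x13); the body CRC is an 8-bit CRC over the
 * message bytes (divisor 0xd5 with the implicit top bit).  Both are plain
 * long-division implementations.
 */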
68 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
69 {
70 u8 bitmask = 0x80;
71 u8 bitshift = 7;
72 u8 array_index = 0;
73 int number_of_bits = num_nibbles * 4;
74 u8 remainder = 0;
75
76 while (number_of_bits != 0) {
77 number_of_bits--;
78 remainder <<= 1;
79 remainder |= (data[array_index] & bitmask) >> bitshift;
80 bitmask >>= 1;
81 bitshift--;
82 if (bitmask == 0) {
83 bitmask = 0x80;
84 bitshift = 7;
85 array_index++;
86 }
87 if ((remainder & 0x10) == 0x10)
88 remainder ^= 0x13;
89 }
90
91 number_of_bits = 4;
92 while (number_of_bits != 0) {
93 number_of_bits--;
94 remainder <<= 1;
95 if ((remainder & 0x10) != 0)
96 remainder ^= 0x13;
97 }
98
99 return remainder;
100 }
101
102 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
103 {
104 u8 bitmask = 0x80;
105 u8 bitshift = 7;
106 u8 array_index = 0;
107 int number_of_bits = number_of_bytes * 8;
108 u16 remainder = 0;
109
110 while (number_of_bits != 0) {
111 number_of_bits--;
112 remainder <<= 1;
113 remainder |= (data[array_index] & bitmask) >> bitshift;
114 bitmask >>= 1;
115 bitshift--;
116 if (bitmask == 0) {
117 bitmask = 0x80;
118 bitshift = 7;
119 array_index++;
120 }
121 if ((remainder & 0x100) == 0x100)
122 remainder ^= 0xd5;
123 }
124
125 number_of_bits = 8;
126 while (number_of_bits != 0) {
127 number_of_bits--;
128 remainder <<= 1;
129 if ((remainder & 0x100) != 0)
130 remainder ^= 0xd5;
131 }
132
133 return remainder & 0xff;
134 }
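/*
 * A sideband message header is 3 fixed bytes (LCT/LCR, broadcast/path_msg/
 * msg_len, SOMT/EOMT/seqno plus the header CRC nibble) plus one byte for
 * every two RAD nibbles, i.e. lct / 2 extra bytes.
 */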
135 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
136 {
137 u8 size = 3;
138 size += (hdr->lct / 2);
139 return size;
140 }
141
142 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
143 u8 *buf, int *len)
144 {
145 int idx = 0;
146 int i;
147 u8 crc4;
148 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
149 for (i = 0; i < (hdr->lct / 2); i++)
150 buf[idx++] = hdr->rad[i];
151 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
152 (hdr->msg_len & 0x3f);
153 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
154
155 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
156 buf[idx - 1] |= (crc4 & 0xf);
157
158 *len = idx;
159 }
160
161 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
162 u8 *buf, int buflen, u8 *hdrlen)
163 {
164 u8 crc4;
165 u8 len;
166 int i;
167 u8 idx;
168 if (buf[0] == 0)
169 return false;
170 len = 3;
171 len += ((buf[0] & 0xf0) >> 4) / 2;
172 if (len > buflen)
173 return false;
174 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
175
176 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
177 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
178 return false;
179 }
180
181 hdr->lct = (buf[0] & 0xf0) >> 4;
182 hdr->lcr = (buf[0] & 0xf);
183 idx = 1;
184 for (i = 0; i < (hdr->lct / 2); i++)
185 hdr->rad[i] = buf[idx++];
186 hdr->broadcast = (buf[idx] >> 7) & 0x1;
187 hdr->path_msg = (buf[idx] >> 6) & 0x1;
188 hdr->msg_len = buf[idx] & 0x3f;
189 idx++;
190 hdr->somt = (buf[idx] >> 7) & 0x1;
191 hdr->eomt = (buf[idx] >> 6) & 0x1;
192 hdr->seqno = (buf[idx] >> 4) & 0x1;
193 idx++;
194 *hdrlen = idx;
195 return true;
196 }
197
198 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
199 struct drm_dp_sideband_msg_tx *raw)
200 {
201 int idx = 0;
202 int i;
203 u8 *buf = raw->msg;
204 buf[idx++] = req->req_type & 0x7f;
205
206 switch (req->req_type) {
207 case DP_ENUM_PATH_RESOURCES:
208 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
209 idx++;
210 break;
211 case DP_ALLOCATE_PAYLOAD:
212 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
213 (req->u.allocate_payload.number_sdp_streams & 0xf);
214 idx++;
215 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
216 idx++;
217 buf[idx] = (req->u.allocate_payload.pbn >> 8);
218 idx++;
219 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
220 idx++;
221 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
222 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
223 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
224 idx++;
225 }
226 if (req->u.allocate_payload.number_sdp_streams & 1) {
227 i = req->u.allocate_payload.number_sdp_streams - 1;
228 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
229 idx++;
230 }
231 break;
232 case DP_QUERY_PAYLOAD:
233 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
234 idx++;
235 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
236 idx++;
237 break;
238 case DP_REMOTE_DPCD_READ:
239 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
240 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
241 idx++;
242 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
243 idx++;
244 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
245 idx++;
246 buf[idx] = (req->u.dpcd_read.num_bytes);
247 idx++;
248 break;
249
250 case DP_REMOTE_DPCD_WRITE:
251 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
252 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
253 idx++;
254 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
255 idx++;
256 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
257 idx++;
258 buf[idx] = (req->u.dpcd_write.num_bytes);
259 idx++;
260 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
261 idx += req->u.dpcd_write.num_bytes;
262 break;
263 case DP_REMOTE_I2C_READ:
264 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
265 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
266 idx++;
267 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
268 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
269 idx++;
270 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
271 idx++;
272 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
273 idx += req->u.i2c_read.transactions[i].num_bytes;
274
275 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
276 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
277 idx++;
278 }
279 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
280 idx++;
281 buf[idx] = (req->u.i2c_read.num_bytes_read);
282 idx++;
283 break;
284
285 case DP_REMOTE_I2C_WRITE:
286 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
287 idx++;
288 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
289 idx++;
290 buf[idx] = (req->u.i2c_write.num_bytes);
291 idx++;
292 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
293 idx += req->u.i2c_write.num_bytes;
294 break;
295 }
296 raw->cur_len = idx;
297 }
298
299 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
300 {
301 u8 crc4;
302 crc4 = drm_dp_msg_data_crc4(msg, len);
303 msg[len] = crc4;
304 }
305
306 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
307 struct drm_dp_sideband_msg_tx *raw)
308 {
309 int idx = 0;
310 u8 *buf = raw->msg;
311
312 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
313
314 raw->cur_len = idx;
315 }
316
317 /* this adds a chunk of msg to the builder to get the final msg */
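/*
 * A complete sideband message may be split over several transactions, each
 * with its own header.  The transaction with SOMT set seeds msg->initial_hdr
 * and starts the message; later transactions are appended until one with
 * EOMT set completes it.  The trailing data CRC byte of each chunk is
 * dropped when the chunk is copied into the assembled message.
 */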
318 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
319 u8 *replybuf, u8 replybuflen, bool hdr)
320 {
321 int ret;
322 u8 crc4;
323
324 if (hdr) {
325 u8 hdrlen;
326 struct drm_dp_sideband_msg_hdr recv_hdr;
327 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
328 if (ret == false) {
329 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
330 return false;
331 }
332
333 /* get length contained in this portion */
334 msg->curchunk_len = recv_hdr.msg_len;
335 msg->curchunk_hdrlen = hdrlen;
336
337 		/* we have already gotten a SOMT - don't bother parsing */
338 if (recv_hdr.somt && msg->have_somt)
339 return false;
340
341 if (recv_hdr.somt) {
342 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
343 msg->have_somt = true;
344 }
345 if (recv_hdr.eomt)
346 msg->have_eomt = true;
347
348 /* copy the bytes for the remainder of this header chunk */
349 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
350 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
351 } else {
352 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
353 msg->curchunk_idx += replybuflen;
354 }
355
356 if (msg->curchunk_idx >= msg->curchunk_len) {
357 /* do CRC */
358 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
359 /* copy chunk into bigger msg */
360 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
361 msg->curlen += msg->curchunk_len - 1;
362 }
363 return true;
364 }
365
366 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
367 struct drm_dp_sideband_msg_reply_body *repmsg)
368 {
369 int idx = 1;
370 int i;
371 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
372 idx += 16;
373 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
374 idx++;
375 if (idx > raw->curlen)
376 goto fail_len;
377 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
378 if (raw->msg[idx] & 0x80)
379 repmsg->u.link_addr.ports[i].input_port = 1;
380
381 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
382 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
383
384 idx++;
385 if (idx > raw->curlen)
386 goto fail_len;
387 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
388 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
389 if (repmsg->u.link_addr.ports[i].input_port == 0)
390 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
391 idx++;
392 if (idx > raw->curlen)
393 goto fail_len;
394 if (repmsg->u.link_addr.ports[i].input_port == 0) {
395 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
396 idx++;
397 if (idx > raw->curlen)
398 goto fail_len;
399 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
400 idx += 16;
401 if (idx > raw->curlen)
402 goto fail_len;
403 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
404 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
405 idx++;
406
407 }
408 if (idx > raw->curlen)
409 goto fail_len;
410 }
411
412 return true;
413 fail_len:
414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
415 return false;
416 }
417
418 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
419 struct drm_dp_sideband_msg_reply_body *repmsg)
420 {
421 int idx = 1;
422 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
423 idx++;
424 if (idx > raw->curlen)
425 goto fail_len;
426 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
427 	if (idx > raw->curlen)
428 goto fail_len;
429
430 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
431 return true;
432 fail_len:
433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
434 return false;
435 }
436
437 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
438 struct drm_dp_sideband_msg_reply_body *repmsg)
439 {
440 int idx = 1;
441 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
442 idx++;
443 if (idx > raw->curlen)
444 goto fail_len;
445 return true;
446 fail_len:
447 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
448 return false;
449 }
450
451 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
452 struct drm_dp_sideband_msg_reply_body *repmsg)
453 {
454 int idx = 1;
455
456 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
457 idx++;
458 if (idx > raw->curlen)
459 goto fail_len;
460 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
461 idx++;
462 /* TODO check */
463 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
464 return true;
465 fail_len:
466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
467 return false;
468 }
469
470 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
471 struct drm_dp_sideband_msg_reply_body *repmsg)
472 {
473 int idx = 1;
474 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
475 idx++;
476 if (idx > raw->curlen)
477 goto fail_len;
478 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
479 idx += 2;
480 if (idx > raw->curlen)
481 goto fail_len;
482 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
483 idx += 2;
484 if (idx > raw->curlen)
485 goto fail_len;
486 return true;
487 fail_len:
488 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
489 return false;
490 }
491
492 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
493 struct drm_dp_sideband_msg_reply_body *repmsg)
494 {
495 int idx = 1;
496 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
497 idx++;
498 if (idx > raw->curlen)
499 goto fail_len;
500 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
501 idx++;
502 if (idx > raw->curlen)
503 goto fail_len;
504 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
505 idx += 2;
506 if (idx > raw->curlen)
507 goto fail_len;
508 return true;
509 fail_len:
510 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
511 return false;
512 }
513
514 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
515 struct drm_dp_sideband_msg_reply_body *repmsg)
516 {
517 int idx = 1;
518 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
519 idx++;
520 if (idx > raw->curlen)
521 goto fail_len;
522 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
523 idx += 2;
524 if (idx > raw->curlen)
525 goto fail_len;
526 return true;
527 fail_len:
528 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
529 return false;
530 }
531
532 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
533 struct drm_dp_sideband_msg_reply_body *msg)
534 {
535 memset(msg, 0, sizeof(*msg));
536 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
537 msg->req_type = (raw->msg[0] & 0x7f);
538
539 if (msg->reply_type) {
540 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
541 msg->u.nak.reason = raw->msg[17];
542 msg->u.nak.nak_data = raw->msg[18];
543 return false;
544 }
545
546 switch (msg->req_type) {
547 case DP_LINK_ADDRESS:
548 return drm_dp_sideband_parse_link_address(raw, msg);
549 case DP_QUERY_PAYLOAD:
550 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
551 case DP_REMOTE_DPCD_READ:
552 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
553 case DP_REMOTE_DPCD_WRITE:
554 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
555 case DP_REMOTE_I2C_READ:
556 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
557 case DP_ENUM_PATH_RESOURCES:
558 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
559 case DP_ALLOCATE_PAYLOAD:
560 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
561 default:
562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
563 return false;
564 }
565 }
566
567 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
568 struct drm_dp_sideband_msg_req_body *msg)
569 {
570 int idx = 1;
571
572 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
573 idx++;
574 if (idx > raw->curlen)
575 goto fail_len;
576
577 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
578 idx += 16;
579 if (idx > raw->curlen)
580 goto fail_len;
581
582 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
583 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
584 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
585 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
586 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
587 idx++;
588 return true;
589 fail_len:
590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
591 return false;
592 }
593
594 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
595 struct drm_dp_sideband_msg_req_body *msg)
596 {
597 int idx = 1;
598
599 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
600 idx++;
601 if (idx > raw->curlen)
602 goto fail_len;
603
604 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
605 idx += 16;
606 if (idx > raw->curlen)
607 goto fail_len;
608
609 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
610 idx++;
611 return true;
612 fail_len:
613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
614 return false;
615 }
616
617 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
618 struct drm_dp_sideband_msg_req_body *msg)
619 {
620 memset(msg, 0, sizeof(*msg));
621 msg->req_type = (raw->msg[0] & 0x7f);
622
623 switch (msg->req_type) {
624 case DP_CONNECTION_STATUS_NOTIFY:
625 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
626 case DP_RESOURCE_STATUS_NOTIFY:
627 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
628 default:
629 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
630 return false;
631 }
632 }
633
634 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
635 {
636 struct drm_dp_sideband_msg_req_body req;
637
638 req.req_type = DP_REMOTE_DPCD_WRITE;
639 req.u.dpcd_write.port_number = port_num;
640 req.u.dpcd_write.dpcd_address = offset;
641 req.u.dpcd_write.num_bytes = num_bytes;
642 req.u.dpcd_write.bytes = bytes;
643 drm_dp_encode_sideband_req(&req, msg);
644
645 return 0;
646 }
647
648 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
649 {
650 struct drm_dp_sideband_msg_req_body req;
651
652 req.req_type = DP_LINK_ADDRESS;
653 drm_dp_encode_sideband_req(&req, msg);
654 return 0;
655 }
656
657 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
658 {
659 struct drm_dp_sideband_msg_req_body req;
660
661 req.req_type = DP_ENUM_PATH_RESOURCES;
662 req.u.port_num.port_number = port_num;
663 drm_dp_encode_sideband_req(&req, msg);
664 msg->path_msg = true;
665 return 0;
666 }
667
668 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
669 u8 vcpi, uint16_t pbn,
670 u8 number_sdp_streams,
671 u8 *sdp_stream_sink)
672 {
673 struct drm_dp_sideband_msg_req_body req;
674 memset(&req, 0, sizeof(req));
675 req.req_type = DP_ALLOCATE_PAYLOAD;
676 req.u.allocate_payload.port_number = port_num;
677 req.u.allocate_payload.vcpi = vcpi;
678 req.u.allocate_payload.pbn = pbn;
679 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
680 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
681 number_sdp_streams);
682 drm_dp_encode_sideband_req(&req, msg);
683 msg->path_msg = true;
684 return 0;
685 }
686
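/*
 * Payload and VCPI ids are handed out from two bitmasks under payload_lock:
 * the first free bit in each mask is claimed, the proposed VCPI is stored in
 * proposed_vcpis[], and the payload id (or -EINVAL if none is free) is
 * returned to the caller.
 */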
687 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
688 struct drm_dp_vcpi *vcpi)
689 {
690 int ret, vcpi_ret;
691
692 mutex_lock(&mgr->payload_lock);
693 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
694 if (ret > mgr->max_payloads) {
695 ret = -EINVAL;
696 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
697 goto out_unlock;
698 }
699
700 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
701 if (vcpi_ret > mgr->max_payloads) {
702 ret = -EINVAL;
703 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
704 goto out_unlock;
705 }
706
707 set_bit(ret, &mgr->payload_mask);
708 set_bit(vcpi_ret, &mgr->vcpi_mask);
709 vcpi->vcpi = vcpi_ret + 1;
710 mgr->proposed_vcpis[ret - 1] = vcpi;
711 out_unlock:
712 mutex_unlock(&mgr->payload_lock);
713 return ret;
714 }
715
716 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
717 int vcpi)
718 {
719 int i;
720 if (vcpi == 0)
721 return;
722
723 mutex_lock(&mgr->payload_lock);
724 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
725 clear_bit(vcpi - 1, &mgr->vcpi_mask);
726
727 for (i = 0; i < mgr->max_payloads; i++) {
728 if (mgr->proposed_vcpis[i])
729 if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
730 mgr->proposed_vcpis[i] = NULL;
731 clear_bit(i + 1, &mgr->payload_mask);
732 }
733 }
734 mutex_unlock(&mgr->payload_lock);
735 }
736
737 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
738 struct drm_dp_sideband_msg_tx *txmsg)
739 {
740 bool ret;
741
742 /*
743 * All updates to txmsg->state are protected by mgr->qlock, and the two
744 * cases we check here are terminal states. For those the barriers
745 * provided by the wake_up/wait_event pair are enough.
746 */
747 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
748 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
749 return ret;
750 }
751
752 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
753 struct drm_dp_sideband_msg_tx *txmsg)
754 {
755 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
756 int ret;
757
758 ret = wait_event_timeout(mgr->tx_waitq,
759 check_txmsg_state(mgr, txmsg),
760 (4 * HZ));
761 mutex_lock(&mstb->mgr->qlock);
762 if (ret > 0) {
763 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
764 ret = -EIO;
765 goto out;
766 }
767 } else {
768 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
769
770 /* dump some state */
771 ret = -EIO;
772
773 /* remove from q */
774 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
775 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
776 list_del(&txmsg->next);
777 }
778
779 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
780 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
781 mstb->tx_slots[txmsg->seqno] = NULL;
782 }
783 }
784 out:
785 mutex_unlock(&mgr->qlock);
786
787 return ret;
788 }
789
790 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
791 {
792 struct drm_dp_mst_branch *mstb;
793
794 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
795 if (!mstb)
796 return NULL;
797
798 mstb->lct = lct;
799 if (lct > 1)
800 memcpy(mstb->rad, rad, lct / 2);
801 INIT_LIST_HEAD(&mstb->ports);
802 kref_init(&mstb->kref);
803 return mstb;
804 }
805
806 static void drm_dp_free_mst_port(struct kref *kref);
807
808 static void drm_dp_free_mst_branch_device(struct kref *kref)
809 {
810 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
811 if (mstb->port_parent) {
812 if (list_empty(&mstb->port_parent->next))
813 kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
814 }
815 kfree(mstb);
816 }
817
818 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
819 {
820 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
821 struct drm_dp_mst_port *port, *tmp;
822 bool wake_tx = false;
823
824 /*
825 * init kref again to be used by ports to remove mst branch when it is
826 * not needed anymore
827 */
828 kref_init(kref);
829
830 if (mstb->port_parent && list_empty(&mstb->port_parent->next))
831 kref_get(&mstb->port_parent->kref);
832
833 /*
834 * destroy all ports - don't need lock
835 * as there are no more references to the mst branch
836 * device at this point.
837 */
838 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
839 list_del(&port->next);
840 drm_dp_put_port(port);
841 }
842
843 /* drop any tx slots msg */
844 mutex_lock(&mstb->mgr->qlock);
845 if (mstb->tx_slots[0]) {
846 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
847 mstb->tx_slots[0] = NULL;
848 wake_tx = true;
849 }
850 if (mstb->tx_slots[1]) {
851 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
852 mstb->tx_slots[1] = NULL;
853 wake_tx = true;
854 }
855 mutex_unlock(&mstb->mgr->qlock);
856
857 if (wake_tx)
858 wake_up(&mstb->mgr->tx_waitq);
859
860 kref_put(kref, drm_dp_free_mst_branch_device);
861 }
862
863 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
864 {
865 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
866 }
867
868
869 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
870 {
871 struct drm_dp_mst_branch *mstb;
872
873 switch (old_pdt) {
874 case DP_PEER_DEVICE_DP_LEGACY_CONV:
875 case DP_PEER_DEVICE_SST_SINK:
876 /* remove i2c over sideband */
877 drm_dp_mst_unregister_i2c_bus(&port->aux);
878 break;
879 case DP_PEER_DEVICE_MST_BRANCHING:
880 mstb = port->mstb;
881 port->mstb = NULL;
882 drm_dp_put_mst_branch_device(mstb);
883 break;
884 }
885 }
886
887 static void drm_dp_destroy_port(struct kref *kref)
888 {
889 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
890 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
891
892 if (!port->input) {
893 port->vcpi.num_slots = 0;
894
895 kfree(port->cached_edid);
896
897 /*
898 * The only time we don't have a connector
899 * on an output port is if the connector init
900 * fails.
901 */
902 if (port->connector) {
903 /* we can't destroy the connector here, as
904 * we might be holding the mode_config.mutex
905 * from an EDID retrieval */
906
907 mutex_lock(&mgr->destroy_connector_lock);
908 kref_get(&port->parent->kref);
909 list_add(&port->next, &mgr->destroy_connector_list);
910 mutex_unlock(&mgr->destroy_connector_lock);
911 schedule_work(&mgr->destroy_connector_work);
912 return;
913 }
914 		/* no need to clean up the vcpi -
915 		 * if we have no connector we never set up a vcpi */
916 drm_dp_port_teardown_pdt(port, port->pdt);
917 }
918 kfree(port);
919 }
920
921 static void drm_dp_put_port(struct drm_dp_mst_port *port)
922 {
923 kref_put(&port->kref, drm_dp_destroy_port);
924 }
925
926 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
927 {
928 struct drm_dp_mst_port *port;
929 struct drm_dp_mst_branch *rmstb;
930 if (to_find == mstb) {
931 kref_get(&mstb->kref);
932 return mstb;
933 }
934 list_for_each_entry(port, &mstb->ports, next) {
935 if (port->mstb) {
936 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
937 if (rmstb)
938 return rmstb;
939 }
940 }
941 return NULL;
942 }
943
944 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
945 {
946 struct drm_dp_mst_branch *rmstb = NULL;
947 mutex_lock(&mgr->lock);
948 if (mgr->mst_primary)
949 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
950 mutex_unlock(&mgr->lock);
951 return rmstb;
952 }
953
954 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
955 {
956 struct drm_dp_mst_port *port, *mport;
957
958 list_for_each_entry(port, &mstb->ports, next) {
959 if (port == to_find) {
960 kref_get(&port->kref);
961 return port;
962 }
963 if (port->mstb) {
964 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
965 if (mport)
966 return mport;
967 }
968 }
969 return NULL;
970 }
971
972 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
973 {
974 struct drm_dp_mst_port *rport = NULL;
975 mutex_lock(&mgr->lock);
976 if (mgr->mst_primary)
977 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
978 mutex_unlock(&mgr->lock);
979 return rport;
980 }
981
982 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
983 {
984 struct drm_dp_mst_port *port;
985
986 list_for_each_entry(port, &mstb->ports, next) {
987 if (port->port_num == port_num) {
988 kref_get(&port->kref);
989 return port;
990 }
991 }
992
993 return NULL;
994 }
995
996 /*
997 * calculate a new RAD for this MST branch device
998 * if parent has an LCT of 2 then it has 1 nibble of RAD,
999 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
1000 */
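/*
 * Example: a branch hanging off port 8 of the primary branch (LCT 1) gets
 * LCT 2 and rad[0] = 0x80, i.e. the new hop's port number packed into the
 * upper nibble.
 */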
1001 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1002 u8 *rad)
1003 {
1004 int parent_lct = port->parent->lct;
1005 int shift = 4;
1006 int idx = (parent_lct - 1) / 2;
1007 if (parent_lct > 1) {
1008 memcpy(rad, port->parent->rad, idx + 1);
1009 shift = (parent_lct % 2) ? 4 : 0;
1010 } else
1011 rad[0] = 0;
1012
1013 rad[idx] |= port->port_num << shift;
1014 return parent_lct + 1;
1015 }
1016
1017 /*
1018  * returns true if the caller should send a link address request to the
1019  * newly created mstb
1020  */
1020 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1021 {
1022 int ret;
1023 u8 rad[6], lct;
1024 bool send_link = false;
1025 switch (port->pdt) {
1026 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1027 case DP_PEER_DEVICE_SST_SINK:
1028 /* add i2c over sideband */
1029 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1030 break;
1031 case DP_PEER_DEVICE_MST_BRANCHING:
1032 lct = drm_dp_calculate_rad(port, rad);
1033
1034 		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
1035 			port->mstb->mgr = port->mgr;
1036 			port->mstb->port_parent = port;
1037 
1038 			send_link = true;
		}
1039 break;
1040 }
1041 return send_link;
1042 }
1043
1044 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1045 {
1046 int ret;
1047
1048 memcpy(mstb->guid, guid, 16);
1049
1050 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1051 if (mstb->port_parent) {
1052 ret = drm_dp_send_dpcd_write(
1053 mstb->mgr,
1054 mstb->port_parent,
1055 DP_GUID,
1056 16,
1057 mstb->guid);
1058 } else {
1059
1060 ret = drm_dp_dpcd_write(
1061 mstb->mgr->aux,
1062 DP_GUID,
1063 mstb->guid,
1064 16);
1065 }
1066 }
1067 }
1068
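/*
 * The connector property path is "mst:<conn_base_id>" followed by one
 * "-<port>" element per hop, e.g. "mst:4-1-8" for port 8 behind port 1 of
 * the manager with connector base id 4.
 */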
1069 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1070 int pnum,
1071 char *proppath,
1072 size_t proppath_size)
1073 {
1074 int i;
1075 char temp[8];
1076 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1077 for (i = 0; i < (mstb->lct - 1); i++) {
1078 int shift = (i % 2) ? 0 : 4;
1079 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1080 snprintf(temp, sizeof(temp), "-%d", port_num);
1081 strlcat(proppath, temp, proppath_size);
1082 }
1083 snprintf(temp, sizeof(temp), "-%d", pnum);
1084 strlcat(proppath, temp, proppath_size);
1085 }
1086
1087 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1088 struct device *dev,
1089 struct drm_dp_link_addr_reply_port *port_msg)
1090 {
1091 struct drm_dp_mst_port *port;
1092 bool ret;
1093 bool created = false;
1094 int old_pdt = 0;
1095 int old_ddps = 0;
1096 port = drm_dp_get_port(mstb, port_msg->port_number);
1097 if (!port) {
1098 port = kzalloc(sizeof(*port), GFP_KERNEL);
1099 if (!port)
1100 return;
1101 kref_init(&port->kref);
1102 port->parent = mstb;
1103 port->port_num = port_msg->port_number;
1104 port->mgr = mstb->mgr;
1105 port->aux.name = "DPMST";
1106 port->aux.dev = dev;
1107 created = true;
1108 } else {
1109 old_pdt = port->pdt;
1110 old_ddps = port->ddps;
1111 }
1112
1113 port->pdt = port_msg->peer_device_type;
1114 port->input = port_msg->input_port;
1115 port->mcs = port_msg->mcs;
1116 port->ddps = port_msg->ddps;
1117 port->ldps = port_msg->legacy_device_plug_status;
1118 port->dpcd_rev = port_msg->dpcd_revision;
1119 port->num_sdp_streams = port_msg->num_sdp_streams;
1120 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1121
1122 /* manage mstb port lists with mgr lock - take a reference
1123 for this list */
1124 if (created) {
1125 mutex_lock(&mstb->mgr->lock);
1126 kref_get(&port->kref);
1127 list_add(&port->next, &mstb->ports);
1128 mutex_unlock(&mstb->mgr->lock);
1129 }
1130
1131 if (old_ddps != port->ddps) {
1132 if (port->ddps) {
1133 if (!port->input)
1134 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1135 } else {
1136 port->available_pbn = 0;
1137 }
1138 }
1139
1140 if (old_pdt != port->pdt && !port->input) {
1141 drm_dp_port_teardown_pdt(port, old_pdt);
1142
1143 ret = drm_dp_port_setup_pdt(port);
1144 if (ret == true)
1145 drm_dp_send_link_address(mstb->mgr, port->mstb);
1146 }
1147
1148 if (created && !port->input) {
1149 char proppath[255];
1150
1151 build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
1152 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1153 if (!port->connector) {
1154 /* remove it from the port list */
1155 mutex_lock(&mstb->mgr->lock);
1156 list_del(&port->next);
1157 mutex_unlock(&mstb->mgr->lock);
1158 /* drop port list reference */
1159 drm_dp_put_port(port);
1160 goto out;
1161 }
1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 drm_mode_connector_set_tile_property(port->connector);
1165 }
1166 (*mstb->mgr->cbs->register_connector)(port->connector);
1167 }
1168
1169 out:
1170 /* put reference to this port */
1171 drm_dp_put_port(port);
1172 }
1173
1174 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1175 struct drm_dp_connection_status_notify *conn_stat)
1176 {
1177 struct drm_dp_mst_port *port;
1178 int old_pdt;
1179 int old_ddps;
1180 bool dowork = false;
1181 port = drm_dp_get_port(mstb, conn_stat->port_number);
1182 if (!port)
1183 return;
1184
1185 old_ddps = port->ddps;
1186 old_pdt = port->pdt;
1187 port->pdt = conn_stat->peer_device_type;
1188 port->mcs = conn_stat->message_capability_status;
1189 port->ldps = conn_stat->legacy_device_plug_status;
1190 port->ddps = conn_stat->displayport_device_plug_status;
1191
1192 if (old_ddps != port->ddps) {
1193 if (port->ddps) {
1194 dowork = true;
1195 } else {
1196 port->available_pbn = 0;
1197 }
1198 }
1199 if (old_pdt != port->pdt && !port->input) {
1200 drm_dp_port_teardown_pdt(port, old_pdt);
1201
1202 if (drm_dp_port_setup_pdt(port))
1203 dowork = true;
1204 }
1205
1206 drm_dp_put_port(port);
1207 if (dowork)
1208 queue_work(system_long_wq, &mstb->mgr->work);
1209
1210 }
1211
1212 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1213 u8 lct, u8 *rad)
1214 {
1215 struct drm_dp_mst_branch *mstb;
1216 struct drm_dp_mst_port *port;
1217 int i;
1218 /* find the port by iterating down */
1219
1220 mutex_lock(&mgr->lock);
1221 mstb = mgr->mst_primary;
1222
1223 for (i = 0; i < lct - 1; i++) {
1224 int shift = (i % 2) ? 0 : 4;
1225 int port_num = (rad[i / 2] >> shift) & 0xf;
1226
1227 list_for_each_entry(port, &mstb->ports, next) {
1228 if (port->port_num == port_num) {
1229 mstb = port->mstb;
1230 if (!mstb) {
1231 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1232 goto out;
1233 }
1234
1235 break;
1236 }
1237 }
1238 }
1239 kref_get(&mstb->kref);
1240 out:
1241 mutex_unlock(&mgr->lock);
1242 return mstb;
1243 }
1244
1245 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
1246 struct drm_dp_mst_branch *mstb,
1247 uint8_t *guid)
1248 {
1249 struct drm_dp_mst_branch *found_mstb;
1250 struct drm_dp_mst_port *port;
1251
1252 if (memcmp(mstb->guid, guid, 16) == 0)
1253 return mstb;
1254
1255
1256 list_for_each_entry(port, &mstb->ports, next) {
1257 if (!port->mstb)
1258 continue;
1259
1260 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
1261
1262 if (found_mstb)
1263 return found_mstb;
1264 }
1265
1266 return NULL;
1267 }
1268
1269 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
1270 struct drm_dp_mst_topology_mgr *mgr,
1271 uint8_t *guid)
1272 {
1273 struct drm_dp_mst_branch *mstb;
1274
1275 /* find the port by iterating down */
1276 mutex_lock(&mgr->lock);
1277
1278 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
1279
1280 if (mstb)
1281 kref_get(&mstb->kref);
1282
1283 mutex_unlock(&mgr->lock);
1284 return mstb;
1285 }
1286
1287 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1288 struct drm_dp_mst_branch *mstb)
1289 {
1290 struct drm_dp_mst_port *port;
1291 struct drm_dp_mst_branch *mstb_child;
1292 if (!mstb->link_address_sent)
1293 drm_dp_send_link_address(mgr, mstb);
1294
1295 list_for_each_entry(port, &mstb->ports, next) {
1296 if (port->input)
1297 continue;
1298
1299 if (!port->ddps)
1300 continue;
1301
1302 if (!port->available_pbn)
1303 drm_dp_send_enum_path_resources(mgr, mstb, port);
1304
1305 if (port->mstb) {
1306 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
1307 if (mstb_child) {
1308 drm_dp_check_and_send_link_address(mgr, mstb_child);
1309 drm_dp_put_mst_branch_device(mstb_child);
1310 }
1311 }
1312 }
1313 }
1314
1315 static void drm_dp_mst_link_probe_work(struct work_struct *work)
1316 {
1317 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1318 struct drm_dp_mst_branch *mstb;
1319
1320 mutex_lock(&mgr->lock);
1321 mstb = mgr->mst_primary;
1322 if (mstb) {
1323 kref_get(&mstb->kref);
1324 }
1325 mutex_unlock(&mgr->lock);
1326 if (mstb) {
1327 drm_dp_check_and_send_link_address(mgr, mstb);
1328 drm_dp_put_mst_branch_device(mstb);
1329 }
1330 }
1331
1332 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1333 u8 *guid)
1334 {
1335 static u8 zero_guid[16];
1336
1337 if (!memcmp(guid, zero_guid, 16)) {
1338 u64 salt = get_jiffies_64();
1339 memcpy(&guid[0], &salt, sizeof(u64));
1340 memcpy(&guid[8], &salt, sizeof(u64));
1341 return false;
1342 }
1343 return true;
1344 }
1345
1346 #if 0
1347 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1348 {
1349 struct drm_dp_sideband_msg_req_body req;
1350
1351 req.req_type = DP_REMOTE_DPCD_READ;
1352 req.u.dpcd_read.port_number = port_num;
1353 req.u.dpcd_read.dpcd_address = offset;
1354 req.u.dpcd_read.num_bytes = num_bytes;
1355 drm_dp_encode_sideband_req(&req, msg);
1356
1357 return 0;
1358 }
1359 #endif
1360
1361 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1362 bool up, u8 *msg, int len)
1363 {
1364 int ret;
1365 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1366 int tosend, total, offset;
1367 int retries = 0;
1368
1369 retry:
1370 total = len;
1371 offset = 0;
1372 do {
1373 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1374
1375 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1376 &msg[offset],
1377 tosend);
1378 if (ret != tosend) {
1379 if (ret == -EIO && retries < 5) {
1380 retries++;
1381 goto retry;
1382 }
1383 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1384
1385 return -EIO;
1386 }
1387 offset += tosend;
1388 total -= tosend;
1389 } while (total > 0);
1390 return 0;
1391 }
1392
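/*
 * Each branch device has two sideband message slots (seqno 0 and 1).  When a
 * new request is started the first free slot is claimed here; messages that
 * already carry a seqno (e.g. up-request acks) keep it.
 */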
1393 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1394 struct drm_dp_sideband_msg_tx *txmsg)
1395 {
1396 struct drm_dp_mst_branch *mstb = txmsg->dst;
1397 u8 req_type;
1398
1399 /* both msg slots are full */
1400 if (txmsg->seqno == -1) {
1401 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1402 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1403 return -EAGAIN;
1404 }
1405 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1406 txmsg->seqno = mstb->last_seqno;
1407 mstb->last_seqno ^= 1;
1408 } else if (mstb->tx_slots[0] == NULL)
1409 txmsg->seqno = 0;
1410 else
1411 txmsg->seqno = 1;
1412 mstb->tx_slots[txmsg->seqno] = txmsg;
1413 }
1414
1415 req_type = txmsg->msg[0] & 0x7f;
1416 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
1417 req_type == DP_RESOURCE_STATUS_NOTIFY)
1418 hdr->broadcast = 1;
1419 else
1420 hdr->broadcast = 0;
1421 hdr->path_msg = txmsg->path_msg;
1422 hdr->lct = mstb->lct;
1423 hdr->lcr = mstb->lct - 1;
1424 if (mstb->lct > 1)
1425 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1426 hdr->seqno = txmsg->seqno;
1427 return 0;
1428 }
1429 /*
1430 * process a single block of the next message in the sideband queue
1431 */
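/*
 * Each chunk written to the DPCD is at most 48 bytes: the sideband header,
 * up to (48 - 1 - header size) bytes of message body, and a trailing data
 * CRC byte.  SOMT is set on the first chunk of a message and EOMT on the
 * last.
 */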
1432 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1433 struct drm_dp_sideband_msg_tx *txmsg,
1434 bool up)
1435 {
1436 u8 chunk[48];
1437 struct drm_dp_sideband_msg_hdr hdr;
1438 int len, space, idx, tosend;
1439 int ret;
1440
1441 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
1442
1443 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1444 txmsg->seqno = -1;
1445 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1446 }
1447
1448 	/* make hdr from dst mst - for replies use the existing seqno,
1449 	   otherwise assign one */
1450 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1451 if (ret < 0)
1452 return ret;
1453
1454 /* amount left to send in this message */
1455 len = txmsg->cur_len - txmsg->cur_offset;
1456
1457 	/* 48 = max sideband msg size, minus 1 byte for the data CRC and the header bytes */
1458 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1459
1460 tosend = min(len, space);
1461 if (len == txmsg->cur_len)
1462 hdr.somt = 1;
1463 if (space >= len)
1464 hdr.eomt = 1;
1465
1466
1467 hdr.msg_len = tosend + 1;
1468 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1469 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1470 /* add crc at end */
1471 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1472 idx += tosend + 1;
1473
1474 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1475 if (ret) {
1476 DRM_DEBUG_KMS("sideband msg failed to send\n");
1477 return ret;
1478 }
1479
1480 txmsg->cur_offset += tosend;
1481 if (txmsg->cur_offset == txmsg->cur_len) {
1482 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1483 return 1;
1484 }
1485 return 0;
1486 }
1487
1488 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1489 {
1490 struct drm_dp_sideband_msg_tx *txmsg;
1491 int ret;
1492
1493 WARN_ON(!mutex_is_locked(&mgr->qlock));
1494
1495 /* construct a chunk from the first msg in the tx_msg queue */
1496 if (list_empty(&mgr->tx_msg_downq)) {
1497 mgr->tx_down_in_progress = false;
1498 return;
1499 }
1500 mgr->tx_down_in_progress = true;
1501
1502 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1503 ret = process_single_tx_qlock(mgr, txmsg, false);
1504 if (ret == 1) {
1505 		/* txmsg is sent; it should be in the slots now */
1506 list_del(&txmsg->next);
1507 } else if (ret) {
1508 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1509 list_del(&txmsg->next);
1510 if (txmsg->seqno != -1)
1511 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1512 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1513 wake_up(&mgr->tx_waitq);
1514 }
1515 if (list_empty(&mgr->tx_msg_downq)) {
1516 mgr->tx_down_in_progress = false;
1517 return;
1518 }
1519 }
1520
1521 /* called holding qlock */
1522 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1523 struct drm_dp_sideband_msg_tx *txmsg)
1524 {
1525 int ret;
1526
1527 /* construct a chunk from the first msg in the tx_msg queue */
1528 ret = process_single_tx_qlock(mgr, txmsg, true);
1529
1530 if (ret != 1)
1531 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1532
1533 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1534 }
1535
1536 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1537 struct drm_dp_sideband_msg_tx *txmsg)
1538 {
1539 mutex_lock(&mgr->qlock);
1540 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1541 if (!mgr->tx_down_in_progress)
1542 process_single_down_tx_qlock(mgr);
1543 mutex_unlock(&mgr->qlock);
1544 }
1545
1546 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1547 struct drm_dp_mst_branch *mstb)
1548 {
1549 int len;
1550 struct drm_dp_sideband_msg_tx *txmsg;
1551 int ret;
1552
1553 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1554 if (!txmsg)
1555 return;
1556
1557 txmsg->dst = mstb;
1558 len = build_link_address(txmsg);
1559
1560 mstb->link_address_sent = true;
1561 drm_dp_queue_down_tx(mgr, txmsg);
1562
1563 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1564 if (ret > 0) {
1565 int i;
1566
1567 if (txmsg->reply.reply_type == 1)
1568 DRM_DEBUG_KMS("link address nak received\n");
1569 else {
1570 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1571 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1572 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1573 txmsg->reply.u.link_addr.ports[i].input_port,
1574 txmsg->reply.u.link_addr.ports[i].peer_device_type,
1575 txmsg->reply.u.link_addr.ports[i].port_number,
1576 txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1577 txmsg->reply.u.link_addr.ports[i].mcs,
1578 txmsg->reply.u.link_addr.ports[i].ddps,
1579 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1580 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1581 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
1582 }
1583
1584 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
1585
1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1588 }
1589 (*mgr->cbs->hotplug)(mgr);
1590 }
1591 } else {
1592 mstb->link_address_sent = false;
1593 DRM_DEBUG_KMS("link address failed %d\n", ret);
1594 }
1595
1596 kfree(txmsg);
1597 }
1598
1599 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1600 struct drm_dp_mst_branch *mstb,
1601 struct drm_dp_mst_port *port)
1602 {
1603 int len;
1604 struct drm_dp_sideband_msg_tx *txmsg;
1605 int ret;
1606
1607 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1608 if (!txmsg)
1609 return -ENOMEM;
1610
1611 txmsg->dst = mstb;
1612 len = build_enum_path_resources(txmsg, port->port_num);
1613
1614 drm_dp_queue_down_tx(mgr, txmsg);
1615
1616 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1617 if (ret > 0) {
1618 if (txmsg->reply.reply_type == 1)
1619 DRM_DEBUG_KMS("enum path resources nak received\n");
1620 else {
1621 if (port->port_num != txmsg->reply.u.path_resources.port_number)
1622 DRM_ERROR("got incorrect port in response\n");
1623 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1624 txmsg->reply.u.path_resources.avail_payload_bw_number);
1625 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
1626 }
1627 }
1628
1629 kfree(txmsg);
1630 return 0;
1631 }
1632
1633 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
1634 {
1635 if (!mstb->port_parent)
1636 return NULL;
1637
1638 if (mstb->port_parent->mstb != mstb)
1639 return mstb->port_parent;
1640
1641 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
1642 }
1643
1644 static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
1645 struct drm_dp_mst_branch *mstb,
1646 int *port_num)
1647 {
1648 struct drm_dp_mst_branch *rmstb = NULL;
1649 struct drm_dp_mst_port *found_port;
1650 mutex_lock(&mgr->lock);
1651 if (mgr->mst_primary) {
1652 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
1653
1654 if (found_port) {
1655 rmstb = found_port->parent;
1656 kref_get(&rmstb->kref);
1657 *port_num = found_port->port_num;
1658 }
1659 }
1660 mutex_unlock(&mgr->lock);
1661 return rmstb;
1662 }
1663
1664 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1665 struct drm_dp_mst_port *port,
1666 int id,
1667 int pbn)
1668 {
1669 struct drm_dp_sideband_msg_tx *txmsg;
1670 struct drm_dp_mst_branch *mstb;
1671 int len, ret, port_num;
1672 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
1673 int i;
1674
1675 port = drm_dp_get_validated_port_ref(mgr, port);
1676 if (!port)
1677 return -EINVAL;
1678
1679 port_num = port->port_num;
1680 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1681 if (!mstb) {
1682 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1683
1684 if (!mstb) {
1685 drm_dp_put_port(port);
1686 return -EINVAL;
1687 }
1688 }
1689
1690 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1691 if (!txmsg) {
1692 ret = -ENOMEM;
1693 goto fail_put;
1694 }
1695
1696 for (i = 0; i < port->num_sdp_streams; i++)
1697 sinks[i] = i;
1698
1699 txmsg->dst = mstb;
1700 len = build_allocate_payload(txmsg, port_num,
1701 id,
1702 pbn, port->num_sdp_streams, sinks);
1703
1704 drm_dp_queue_down_tx(mgr, txmsg);
1705
1706 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1707 if (ret > 0) {
1708 if (txmsg->reply.reply_type == 1) {
1709 ret = -EINVAL;
1710 } else
1711 ret = 0;
1712 }
1713 kfree(txmsg);
1714 fail_put:
1715 drm_dp_put_mst_branch_device(mstb);
1716 drm_dp_put_port(port);
1717 return ret;
1718 }
1719
1720 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1721 int id,
1722 struct drm_dp_payload *payload)
1723 {
1724 int ret;
1725
1726 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1727 if (ret < 0) {
1728 payload->payload_state = 0;
1729 return ret;
1730 }
1731 payload->payload_state = DP_PAYLOAD_LOCAL;
1732 return 0;
1733 }
1734
1735 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1736 struct drm_dp_mst_port *port,
1737 int id,
1738 struct drm_dp_payload *payload)
1739 {
1740 int ret;
1741 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1742 if (ret < 0)
1743 return ret;
1744 payload->payload_state = DP_PAYLOAD_REMOTE;
1745 return ret;
1746 }
1747
1748 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1749 struct drm_dp_mst_port *port,
1750 int id,
1751 struct drm_dp_payload *payload)
1752 {
1753 DRM_DEBUG_KMS("\n");
1754 	/* it's okay for these to fail */
1755 if (port) {
1756 drm_dp_payload_send_msg(mgr, port, id, 0);
1757 }
1758
1759 drm_dp_dpcd_write_payload(mgr, id, payload);
1760 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
1761 return 0;
1762 }
1763
1764 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1765 int id,
1766 struct drm_dp_payload *payload)
1767 {
1768 payload->payload_state = 0;
1769 return 0;
1770 }
1771
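/*
 * Rough payload update sequence for a driver using these helpers (a sketch;
 * exact call sites are driver specific):
 *
 *   drm_dp_mst_allocate_vcpi()     - reserve slots for each active stream
 *   drm_dp_update_payload_part1()  - update the local VC payload table below
 *   <trigger ACT and wait for it, e.g. with drm_dp_check_act_status()>
 *   drm_dp_update_payload_part2()  - send the remote payload messages
 */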
1772 /**
1773 * drm_dp_update_payload_part1() - Execute payload update part 1
1774 * @mgr: manager to use.
1775 *
1776 * This iterates over all proposed virtual channels, and tries to
1777 * allocate space in the link for them. For 0->slots transitions,
1778 * this step just writes the VCPI to the MST device. For slots->0
1779 * transitions, this writes the updated VCPIs and removes the
1780 * remote VC payloads.
1781 *
1782  * After calling this, the driver should generate ACT and payload
1783  * packets.
1784 */
1785 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1786 {
1787 int i, j;
1788 int cur_slots = 1;
1789 struct drm_dp_payload req_payload;
1790 struct drm_dp_mst_port *port;
1791
1792 mutex_lock(&mgr->payload_lock);
1793 for (i = 0; i < mgr->max_payloads; i++) {
1794 /* solve the current payloads - compare to the hw ones
1795 - update the hw view */
1796 req_payload.start_slot = cur_slots;
1797 if (mgr->proposed_vcpis[i]) {
1798 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1799 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1800 req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
1801 } else {
1802 port = NULL;
1803 req_payload.num_slots = 0;
1804 }
1805
1806 if (mgr->payloads[i].start_slot != req_payload.start_slot) {
1807 mgr->payloads[i].start_slot = req_payload.start_slot;
1808 }
1809 /* work out what is required to happen with this payload */
1810 if (mgr->payloads[i].num_slots != req_payload.num_slots) {
1811
1812 /* need to push an update for this payload */
1813 if (req_payload.num_slots) {
1814 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
1815 mgr->payloads[i].num_slots = req_payload.num_slots;
1816 mgr->payloads[i].vcpi = req_payload.vcpi;
1817 } else if (mgr->payloads[i].num_slots) {
1818 mgr->payloads[i].num_slots = 0;
1819 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]);
1820 req_payload.payload_state = mgr->payloads[i].payload_state;
1821 mgr->payloads[i].start_slot = 0;
1822 }
1823 mgr->payloads[i].payload_state = req_payload.payload_state;
1824 }
1825 cur_slots += req_payload.num_slots;
1826 }
1827
1828 for (i = 0; i < mgr->max_payloads; i++) {
1829 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1830 DRM_DEBUG_KMS("removing payload %d\n", i);
1831 for (j = i; j < mgr->max_payloads - 1; j++) {
1832 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
1833 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
1834 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
1835 set_bit(j + 1, &mgr->payload_mask);
1836 } else {
1837 clear_bit(j + 1, &mgr->payload_mask);
1838 }
1839 }
1840 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
1841 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
1842 clear_bit(mgr->max_payloads, &mgr->payload_mask);
1843
1844 }
1845 }
1846 mutex_unlock(&mgr->payload_lock);
1847
1848 return 0;
1849 }
1850 EXPORT_SYMBOL(drm_dp_update_payload_part1);
1851
1852 /**
1853 * drm_dp_update_payload_part2() - Execute payload update part 2
1854 * @mgr: manager to use.
1855 *
1856 * This iterates over all proposed virtual channels, and tries to
1857 * allocate space in the link for them. For 0->slots transitions,
1858 * this step writes the remote VC payload commands. For slots->0
1859 * this just resets some internal state.
1860 */
1861 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1862 {
1863 struct drm_dp_mst_port *port;
1864 int i;
1865 int ret = 0;
1866 mutex_lock(&mgr->payload_lock);
1867 for (i = 0; i < mgr->max_payloads; i++) {
1868
1869 if (!mgr->proposed_vcpis[i])
1870 continue;
1871
1872 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1873
1874 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1875 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1876 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1877 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1878 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
1879 }
1880 if (ret) {
1881 mutex_unlock(&mgr->payload_lock);
1882 return ret;
1883 }
1884 }
1885 mutex_unlock(&mgr->payload_lock);
1886 return 0;
1887 }
1888 EXPORT_SYMBOL(drm_dp_update_payload_part2);
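
/*
 * Illustrative sketch (not part of the original file): a driver typically
 * runs the two payload update steps around ACT handling, roughly:
 *
 *	drm_dp_update_payload_part1(mgr);	// write the DPCD payload table
 *	// ... enable or disable the stream on the transcoder ...
 *	drm_dp_check_act_status(mgr);		// wait for ACT handled
 *	drm_dp_update_payload_part2(mgr);	// send ALLOCATE_PAYLOAD msgs
 */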
1889
1890 #if 0 /* unused as of yet */
1891 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1892 struct drm_dp_mst_port *port,
1893 int offset, int size)
1894 {
1895 int len;
1896 struct drm_dp_sideband_msg_tx *txmsg;
1897
1898 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1899 if (!txmsg)
1900 return -ENOMEM;
1901
1902 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1903 txmsg->dst = port->parent;
1904
1905 drm_dp_queue_down_tx(mgr, txmsg);
1906
1907 return 0;
1908 }
1909 #endif
1910
1911 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1912 struct drm_dp_mst_port *port,
1913 int offset, int size, u8 *bytes)
1914 {
1915 int len;
1916 int ret;
1917 struct drm_dp_sideband_msg_tx *txmsg;
1918 struct drm_dp_mst_branch *mstb;
1919
1920 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1921 if (!mstb)
1922 return -EINVAL;
1923
1924 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1925 if (!txmsg) {
1926 ret = -ENOMEM;
1927 goto fail_put;
1928 }
1929
1930 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1931 txmsg->dst = mstb;
1932
1933 drm_dp_queue_down_tx(mgr, txmsg);
1934
1935 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1936 if (ret > 0) {
1937 if (txmsg->reply.reply_type == 1) {
1938 ret = -EINVAL;
1939 } else
1940 ret = 0;
1941 }
1942 kfree(txmsg);
1943 fail_put:
1944 drm_dp_put_mst_branch_device(mstb);
1945 return ret;
1946 }
1947
1948 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1949 {
1950 struct drm_dp_sideband_msg_reply_body reply;
1951
1952 reply.reply_type = 0;
1953 reply.req_type = req_type;
1954 drm_dp_encode_sideband_reply(&reply, msg);
1955 return 0;
1956 }
1957
1958 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1959 struct drm_dp_mst_branch *mstb,
1960 int req_type, int seqno, bool broadcast)
1961 {
1962 struct drm_dp_sideband_msg_tx *txmsg;
1963
1964 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1965 if (!txmsg)
1966 return -ENOMEM;
1967
1968 txmsg->dst = mstb;
1969 txmsg->seqno = seqno;
1970 drm_dp_encode_up_ack_reply(txmsg, req_type);
1971
1972 mutex_lock(&mgr->qlock);
1973
1974 process_single_up_tx_qlock(mgr, txmsg);
1975
1976 mutex_unlock(&mgr->qlock);
1977
1978 kfree(txmsg);
1979 return 0;
1980 }
1981
1982 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1983 int dp_link_count,
1984 int *out)
1985 {
1986 switch (dp_link_bw) {
1987 default:
1988 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
1989 dp_link_bw, dp_link_count);
1990 return false;
1991
1992 case DP_LINK_BW_1_62:
1993 *out = 3 * dp_link_count;
1994 break;
1995 case DP_LINK_BW_2_7:
1996 *out = 5 * dp_link_count;
1997 break;
1998 case DP_LINK_BW_5_4:
1999 *out = 10 * dp_link_count;
2000 break;
2001 }
2002 return true;
2003 }
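
/*
 * Worked example (sketch): for a 2.7 Gbps link (DP_LINK_BW_2_7) trained at
 * 4 lanes this yields *out = 5 * 4 = 20, i.e. each MTP time slot carries
 * 20 PBN; the caller stores this value as mgr->pbn_div.
 */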
2004
2005 /**
2006 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2007 * @mgr: manager to set state for
2008 * @mst_state: true to enable MST on this connector - false to disable.
2009 *
2010 * This is called by the driver when it detects an MST capable device plugged
2011 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2012 */
2013 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2014 {
2015 int ret = 0;
2016 struct drm_dp_mst_branch *mstb = NULL;
2017
2018 mutex_lock(&mgr->lock);
2019 if (mst_state == mgr->mst_state)
2020 goto out_unlock;
2021
2022 mgr->mst_state = mst_state;
2023 /* set the device into MST mode */
2024 if (mst_state) {
2025 WARN_ON(mgr->mst_primary);
2026
2027 /* get dpcd info */
2028 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2029 if (ret != DP_RECEIVER_CAP_SIZE) {
2030 DRM_DEBUG_KMS("failed to read DPCD\n");
2031 goto out_unlock;
2032 }
2033
2034 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2035 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2036 &mgr->pbn_div)) {
2037 ret = -EINVAL;
2038 goto out_unlock;
2039 }
2040
2041 mgr->total_pbn = 2560;
2042 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
2043 mgr->avail_slots = mgr->total_slots;
2044
2045 /* add initial branch device at LCT 1 */
2046 mstb = drm_dp_add_mst_branch_device(1, NULL);
2047 if (mstb == NULL) {
2048 ret = -ENOMEM;
2049 goto out_unlock;
2050 }
2051 mstb->mgr = mgr;
2052
2053 /* give this the main reference */
2054 mgr->mst_primary = mstb;
2055 kref_get(&mgr->mst_primary->kref);
2056
2057 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2058 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2059 if (ret < 0) {
2060 goto out_unlock;
2061 }
2062
2063 {
2064 struct drm_dp_payload reset_pay;
2065 reset_pay.start_slot = 0;
2066 reset_pay.num_slots = 0x3f;
2067 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2068 }
2069
2070 queue_work(system_long_wq, &mgr->work);
2071
2072 ret = 0;
2073 } else {
2074 /* disable MST on the device */
2075 mstb = mgr->mst_primary;
2076 mgr->mst_primary = NULL;
2077 /* this can fail if the device is gone */
2078 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2079 ret = 0;
2080 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2081 mgr->payload_mask = 0;
2082 set_bit(0, &mgr->payload_mask);
2083 mgr->vcpi_mask = 0;
2084 }
2085
2086 out_unlock:
2087 mutex_unlock(&mgr->lock);
2088 if (mstb)
2089 drm_dp_put_mst_branch_device(mstb);
2090 return ret;
2091
2092 }
2093 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
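
/*
 * Illustrative usage (sketch; the surrounding detect code is hypothetical):
 * on a long HPD pulse a driver probes the sink's MSTM_CAP bit and flips the
 * manager state to match, e.g.:
 *
 *	u8 mstm_cap;
 *
 *	drm_dp_dpcd_readb(mgr->aux, DP_MSTM_CAP, &mstm_cap);
 *	drm_dp_mst_topology_mgr_set_mst(mgr, mstm_cap & DP_MST_CAP);
 */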
2094
2095 /**
2096 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2097 * @mgr: manager to suspend
2098 *
2099 * This function tells the MST device that we can't handle UP messages
2100 * anymore. This should stop it from sending any since we are suspended.
2101 */
2102 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2103 {
2104 mutex_lock(&mgr->lock);
2105 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2106 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2107 mutex_unlock(&mgr->lock);
2108 flush_work(&mgr->work);
2109 flush_work(&mgr->destroy_connector_work);
2110 }
2111 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2112
2113 /**
2114 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2115 * @mgr: manager to resume
2116 *
2117 * This will fetch DPCD and see if the device is still there.
2118 * If it is, it will rewrite the MSTM control bits, and return.
2119 *
2120 * If the device has gone away, this returns -1 and the driver should
2121 * do a full MST reprobe, in case we were undocked.
2122 */
2123 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2124 {
2125 int ret = 0;
2126
2127 mutex_lock(&mgr->lock);
2128
2129 if (mgr->mst_primary) {
2130 int sret;
2131 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2132 if (sret != DP_RECEIVER_CAP_SIZE) {
2133 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2134 ret = -1;
2135 goto out_unlock;
2136 }
2137
2138 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2139 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2140 if (ret < 0) {
2141 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2142 ret = -1;
2143 goto out_unlock;
2144 }
2145 ret = 0;
2146 } else
2147 ret = -1;
2148
2149 out_unlock:
2150 mutex_unlock(&mgr->lock);
2151 return ret;
2152 }
2153 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
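
/*
 * Illustrative resume handling (sketch): when resume fails the driver should
 * tear MST down and schedule a full reprobe, roughly:
 *
 *	if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
 *		// dock or hub changed/vanished while suspended
 *		drm_dp_mst_topology_mgr_set_mst(mgr, false);
 *		// ... trigger a full connector reprobe ...
 *	}
 */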
2154
2155 static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2156 {
2157 int len;
2158 u8 replyblock[32];
2159 int replylen, origlen, curreply;
2160 int ret;
2161 struct drm_dp_sideband_msg_rx *msg;
2162 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2163 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2164
2165 len = min(mgr->max_dpcd_transaction_bytes, 16);
2166 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2167 replyblock, len);
2168 if (ret != len) {
2169 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2170 return;
2171 }
2172 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2173 if (!ret) {
2174 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2175 return;
2176 }
2177 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2178
2179 origlen = replylen;
2180 replylen -= len;
2181 curreply = len;
2182 while (replylen > 0) {
2183 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2184 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2185 replyblock, len);
2186 if (ret != len) {
2187 DRM_DEBUG_KMS("failed to read a chunk\n");
2188 }
2189 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2190 if (!ret)
2191 DRM_DEBUG_KMS("failed to build sideband msg\n");
2192 curreply += len;
2193 replylen -= len;
2194 }
2195 }
2196
2197 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2198 {
2199 int ret = 0;
2200
2201 drm_dp_get_one_sb_msg(mgr, false);
2202
2203 if (mgr->down_rep_recv.have_eomt) {
2204 struct drm_dp_sideband_msg_tx *txmsg;
2205 struct drm_dp_mst_branch *mstb;
2206 int slot = -1;
2207 mstb = drm_dp_get_mst_branch_device(mgr,
2208 mgr->down_rep_recv.initial_hdr.lct,
2209 mgr->down_rep_recv.initial_hdr.rad);
2210
2211 if (!mstb) {
2212 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2213 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2214 return 0;
2215 }
2216
2217 /* find the message */
2218 slot = mgr->down_rep_recv.initial_hdr.seqno;
2219 mutex_lock(&mgr->qlock);
2220 txmsg = mstb->tx_slots[slot];
2221 /* remove from slots */
2222 mutex_unlock(&mgr->qlock);
2223
2224 if (!txmsg) {
2225 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2226 mstb,
2227 mgr->down_rep_recv.initial_hdr.seqno,
2228 mgr->down_rep_recv.initial_hdr.lct,
2229 mgr->down_rep_recv.initial_hdr.rad[0],
2230 mgr->down_rep_recv.msg[0]);
2231 drm_dp_put_mst_branch_device(mstb);
2232 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2233 return 0;
2234 }
2235
2236 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2237 if (txmsg->reply.reply_type == 1) {
2238 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2239 }
2240
2241 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2242 drm_dp_put_mst_branch_device(mstb);
2243
2244 mutex_lock(&mgr->qlock);
2245 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2246 mstb->tx_slots[slot] = NULL;
2247 mutex_unlock(&mgr->qlock);
2248
2249 wake_up(&mgr->tx_waitq);
2250 }
2251 return ret;
2252 }
2253
2254 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2255 {
2256 int ret = 0;
2257 drm_dp_get_one_sb_msg(mgr, true);
2258
2259 if (mgr->up_req_recv.have_eomt) {
2260 struct drm_dp_sideband_msg_req_body msg;
2261 struct drm_dp_mst_branch *mstb = NULL;
2262 bool seqno;
2263
2264 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2265 mstb = drm_dp_get_mst_branch_device(mgr,
2266 mgr->up_req_recv.initial_hdr.lct,
2267 mgr->up_req_recv.initial_hdr.rad);
2268 if (!mstb) {
2269 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2270 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2271 return 0;
2272 }
2273 }
2274
2275 seqno = mgr->up_req_recv.initial_hdr.seqno;
2276 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2277
2278 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2279 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2280
2281 if (!mstb)
2282 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2283
2284 if (!mstb) {
2285 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2286 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2287 return 0;
2288 }
2289
2290 drm_dp_update_port(mstb, &msg.u.conn_stat);
2291
2292 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2293 (*mgr->cbs->hotplug)(mgr);
2294
2295 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2296 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2297 if (!mstb)
2298 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2299
2300 if (!mstb) {
2301 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2302 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2303 return 0;
2304 }
2305
2306 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2307 }
2308
2309 drm_dp_put_mst_branch_device(mstb);
2310 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2311 }
2312 return ret;
2313 }
2314
2315 /**
2316 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2317 * @mgr: manager to notify irq for.
2318 * @esi: 4 bytes from SINK_COUNT_ESI
2319 * @handled: whether the hpd interrupt was consumed or not
2320 *
2321 * This should be called from the driver when it detects a short IRQ,
2322 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2323 * topology manager will process the sideband messages received as a result
2324 * of this.
2325 */
2326 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2327 {
2328 int ret = 0;
2329 int sc;
2330 *handled = false;
2331 sc = esi[0] & 0x3f;
2332
2333 if (sc != mgr->sink_count) {
2334 mgr->sink_count = sc;
2335 *handled = true;
2336 }
2337
2338 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2339 ret = drm_dp_mst_handle_down_rep(mgr);
2340 *handled = true;
2341 }
2342
2343 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2344 ret |= drm_dp_mst_handle_up_req(mgr);
2345 *handled = true;
2346 }
2347
2348 drm_dp_mst_kick_tx(mgr);
2349 return ret;
2350 }
2351 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
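
/*
 * Illustrative short-pulse handler (sketch, error handling elided): the
 * driver reads the ESI block, hands it to the topology manager and acks the
 * serviced bits, e.g.:
 *
 *	u8 esi[16];
 *	bool handled;
 *
 *	drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 14);
 *	drm_dp_mst_hpd_irq(mgr, esi, &handled);
 *	if (handled)
 *		drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */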
2352
2353 /**
2354 * drm_dp_mst_detect_port() - get connection status for an MST port
 * @connector: DRM connector for this port
2355 * @mgr: manager for this port
2356 * @port: unverified pointer to a port
2357 *
2358 * This returns the current connection state for a port. It validates the
2359 * port pointer still exists so the caller doesn't require a reference.
2360 */
2361 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2362 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2363 {
2364 enum drm_connector_status status = connector_status_disconnected;
2365
2366 /* we need to search for the port in the mgr in case it's gone */
2367 port = drm_dp_get_validated_port_ref(mgr, port);
2368 if (!port)
2369 return connector_status_disconnected;
2370
2371 if (!port->ddps)
2372 goto out;
2373
2374 switch (port->pdt) {
2375 case DP_PEER_DEVICE_NONE:
2376 case DP_PEER_DEVICE_MST_BRANCHING:
2377 break;
2378
2379 case DP_PEER_DEVICE_SST_SINK:
2380 status = connector_status_connected;
2381 /* for logical ports - cache the EDID */
2382 if (port->port_num >= 8 && !port->cached_edid) {
2383 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2384 }
2385 break;
2386 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2387 if (port->ldps)
2388 status = connector_status_connected;
2389 break;
2390 }
2391 out:
2392 drm_dp_put_port(port);
2393 return status;
2394 }
2395 EXPORT_SYMBOL(drm_dp_mst_detect_port);
2396
2397 /**
2398 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
2399 * @mgr: manager for this port
2400 * @port: unverified pointer to a port.
2401 *
2402 * This returns whether the port supports audio or not.
2403 */
2404 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
2405 struct drm_dp_mst_port *port)
2406 {
2407 bool ret = false;
2408
2409 port = drm_dp_get_validated_port_ref(mgr, port);
2410 if (!port)
2411 return ret;
2412 ret = port->has_audio;
2413 drm_dp_put_port(port);
2414 return ret;
2415 }
2416 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
2417
2418 /**
2419 * drm_dp_mst_get_edid() - get EDID for an MST port
2420 * @connector: toplevel connector to get EDID for
2421 * @mgr: manager for this port
2422 * @port: unverified pointer to a port.
2423 *
2424 * This returns an EDID for the port connected to a connector. It
2425 * validates the pointer still exists so the caller doesn't require a
2426 * reference.
2427 */
2428 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2429 {
2430 struct edid *edid = NULL;
2431
2432 /* we need to search for the port in the mgr in case it's gone */
2433 port = drm_dp_get_validated_port_ref(mgr, port);
2434 if (!port)
2435 return NULL;
2436
2437 if (port->cached_edid)
2438 edid = drm_edid_duplicate(port->cached_edid);
2439 else {
2440 edid = drm_get_edid(connector, &port->aux.ddc);
2441 drm_mode_connector_set_tile_property(connector);
2442 }
2443 port->has_audio = drm_detect_monitor_audio(edid);
2444 drm_dp_put_port(port);
2445 return edid;
2446 }
2447 EXPORT_SYMBOL(drm_dp_mst_get_edid);
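
/*
 * Illustrative ->get_modes() usage (sketch; mgr and port come from the
 * driver's connector state): fetch the EDID, publish it and add its modes:
 *
 *	struct edid *edid = drm_dp_mst_get_edid(connector, mgr, port);
 *
 *	drm_mode_connector_update_edid_property(connector, edid);
 *	ret = drm_add_edid_modes(connector, edid);
 *	kfree(edid);
 */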
2448
2449 /**
2450 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2451 * @mgr: manager to use
2452 * @pbn: payload bandwidth to convert into slots.
2453 */
2454 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2455 int pbn)
2456 {
2457 int num_slots;
2458
2459 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2460
2461 if (num_slots > mgr->avail_slots)
2462 return -ENOSPC;
2463 return num_slots;
2464 }
2465 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
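
/*
 * Worked example (sketch): with mgr->pbn_div = 20 (2.7 Gbps x 4 lanes), a
 * stream needing 689 PBN maps to DIV_ROUND_UP(689, 20) = 35 time slots.
 */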
2466
2467 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2468 struct drm_dp_vcpi *vcpi, int pbn)
2469 {
2470 int num_slots;
2471 int ret;
2472
2473 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2474
2475 if (num_slots > mgr->avail_slots)
2476 return -ENOSPC;
2477
2478 vcpi->pbn = pbn;
2479 vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2480 vcpi->num_slots = num_slots;
2481
2482 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2483 if (ret < 0)
2484 return ret;
2485 return 0;
2486 }
2487
2488 /**
2489 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2490 * @mgr: manager for this port
2491 * @port: port to allocate a virtual channel for.
2492 * @pbn: payload bandwidth number to request
2493 * @slots: returned number of slots for this PBN.
2494 */
2495 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2496 {
2497 int ret;
2498
2499 port = drm_dp_get_validated_port_ref(mgr, port);
2500 if (!port)
2501 return false;
2502
2503 if (port->vcpi.vcpi > 0) {
2504 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2505 if (pbn == port->vcpi.pbn) {
2506 *slots = port->vcpi.num_slots;
2507 drm_dp_put_port(port);
2508 return true;
2509 }
2510 }
2511
2512 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2513 if (ret) {
2514 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2515 goto out;
2516 }
2517 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2518 *slots = port->vcpi.num_slots;
2519
2520 drm_dp_put_port(port);
2521 return true;
2522 out:
2523 return false;
2524 }
2525 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
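
/*
 * Illustrative allocation path (sketch; adjusted_mode and bpp are placeholder
 * driver-local names): convert the mode to PBN, then reserve a VCPI, e.g.:
 *
 *	int pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
 *	int slots;
 *
 *	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
 *		return false;	// not enough link bandwidth left
 */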
2526
2527 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2528 {
2529 int slots = 0;
2530 port = drm_dp_get_validated_port_ref(mgr, port);
2531 if (!port)
2532 return slots;
2533
2534 slots = port->vcpi.num_slots;
2535 drm_dp_put_port(port);
2536 return slots;
2537 }
2538 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
2539
2540 /**
2541 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2542 * @mgr: manager for this port
2543 * @port: unverified pointer to a port.
2544 *
2545 * This just resets the number of slots for the ports VCPI for later programming.
2546 */
2547 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2548 {
2549 port = drm_dp_get_validated_port_ref(mgr, port);
2550 if (!port)
2551 return;
2552 port->vcpi.num_slots = 0;
2553 drm_dp_put_port(port);
2554 }
2555 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2556
2557 /**
2558 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2559 * @mgr: manager for this port
2560 * @port: unverified port to deallocate vcpi for
2561 */
2562 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2563 {
2564 port = drm_dp_get_validated_port_ref(mgr, port);
2565 if (!port)
2566 return;
2567
2568 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2569 port->vcpi.num_slots = 0;
2570 port->vcpi.pbn = 0;
2571 port->vcpi.aligned_pbn = 0;
2572 port->vcpi.vcpi = 0;
2573 drm_dp_put_port(port);
2574 }
2575 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
2576
2577 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2578 int id, struct drm_dp_payload *payload)
2579 {
2580 u8 payload_alloc[3], status;
2581 int ret;
2582 int retries = 0;
2583
2584 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2585 DP_PAYLOAD_TABLE_UPDATED);
2586
2587 payload_alloc[0] = id;
2588 payload_alloc[1] = payload->start_slot;
2589 payload_alloc[2] = payload->num_slots;
2590
2591 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2592 if (ret != 3) {
2593 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2594 goto fail;
2595 }
2596
2597 retry:
2598 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2599 if (ret < 0) {
2600 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2601 goto fail;
2602 }
2603
2604 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2605 retries++;
2606 if (retries < 20) {
2607 usleep_range(10000, 20000);
2608 goto retry;
2609 }
2610 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2611 ret = -EINVAL;
2612 goto fail;
2613 }
2614 ret = 0;
2615 fail:
2616 return ret;
2617 }
2618
2619
2620 /**
2621 * drm_dp_check_act_status() - Check ACT handled status.
2622 * @mgr: manager to use
2623 *
2624 * Check the payload status bits in the DPCD for ACT handled completion.
2625 */
2626 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2627 {
2628 u8 status;
2629 int ret;
2630 int count = 0;
2631
2632 do {
2633 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2634
2635 if (ret < 0) {
2636 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2637 goto fail;
2638 }
2639
2640 if (status & DP_PAYLOAD_ACT_HANDLED)
2641 break;
2642 count++;
2643 udelay(100);
2644
2645 } while (count < 30);
2646
2647 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2648 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2649 ret = -EINVAL;
2650 goto fail;
2651 }
2652 return 0;
2653 fail:
2654 return ret;
2655 }
2656 EXPORT_SYMBOL(drm_dp_check_act_status);
2657
2658 /**
2659 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2660 * @clock: dot clock for the mode
2661 * @bpp: bpp for the mode.
2662 *
2663 * This uses the formula in the spec to calculate the PBN value for a mode.
2664 */
2665 int drm_dp_calc_pbn_mode(int clock, int bpp)
2666 {
2667 u64 kbps;
2668 s64 peak_kbps;
2669 u32 numerator;
2670 u32 denominator;
2671
2672 kbps = clock * bpp;
2673
2674 /*
2675 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
2676 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
2677 * common multiplier to render an integer PBN for all link rate/lane
2678 * counts combinations
2679 * calculate
2680 * peak_kbps *= (1006/1000)
2681 * peak_kbps *= (64/54)
2682 * peak_kbps *= 8 convert to bytes
2683 */
2684
2685 numerator = 64 * 1006;
2686 denominator = 54 * 8 * 1000 * 1000;
2687
2688 kbps *= numerator;
2689 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
2690
2691 return drm_fixp2int_ceil(peak_kbps);
2692 }
2693 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
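
/*
 * Worked example matching the self-test below: clock = 154000 kHz, bpp = 30
 * gives kbps = 4620000 and
 * PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000)) = ceil(688.55) = 689.
 */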
2694
2695 static int test_calc_pbn_mode(void)
2696 {
2697 int ret;
2698 ret = drm_dp_calc_pbn_mode(154000, 30);
2699 if (ret != 689) {
2700 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2701 154000, 30, 689, ret);
2702 return -EINVAL;
2703 }
2704 ret = drm_dp_calc_pbn_mode(234000, 30);
2705 if (ret != 1047) {
2706 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2707 234000, 30, 1047, ret);
2708 return -EINVAL;
2709 }
2710 ret = drm_dp_calc_pbn_mode(297000, 24);
2711 if (ret != 1063) {
2712 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
2713 297000, 24, 1063, ret);
2714 return -EINVAL;
2715 }
2716 return 0;
2717 }
2718
2719 /* we want to kick the TX after we've ack the up/down IRQs. */
2720 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2721 {
2722 queue_work(system_long_wq, &mgr->tx_work);
2723 }
2724
2725 static void drm_dp_mst_dump_mstb(struct seq_file *m,
2726 struct drm_dp_mst_branch *mstb)
2727 {
2728 struct drm_dp_mst_port *port;
2729 int tabs = mstb->lct;
2730 char prefix[10];
2731 int i;
2732
2733 for (i = 0; i < tabs; i++)
2734 prefix[i] = '\t';
2735 prefix[i] = '\0';
2736
2737 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2738 list_for_each_entry(port, &mstb->ports, next) {
2739 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
2740 if (port->mstb)
2741 drm_dp_mst_dump_mstb(m, port->mstb);
2742 }
2743 }
2744
2745 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2746 char *buf)
2747 {
2748 int ret;
2749 int i;
2750 for (i = 0; i < 4; i++) {
2751 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2752 if (ret != 16)
2753 break;
2754 }
2755 if (i == 4)
2756 return true;
2757 return false;
2758 }
2759
2760 /**
2761 * drm_dp_mst_dump_topology() - dump topology to seq file.
2762 * @m: seq_file to dump output to
2763 * @mgr: manager to dump current topology for.
2764 *
2765 * helper to dump MST topology to a seq file for debugfs.
2766 */
2767 void drm_dp_mst_dump_topology(struct seq_file *m,
2768 struct drm_dp_mst_topology_mgr *mgr)
2769 {
2770 int i;
2771 struct drm_dp_mst_port *port;
2772 mutex_lock(&mgr->lock);
2773 if (mgr->mst_primary)
2774 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2775
2776 /* dump VCPIs */
2777 mutex_unlock(&mgr->lock);
2778
2779 mutex_lock(&mgr->payload_lock);
2780 seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
2781
2782 for (i = 0; i < mgr->max_payloads; i++) {
2783 if (mgr->proposed_vcpis[i]) {
2784 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2785 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
2786 } else
2787 seq_printf(m, "vcpi %d:unsed\n", i);
2788 }
2789 for (i = 0; i < mgr->max_payloads; i++) {
2790 seq_printf(m, "payload %d: %d, %d, %d\n",
2791 i,
2792 mgr->payloads[i].payload_state,
2793 mgr->payloads[i].start_slot,
2794 mgr->payloads[i].num_slots);
2795
2796
2797 }
2798 mutex_unlock(&mgr->payload_lock);
2799
2800 mutex_lock(&mgr->lock);
2801 if (mgr->mst_primary) {
2802 u8 buf[64];
2803 bool bret;
2804 int ret;
2805 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2806 seq_printf(m, "dpcd: ");
2807 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2808 seq_printf(m, "%02x ", buf[i]);
2809 seq_printf(m, "\n");
2810 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2811 seq_printf(m, "faux/mst: ");
2812 for (i = 0; i < 2; i++)
2813 seq_printf(m, "%02x ", buf[i]);
2814 seq_printf(m, "\n");
2815 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2816 seq_printf(m, "mst ctrl: ");
2817 for (i = 0; i < 1; i++)
2818 seq_printf(m, "%02x ", buf[i]);
2819 seq_printf(m, "\n");
2820
2821 /* dump the standard OUI branch header */
2822 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
2823 seq_printf(m, "branch oui: ");
2824 for (i = 0; i < 0x3; i++)
2825 seq_printf(m, "%02x", buf[i]);
2826 seq_printf(m, " devid: ");
2827 for (i = 0x3; i < 0x8; i++)
2828 seq_printf(m, "%c", buf[i]);
2829 seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
2830 seq_printf(m, "\n");
2831 bret = dump_dp_payload_table(mgr, buf);
2832 if (bret) {
2833 seq_printf(m, "payload table: ");
2834 for (i = 0; i < 63; i++)
2835 seq_printf(m, "%02x ", buf[i]);
2836 seq_printf(m, "\n");
2837 }
2838
2839 }
2840
2841 mutex_unlock(&mgr->lock);
2842
2843 }
2844 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
2845
2846 static void drm_dp_tx_work(struct work_struct *work)
2847 {
2848 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2849
2850 mutex_lock(&mgr->qlock);
2851 if (mgr->tx_down_in_progress)
2852 process_single_down_tx_qlock(mgr);
2853 mutex_unlock(&mgr->qlock);
2854 }
2855
2856 static void drm_dp_free_mst_port(struct kref *kref)
2857 {
2858 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
2859 kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
2860 kfree(port);
2861 }
2862
2863 static void drm_dp_destroy_connector_work(struct work_struct *work)
2864 {
2865 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
2866 struct drm_dp_mst_port *port;
2867 bool send_hotplug = false;
2868 /*
2869 * Not a regular list traverse as we have to drop the destroy
2870 * connector lock before destroying the connector, to avoid AB->BA
2871 * ordering between this lock and the config mutex.
2872 */
2873 for (;;) {
2874 mutex_lock(&mgr->destroy_connector_lock);
2875 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
2876 if (!port) {
2877 mutex_unlock(&mgr->destroy_connector_lock);
2878 break;
2879 }
2880 list_del(&port->next);
2881 mutex_unlock(&mgr->destroy_connector_lock);
2882
2883 kref_init(&port->kref);
2884 INIT_LIST_HEAD(&port->next);
2885
2886 mgr->cbs->destroy_connector(mgr, port->connector);
2887
2888 drm_dp_port_teardown_pdt(port, port->pdt);
2889
2890 if (!port->input && port->vcpi.vcpi > 0) {
2891 if (mgr->mst_state) {
2892 drm_dp_mst_reset_vcpi_slots(mgr, port);
2893 drm_dp_update_payload_part1(mgr);
2894 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2895 }
2896 }
2897
2898 kref_put(&port->kref, drm_dp_free_mst_port);
2899 send_hotplug = true;
2900 }
2901 if (send_hotplug)
2902 (*mgr->cbs->hotplug)(mgr);
2903 }
2904
2905 /**
2906 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2907 * @mgr: manager struct to initialise
2908 * @dev: device providing this structure - for i2c addition.
2909 * @aux: DP helper aux channel to talk to this device
2910 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2911 * @max_payloads: maximum number of payloads this GPU can source
2912 * @conn_base_id: the connector object ID the MST device is connected to.
2913 *
2914 * Return 0 for success, or negative error code on failure
2915 */
2916 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2917 struct device *dev, struct drm_dp_aux *aux,
2918 int max_dpcd_transaction_bytes,
2919 int max_payloads, int conn_base_id)
2920 {
2921 mutex_init(&mgr->lock);
2922 mutex_init(&mgr->qlock);
2923 mutex_init(&mgr->payload_lock);
2924 mutex_init(&mgr->destroy_connector_lock);
2925 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2926 INIT_LIST_HEAD(&mgr->destroy_connector_list);
2927 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2928 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2929 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
2930 init_waitqueue_head(&mgr->tx_waitq);
2931 mgr->dev = dev;
2932 mgr->aux = aux;
2933 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2934 mgr->max_payloads = max_payloads;
2935 mgr->conn_base_id = conn_base_id;
2936 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
2937 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
2938 return -EINVAL;
2939 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2940 if (!mgr->payloads)
2941 return -ENOMEM;
2942 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2943 if (!mgr->proposed_vcpis)
2944 return -ENOMEM;
2945 set_bit(0, &mgr->payload_mask);
2946 if (test_calc_pbn_mode() < 0)
2947 DRM_ERROR("MST PBN self-test failed\n");
2948
2949 return 0;
2950 }
2951 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
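
/*
 * Illustrative init call (sketch; dp, mst_mgr and the "16"/"8" limits are
 * example driver values, not requirements):
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev->dev, &dp->aux,
 *					   16, 8, connector->base.id);
 */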
2952
2953 /**
2954 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2955 * @mgr: manager to destroy
2956 */
2957 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2958 {
2959 flush_work(&mgr->work);
2960 flush_work(&mgr->destroy_connector_work);
2961 mutex_lock(&mgr->payload_lock);
2962 kfree(mgr->payloads);
2963 mgr->payloads = NULL;
2964 kfree(mgr->proposed_vcpis);
2965 mgr->proposed_vcpis = NULL;
2966 mutex_unlock(&mgr->payload_lock);
2967 mgr->dev = NULL;
2968 mgr->aux = NULL;
2969 }
2970 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2971
2972 /* I2C device */
2973 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2974 int num)
2975 {
2976 struct drm_dp_aux *aux = adapter->algo_data;
2977 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2978 struct drm_dp_mst_branch *mstb;
2979 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2980 unsigned int i;
2981 bool reading = false;
2982 struct drm_dp_sideband_msg_req_body msg;
2983 struct drm_dp_sideband_msg_tx *txmsg = NULL;
2984 int ret;
2985
2986 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2987 if (!mstb)
2988 return -EREMOTEIO;
2989
2990 /* construct i2c msg */
2991 /* see if last msg is a read */
2992 if (msgs[num - 1].flags & I2C_M_RD)
2993 reading = true;
2994
2995 if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
2996 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
2997 ret = -EIO;
2998 goto out;
2999 }
3000
3001 memset(&msg, 0, sizeof(msg));
3002 msg.req_type = DP_REMOTE_I2C_READ;
3003 msg.u.i2c_read.num_transactions = num - 1;
3004 msg.u.i2c_read.port_number = port->port_num;
3005 for (i = 0; i < num - 1; i++) {
3006 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
3007 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
3008 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
3009 }
3010 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
3011 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
3012
3013 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3014 if (!txmsg) {
3015 ret = -ENOMEM;
3016 goto out;
3017 }
3018
3019 txmsg->dst = mstb;
3020 drm_dp_encode_sideband_req(&msg, txmsg);
3021
3022 drm_dp_queue_down_tx(mgr, txmsg);
3023
3024 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3025 if (ret > 0) {
3026
3027 if (txmsg->reply.reply_type == 1) { /* got a NAK back */
3028 ret = -EREMOTEIO;
3029 goto out;
3030 }
3031 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
3032 ret = -EIO;
3033 goto out;
3034 }
3035 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
3036 ret = num;
3037 }
3038 out:
3039 kfree(txmsg);
3040 drm_dp_put_mst_branch_device(mstb);
3041 return ret;
3042 }
3043
3044 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
3045 {
3046 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
3047 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
3048 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
3049 I2C_FUNC_10BIT_ADDR;
3050 }
3051
3052 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
3053 .functionality = drm_dp_mst_i2c_functionality,
3054 .master_xfer = drm_dp_mst_i2c_xfer,
3055 };
3056
3057 /**
3058 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3059 * @aux: DisplayPort AUX channel
3060 *
3061 * Returns 0 on success or a negative error code on failure.
3062 */
3063 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
3064 {
3065 aux->ddc.algo = &drm_dp_mst_i2c_algo;
3066 aux->ddc.algo_data = aux;
3067 aux->ddc.retries = 3;
3068
3069 aux->ddc.class = I2C_CLASS_DDC;
3070 aux->ddc.owner = THIS_MODULE;
3071 aux->ddc.dev.parent = aux->dev;
3072 aux->ddc.dev.of_node = aux->dev->of_node;
3073
3074 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
3075 sizeof(aux->ddc.name));
3076
3077 return i2c_add_adapter(&aux->ddc);
3078 }
3079
3080 /**
3081 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
3082 * @aux: DisplayPort AUX channel
3083 */
3084 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
3085 {
3086 i2c_del_adapter(&aux->ddc);
3087 }