/* drivers/gpu/drm/drm_dp_mst_topology.c */
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
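
/*
 * A rough sketch of the expected driver flow (error handling elided;
 * mgr/aux/esi stand in for the driver's own state):
 *
 *	drm_dp_mst_topology_mgr_init(&mgr, dev, aux, max_txb,
 *				     max_payloads, conn_base_id);
 *	...on detecting an MST-capable sink...
 *	drm_dp_mst_topology_mgr_set_mst(&mgr, true);
 *	...from the short-pulse HPD handler...
 *	drm_dp_mst_hpd_irq(&mgr, esi, &handled);
 */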
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
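/*
 * The above is a plain bitwise CRC-4 over the header nibbles with
 * polynomial 0x13 (x^4 + x + 1); headers are tiny, so there is no
 * point in a table-driven version.
 */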

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
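/*
 * Same scheme as the header CRC, but 8 bits wide over whole bytes with
 * polynomial 0xd5 (x^8 + x^7 + x^6 + x^4 + x^2 + 1), which is what the
 * sideband message data CRC uses.
 */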
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}
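/* e.g. LCT 1 -> 3 header bytes (no RAD), LCT 3 -> 4 bytes (1 RAD byte) */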

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
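/*
 * Byte layout of the encoded header, roughly:
 *
 *	[ lct:4 | lcr:4 ][ rad (lct/2 bytes) ]
 *	[ broadcast:1 | path_msg:1 | msg_len:6 ]
 *	[ somt:1 | eomt:1 | 0 | seqno:1 | crc4:4 ]
 *
 * with the 4-bit header CRC folded into the low nibble of the last byte.
 */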

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
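/*
 * A full sideband message may arrive split over several transactions;
 * the first carries SOMT plus the header decoded above and the last
 * carries EOMT.  Each chunk ends in one data CRC byte, which is why
 * only curchunk_len - 1 bytes get copied into the final buffer (note
 * the CRC computed here is not actually checked against that byte).
 */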

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	vcpi->vcpi = ret;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
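/*
 * VCPI/payload ids are handed out from payload_mask; bit 0 is kept set
 * (id 0 means "unallocated"), so valid ids run from 1 to max_payloads
 * and index proposed_vcpis[] as id - 1.
 */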

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int id)
{
	if (id == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", id);
	clear_bit(id, &mgr->payload_mask);
	mgr->proposed_vcpis[id - 1] = NULL;
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;
	mutex_lock(&mgr->qlock);
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	mutex_unlock(&mgr->qlock);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	cancel_work_sync(&mstb->mgr->work);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up(&mstb->mgr->tx_waitq);
	kfree(mstb);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}

static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		drm_dp_put_mst_branch_device(port->mstb);
		port->mstb = NULL;
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
	if (!port->input) {
		port->vcpi.num_slots = 0;
		if (port->connector)
			(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
		drm_dp_port_teardown_pdt(port, port->pdt);

		if (!port->input && port->vcpi.vcpi > 0)
			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
	}
	kfree(port);

	(*mgr->cbs->hotplug)(mgr);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int lct = port->parent->lct;
	int shift = 4;
	int idx = lct / 2;
	if (lct > 1) {
		memcpy(rad, port->parent->rad, idx);
		shift = (lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return lct + 1;
}
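/*
 * Example: a branch plugged into port 2 of the primary (parent LCT 1)
 * ends up with LCT 2 and rad[0] = 0x20 - one nibble per hop below the
 * primary branch, packed high nibble first.
 */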

/*
 * returns true if the caller should send a link address request for
 * the new mstb it created
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
				   struct drm_dp_mst_port *port)
{
	int ret;
	if (port->dpcd_rev >= 0x12) {
		port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
		if (!port->guid_valid) {
			ret = drm_dp_send_dpcd_write(mstb->mgr,
						     port,
						     DP_GUID,
						     16, port->guid);
			port->guid_valid = true;
		}
	}
}

static void build_mst_prop_path(struct drm_dp_mst_port *port,
				struct drm_dp_mst_branch *mstb,
				char *proppath)
{
	int i;
	char temp[8];
	snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = mstb->rad[i / 2] >> shift;
		snprintf(temp, 8, "-%d", port_num);
		strncat(proppath, temp, 255);
	}
	snprintf(temp, 8, "-%d", port->port_num);
	strncat(proppath, temp, 255);
}
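/*
 * For example (values made up): conn_base_id 4, a parent branch at
 * LCT 2 with rad[0] = 0x20, and port 1 on it gives "mst:4-2-1".
 */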

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
	memcpy(port->guid, port_msg->peer_guid, 16);

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true) {
			drm_dp_send_link_address(mstb->mgr, port->mstb);
			port->mstb->link_address_sent = true;
		}
	}

	if (created && !port->input) {
		char proppath[255];
		build_mst_prop_path(port, mstb, proppath);
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
	}

	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			drm_dp_check_port_guid(mstb, port);
			dowork = true;
		} else {
			port->guid_valid = false;
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							      u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = rad[i / 2] >> shift;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				if (!port->mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					return NULL;
				}

				mstb = port->mstb;
				break;
			}
		}
	}
	kref_get(&mstb->kref);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;

	if (!mstb->link_address_sent) {
		drm_dp_send_link_address(mgr, mstb);
		mstb->link_address_sent = true;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb)
			drm_dp_check_and_send_link_address(mgr, port->mstb);
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);

	drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
			WARN(1, "fail\n");

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}
	hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
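/*
 * Each branch device has two sideband transaction slots (seqno 0/1),
 * so at most two down requests can be outstanding per branch; the
 * txmsg parks in tx_slots[] above until the matching down reply
 * arrives or times out.
 */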
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
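/*
 * Worked example of the chunking above: a transfer is at most 48 bytes,
 * so with LCT 1 (3 header bytes) each chunk carries 48 - 3 - 1(CRC) =
 * 44 message bytes; a 100 byte request therefore goes out as three
 * chunks, SOMT set on the first and EOMT on the last.
 */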

/* must be called holding qlock */
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up(&mgr->tx_waitq);
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_upq)) {
		mgr->tx_up_in_progress = false;
		return;
	}

	txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, true);
	if (ret == 1) {
		/* up txmsgs aren't put in slots - so free after we send it */
		list_del(&txmsg->next);
		kfree(txmsg);
	} else if (ret)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
	mgr->tx_up_in_progress = true;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else
		DRM_DEBUG_KMS("link address failed %d\n", ret);

	kfree(txmsg);
	return 0;
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_dp_mst_port *port,
			    int id,
			    int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port->port_num,
				     id,
				     pbn);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				       int id,
				       struct drm_dp_payload *payload)
{
	int ret;

	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
	if (ret < 0) {
		payload->payload_state = 0;
		return ret;
	}
	payload->payload_state = DP_PAYLOAD_LOCAL;
	return 0;
}

int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port,
				int id,
				struct drm_dp_payload *payload)
{
	int ret;
	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
	if (ret < 0)
		return ret;
	payload->payload_state = DP_PAYLOAD_REMOTE;
	return ret;
}

int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int id,
				 struct drm_dp_payload *payload)
{
	DRM_DEBUG_KMS("\n");
	/* it's okay for these to fail */
	if (port) {
		drm_dp_payload_send_msg(mgr, port, id, 0);
	}

	drm_dp_dpcd_write_payload(mgr, id, payload);
	payload->payload_state = 0;
	return 0;
}

int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
				 int id,
				 struct drm_dp_payload *payload)
{
	payload->payload_state = 0;
	return 0;
}

/**
 * drm_dp_update_payload_part1() - Execute payload update part 1
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step just writes the VCPI to the MST device. For slots->0
 * transitions, this writes the updated VCPIs and removes the
 * remote VC payloads.
 *
 * After calling this, the driver should generate ACT and payload
 * packets.
 */
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
	int i;
	int cur_slots = 1;
	struct drm_dp_payload req_payload;
	struct drm_dp_mst_port *port;

	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {
		/* solve the current payloads - compare to the hw ones
		   - update the hw view */
		req_payload.start_slot = cur_slots;
		if (mgr->proposed_vcpis[i]) {
			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
		} else {
			port = NULL;
			req_payload.num_slots = 0;
		}
		/* work out what is required to happen with this payload */
		if (mgr->payloads[i].start_slot != req_payload.start_slot ||
		    mgr->payloads[i].num_slots != req_payload.num_slots) {

			/* need to push an update for this payload */
			if (req_payload.num_slots) {
				drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
				mgr->payloads[i].num_slots = req_payload.num_slots;
			} else if (mgr->payloads[i].num_slots) {
				mgr->payloads[i].num_slots = 0;
				drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
				req_payload.payload_state = mgr->payloads[i].payload_state;
			} else
				req_payload.payload_state = 0;

			mgr->payloads[i].start_slot = req_payload.start_slot;
			mgr->payloads[i].payload_state = req_payload.payload_state;
		}
		cur_slots += req_payload.num_slots;
	}
	mutex_unlock(&mgr->payload_lock);

	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part1);

/**
 * drm_dp_update_payload_part2() - Execute payload update part 2
 * @mgr: manager to use.
 *
 * This iterates over all proposed virtual channels, and tries to
 * allocate space in the link for them. For 0->slots transitions,
 * this step writes the remote VC payload commands. For slots->0
 * this just resets some internal state.
 */
int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_mst_port *port;
	int i;
	int ret = 0;	/* initialized: not every payload state needs an update */
	mutex_lock(&mgr->payload_lock);
	for (i = 0; i < mgr->max_payloads; i++) {

		if (!mgr->proposed_vcpis[i])
			continue;

		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);

		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
			ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
			ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
		}
		if (ret) {
			mutex_unlock(&mgr->payload_lock);
			return ret;
		}
	}
	mutex_unlock(&mgr->payload_lock);
	return 0;
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);

#if 0 /* unused as of yet */
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
	txmsg->dst = port->parent;

	drm_dp_queue_down_tx(mgr, txmsg);

	return 0;
}
#endif

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes)
{
	int len;
	int ret;
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;

	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb)
		return -EINVAL;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
	txmsg->dst = mstb;

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1) {
			ret = -EINVAL;
		} else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_put_mst_branch_device(mstb);
	return ret;
}

static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
	struct drm_dp_sideband_msg_reply_body reply;

	reply.reply_type = 1;
	reply.req_type = req_type;
	drm_dp_encode_sideband_reply(&reply, msg);
	return 0;
}

static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb,
				    int req_type, int seqno, bool broadcast)
{
	struct drm_dp_sideband_msg_tx *txmsg;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	txmsg->seqno = seqno;
	drm_dp_encode_up_ack_reply(txmsg, req_type);

	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
	if (!mgr->tx_up_in_progress) {
		process_single_up_tx_qlock(mgr);
	}
	mutex_unlock(&mgr->qlock);
	return 0;
}

static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
{
	switch (dp_link_bw) {
	case DP_LINK_BW_1_62:
		return 3 * dp_link_count;
	case DP_LINK_BW_2_7:
		return 5 * dp_link_count;
	case DP_LINK_BW_5_4:
		return 10 * dp_link_count;
	}
	return 0;
}
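/*
 * These constants are PBN per time slot per lane, so e.g. 2.7 Gbps x 4
 * lanes gives a pbn_div of 20.  A 154 MHz, 30 bpp mode needs
 * 154000 * 30/8 kBps * 64/54 * 1.006 ~= 689 PBN, i.e.
 * DIV_ROUND_UP(689, 20) = 35 time slots on such a link.
 */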

/**
 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
 * @mgr: manager to set state for
 * @mst_state: true to enable MST on this connector - false to disable.
 *
 * This is called by the driver when it detects an MST capable device plugged
 * into a DP MST capable port, or when a DP MST capable device is unplugged.
 */
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
	int ret = 0;
	struct drm_dp_mst_branch *mstb = NULL;

	mutex_lock(&mgr->lock);
	if (mst_state == mgr->mst_state)
		goto out_unlock;

	mgr->mst_state = mst_state;
	/* set the device into MST mode */
	if (mst_state) {
		WARN_ON(mgr->mst_primary);

		/* get dpcd info */
		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
		if (ret != DP_RECEIVER_CAP_SIZE) {
			DRM_DEBUG_KMS("failed to read DPCD\n");
			goto out_unlock;
		}

		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
		mgr->total_pbn = 2560;
		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
		mgr->avail_slots = mgr->total_slots;

		/* add initial branch device at LCT 1 */
		mstb = drm_dp_add_mst_branch_device(1, NULL);
		if (mstb == NULL) {
			ret = -ENOMEM;
			goto out_unlock;
		}
		mstb->mgr = mgr;

		/* give this the main reference */
		mgr->mst_primary = mstb;
		kref_get(&mgr->mst_primary->kref);

		{
			struct drm_dp_payload reset_pay;
			reset_pay.start_slot = 0;
			reset_pay.num_slots = 0x3f;
			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
		}

		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
		if (ret < 0) {
			goto out_unlock;
		}

		/* sort out guid */
		ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
		if (ret != 16) {
			DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
			goto out_unlock;
		}

		mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
		if (!mgr->guid_valid) {
			ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
			mgr->guid_valid = true;
		}

		queue_work(system_long_wq, &mgr->work);

		ret = 0;
	} else {
		/* disable MST on the device */
		mstb = mgr->mst_primary;
		mgr->mst_primary = NULL;
		/* this can fail if the device is gone */
		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
		ret = 0;
		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
		mgr->payload_mask = 0;
		set_bit(0, &mgr->payload_mask);
	}

out_unlock:
	mutex_unlock(&mgr->lock);
	if (mstb)
		drm_dp_put_mst_branch_device(mstb);
	return ret;
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);

/**
 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
 * @mgr: manager to suspend
 *
 * This function tells the MST device that we can't handle UP messages
 * anymore. This should stop it from sending any since we are suspended.
 */
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
	mutex_lock(&mgr->lock);
	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
	mutex_unlock(&mgr->lock);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1887
1888/**
1889 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
1890 * @mgr: manager to resume
1891 *
 * This will fetch the DPCD to check that the device is still there;
 * if it is, it rewrites the MSTM control bits and returns 0.
 *
 * If the device has gone away this returns -1, and the driver should
 * do a full MST reprobe, in case we were undocked.
1897 */
1898int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1899{
1900 int ret = 0;
1901
1902 mutex_lock(&mgr->lock);
1903
1904 if (mgr->mst_primary) {
1905 int sret;
1906 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1907 if (sret != DP_RECEIVER_CAP_SIZE) {
1908 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1909 ret = -1;
1910 goto out_unlock;
1911 }
1912
1913 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1914 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1915 if (ret < 0) {
1916 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
1917 ret = -1;
1918 goto out_unlock;
1919 }
1920 ret = 0;
1921 } else
1922 ret = -1;
1923
1924out_unlock:
1925 mutex_unlock(&mgr->lock);
1926 return ret;
1927}
1928EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
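A suspend/resume pairing in a driver might look like the following sketch, reusing the illustrative struct foo_dp from above; the -1 return from resume is the cue to tear the topology down and do a full reprobe.

static void foo_dp_suspend(struct foo_dp *foo)
{
	if (foo->is_mst)
		drm_dp_mst_topology_mgr_suspend(&foo->mst_mgr);
}

static void foo_dp_resume(struct foo_dp *foo)
{
	if (!foo->is_mst)
		return;

	if (drm_dp_mst_topology_mgr_resume(&foo->mst_mgr) < 0) {
		/* undocked while asleep: drop MST state and reprobe */
		drm_dp_mst_topology_mgr_set_mst(&foo->mst_mgr, false);
		foo->is_mst = false;
	}
}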
1929
1930static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
1931{
1932 int len;
1933 u8 replyblock[32];
1934 int replylen, origlen, curreply;
1935 int ret;
1936 struct drm_dp_sideband_msg_rx *msg;
1937 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
1938 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
1939
1940 len = min(mgr->max_dpcd_transaction_bytes, 16);
1941 ret = drm_dp_dpcd_read(mgr->aux, basereg,
1942 replyblock, len);
	if (ret != len) {
		DRM_DEBUG_KMS("failed to read DPCD sideband msg (%d %d)\n", len, ret);
		return;
	}
1947 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
1948 if (!ret) {
1949 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
1950 return;
1951 }
1952 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
1953
1954 origlen = replylen;
1955 replylen -= len;
1956 curreply = len;
1957 while (replylen > 0) {
1958 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
1959 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
1960 replyblock, len);
		if (ret != len)
			DRM_DEBUG_KMS("failed to read a chunk (%d %d)\n", len, ret);
		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
		if (!ret)
			DRM_DEBUG_KMS("failed to build sideband msg\n");
1967 curreply += len;
1968 replylen -= len;
1969 }
1970}
1971
1972static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
1973{
1974 int ret = 0;
1975
1976 drm_dp_get_one_sb_msg(mgr, false);
1977
1978 if (mgr->down_rep_recv.have_eomt) {
1979 struct drm_dp_sideband_msg_tx *txmsg;
1980 struct drm_dp_mst_branch *mstb;
1981 int slot = -1;
1982 mstb = drm_dp_get_mst_branch_device(mgr,
1983 mgr->down_rep_recv.initial_hdr.lct,
1984 mgr->down_rep_recv.initial_hdr.rad);
1985
1986 if (!mstb) {
1987 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
1988 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1989 return 0;
1990 }
1991
1992 /* find the message */
1993 slot = mgr->down_rep_recv.initial_hdr.seqno;
1994 mutex_lock(&mgr->qlock);
1995 txmsg = mstb->tx_slots[slot];
1996 /* remove from slots */
1997 mutex_unlock(&mgr->qlock);
1998
1999 if (!txmsg) {
2000 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2001 mstb,
2002 mgr->down_rep_recv.initial_hdr.seqno,
2003 mgr->down_rep_recv.initial_hdr.lct,
2004 mgr->down_rep_recv.initial_hdr.rad[0],
2005 mgr->down_rep_recv.msg[0]);
2006 drm_dp_put_mst_branch_device(mstb);
2007 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2008 return 0;
2009 }
2010
2011 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2012 if (txmsg->reply.reply_type == 1) {
2013 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2014 }
2015
		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

		mutex_lock(&mgr->qlock);
		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
		mstb->tx_slots[slot] = NULL;
		mutex_unlock(&mgr->qlock);

		/* drop the lookup reference only after the last mstb access */
		drm_dp_put_mst_branch_device(mstb);
2023
2024 wake_up(&mgr->tx_waitq);
2025 }
2026 return ret;
2027}
2028
2029static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2030{
2031 int ret = 0;
2032 drm_dp_get_one_sb_msg(mgr, true);
2033
2034 if (mgr->up_req_recv.have_eomt) {
2035 struct drm_dp_sideband_msg_req_body msg;
2036 struct drm_dp_mst_branch *mstb;
2037 bool seqno;
2038 mstb = drm_dp_get_mst_branch_device(mgr,
2039 mgr->up_req_recv.initial_hdr.lct,
2040 mgr->up_req_recv.initial_hdr.rad);
2041 if (!mstb) {
			DRM_DEBUG_KMS("Got MST up req from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2043 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2044 return 0;
2045 }
2046
2047 seqno = mgr->up_req_recv.initial_hdr.seqno;
2048 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2049
2050 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2051 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2052 drm_dp_update_port(mstb, &msg.u.conn_stat);
2053 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2054 (*mgr->cbs->hotplug)(mgr);
2055
2056 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2057 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2058 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2059 }
2060
2061 drm_dp_put_mst_branch_device(mstb);
2062 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2063 }
2064 return ret;
2065}
2066
2067/**
2068 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2069 * @mgr: manager to notify irq for.
 * @esi: 4 bytes from SINK_COUNT_ESI
 * @handled: set to true if any of the IRQ bits were serviced here
 *
2072 * This should be called from the driver when it detects a short IRQ,
2073 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2074 * topology manager will process the sideband messages received as a result
2075 * of this.
2076 */
2077int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2078{
2079 int ret = 0;
2080 int sc;
2081 *handled = false;
2082 sc = esi[0] & 0x3f;
2083
2084 if (sc != mgr->sink_count) {
2085 mgr->sink_count = sc;
2086 *handled = true;
2087 }
2088
2089 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2090 ret = drm_dp_mst_handle_down_rep(mgr);
2091 *handled = true;
2092 }
2093
2094 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2095 ret |= drm_dp_mst_handle_up_req(mgr);
2096 *handled = true;
2097 }
2098
2099 drm_dp_mst_kick_tx(mgr);
2100 return ret;
2101}
2102EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
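A short-pulse handler built on this helper usually loops until the sink stops raising serviced IRQ bits. The sketch below follows that pattern, again on the hypothetical foo_dp; the 14-byte ESI read and the 3-byte ack mirror common driver practice rather than a requirement of this helper.

static void foo_dp_check_mst_status(struct foo_dp *foo)
{
	for (;;) {
		u8 esi[14];
		bool handled = false;

		if (drm_dp_dpcd_read(&foo->aux, DP_SINK_COUNT_ESI,
				     esi, sizeof(esi)) != sizeof(esi))
			break;

		drm_dp_mst_hpd_irq(&foo->mst_mgr, esi, &handled);
		if (!handled)
			break;

		/* ack the serviced bits so the sink can signal again */
		drm_dp_dpcd_write(&foo->aux, DP_SINK_COUNT_ESI + 1,
				  &esi[1], 3);
	}
}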
2103
2104/**
2105 * drm_dp_mst_detect_port() - get connection status for an MST port
2106 * @mgr: manager for this port
2107 * @port: unverified pointer to a port
2108 *
 * This returns the current connection state for a port. It validates the
 * port pointer still exists so the caller doesn't require a reference.
2111 */
2112enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2113{
2114 enum drm_connector_status status = connector_status_disconnected;
2115
	/* we need to search for the port in the mgr in case it's gone */
2117 port = drm_dp_get_validated_port_ref(mgr, port);
2118 if (!port)
2119 return connector_status_disconnected;
2120
2121 if (!port->ddps)
2122 goto out;
2123
2124 switch (port->pdt) {
2125 case DP_PEER_DEVICE_NONE:
2126 case DP_PEER_DEVICE_MST_BRANCHING:
2127 break;
2128
2129 case DP_PEER_DEVICE_SST_SINK:
2130 status = connector_status_connected;
2131 break;
2132 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2133 if (port->ldps)
2134 status = connector_status_connected;
2135 break;
2136 }
2137out:
2138 drm_dp_put_port(port);
2139 return status;
2140}
2141EXPORT_SYMBOL(drm_dp_mst_detect_port);
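Because the helper revalidates the port itself, a connector .detect hook can pass its possibly-stale port pointer straight through. A minimal sketch; the foo_mst_connector wrapper is hypothetical:

struct foo_mst_connector {		/* illustrative wrapper */
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *port;	/* deliberately unverified */
};

static enum drm_connector_status
foo_mst_detect(struct drm_connector *connector, bool force)
{
	struct foo_mst_connector *c =
		container_of(connector, struct foo_mst_connector, base);

	/* safe even if the port was unplugged under us */
	return drm_dp_mst_detect_port(c->mst_mgr, c->port);
}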
2142
2143/**
2144 * drm_dp_mst_get_edid() - get EDID for an MST port
2145 * @connector: toplevel connector to get EDID for
2146 * @mgr: manager for this port
2147 * @port: unverified pointer to a port.
2148 *
 * This returns an EDID for the port connected to a connector.
 * It validates the pointer still exists so the caller doesn't require a
 * reference.
2152 */
2153struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2154{
2155 struct edid *edid = NULL;
2156
	/* we need to search for the port in the mgr in case it's gone */
2158 port = drm_dp_get_validated_port_ref(mgr, port);
2159 if (!port)
2160 return NULL;
2161
2162 edid = drm_get_edid(connector, &port->aux.ddc);
2163 drm_dp_put_port(port);
2164 return edid;
2165}
2166EXPORT_SYMBOL(drm_dp_mst_get_edid);
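And the matching .get_modes hook, reusing the hypothetical foo_mst_connector above; the returned EDID is owned by the caller and must be freed:

static int foo_mst_get_modes(struct drm_connector *connector)
{
	struct foo_mst_connector *c =
		container_of(connector, struct foo_mst_connector, base);
	struct edid *edid;
	int count;

	edid = drm_dp_mst_get_edid(connector, c->mst_mgr, c->port);
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);
	count = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return count;
}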
2167
2168/**
2169 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2170 * @mgr: manager to use
2171 * @pbn: payload bandwidth to convert into slots.
2172 */
2173int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2174 int pbn)
2175{
2176 int num_slots;
2177
2178 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2179
2180 if (num_slots > mgr->avail_slots)
2181 return -ENOSPC;
2182 return num_slots;
2183}
2184EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
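This pairs naturally with drm_dp_calc_pbn_mode() (further down) for bandwidth filtering in a .mode_valid hook; a sketch under the same foo_* assumptions:

static enum drm_mode_status
foo_mst_mode_valid(struct foo_dp *foo,
		   const struct drm_display_mode *mode, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(mode->clock, bpp);

	/* -ENOSPC means the remaining timeslots can't carry this mode */
	if (drm_dp_find_vcpi_slots(&foo->mst_mgr, pbn) < 0)
		return MODE_CLOCK_HIGH;
	return MODE_OK;
}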
2185
2186static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2187 struct drm_dp_vcpi *vcpi, int pbn)
2188{
2189 int num_slots;
2190 int ret;
2191
2192 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2193
2194 if (num_slots > mgr->avail_slots)
2195 return -ENOSPC;
2196
2197 vcpi->pbn = pbn;
2198 vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2199 vcpi->num_slots = num_slots;
2200
2201 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2202 if (ret < 0)
2203 return ret;
2204 return 0;
2205}
2206
2207/**
2208 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2209 * @mgr: manager for this port
2210 * @port: port to allocate a virtual channel for.
2211 * @pbn: payload bandwidth number to request
2212 * @slots: returned number of slots for this PBN.
2213 */
2214bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2215{
2216 int ret;
2217
2218 port = drm_dp_get_validated_port_ref(mgr, port);
2219 if (!port)
2220 return false;
2221
2222 if (port->vcpi.vcpi > 0) {
2223 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
		if (pbn == port->vcpi.pbn) {
			*slots = port->vcpi.num_slots;
			/* drop the lookup reference before returning */
			drm_dp_put_port(port);
			return true;
		}
2228 }
2229
2230 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2231 if (ret) {
2232 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2233 goto out;
2234 }
2235 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2236 *slots = port->vcpi.num_slots;
2237
2238 drm_dp_put_port(port);
2239 return true;
out:
	/* also drop the lookup reference on the failure path */
	drm_dp_put_port(port);
	return false;
2242}
2243EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
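On the enable side of a modeset the driver converts the mode to PBN, asks for a VCPI, and then programs the returned slot count into its own MST transport. A sketch, with the hardware step left as a comment:

static int foo_mst_enable_stream(struct foo_dp *foo,
				 struct drm_dp_mst_port *port,
				 const struct drm_display_mode *mode, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(mode->clock, bpp);
	int slots;

	if (!drm_dp_mst_allocate_vcpi(&foo->mst_mgr, port, pbn, &slots))
		return -ENOSPC;

	/* hardware-specific: allot 'slots' timeslots to this stream */
	return 0;
}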
2244
2245/**
2246 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2247 * @mgr: manager for this port
2248 * @port: unverified pointer to a port.
2249 *
 * This just resets the number of slots for the port's VCPI for later programming.
2251 */
2252void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2253{
2254 port = drm_dp_get_validated_port_ref(mgr, port);
2255 if (!port)
2256 return;
2257 port->vcpi.num_slots = 0;
2258 drm_dp_put_port(port);
2259}
2260EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2261
2262/**
2263 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2264 * @mgr: manager for this port
2265 * @port: unverified port to deallocate vcpi for
2266 */
2267void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2268{
2269 port = drm_dp_get_validated_port_ref(mgr, port);
2270 if (!port)
2271 return;
2272
2273 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2274 port->vcpi.num_slots = 0;
2275 port->vcpi.pbn = 0;
2276 port->vcpi.aligned_pbn = 0;
2277 port->vcpi.vcpi = 0;
2278 drm_dp_put_port(port);
2279}
2280EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
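The disable path uses both helpers in sequence: reset the slot count, push the shrunken payload table, then release the VCPI id once the sink has acked. A sketch, assuming the drm_dp_update_payload_part1()/part2() helpers defined earlier in this file:

static void foo_mst_disable_stream(struct foo_dp *foo,
				   struct drm_dp_mst_port *port)
{
	drm_dp_mst_reset_vcpi_slots(&foo->mst_mgr, port);
	drm_dp_update_payload_part1(&foo->mst_mgr);

	/* hardware-specific: stop the stream, then wait for the ACT */
	drm_dp_check_act_status(&foo->mst_mgr);
	drm_dp_update_payload_part2(&foo->mst_mgr);

	/* finally release the VCPI id itself */
	drm_dp_mst_deallocate_vcpi(&foo->mst_mgr, port);
}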
2281
2282static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2283 int id, struct drm_dp_payload *payload)
2284{
2285 u8 payload_alloc[3], status;
2286 int ret;
2287 int retries = 0;
2288
2289 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2290 DP_PAYLOAD_TABLE_UPDATED);
2291
2292 payload_alloc[0] = id;
2293 payload_alloc[1] = payload->start_slot;
2294 payload_alloc[2] = payload->num_slots;
2295
2296 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2297 if (ret != 3) {
2298 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2299 goto fail;
2300 }
2301
2302retry:
2303 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2304 if (ret < 0) {
2305 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2306 goto fail;
2307 }
2308
2309 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2310 retries++;
2311 if (retries < 20) {
2312 usleep_range(10000, 20000);
2313 goto retry;
2314 }
		DRM_DEBUG_KMS("payload table not updated, status 0x%02x\n", status);
2316 ret = -EINVAL;
2317 goto fail;
2318 }
2319 ret = 0;
2320fail:
2321 return ret;
2322}
2323
2324
2325/**
2326 * drm_dp_check_act_status() - Check ACT handled status.
2327 * @mgr: manager to use
2328 *
2329 * Check the payload status bits in the DPCD for ACT handled completion.
2330 */
2331int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2332{
2333 u8 status;
2334 int ret;
2335 int count = 0;
2336
2337 do {
2338 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2339
2340 if (ret < 0) {
2341 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2342 goto fail;
2343 }
2344
2345 if (status & DP_PAYLOAD_ACT_HANDLED)
2346 break;
2347 count++;
2348 udelay(100);
2349
2350 } while (count < 30);
2351
2352 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2353 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2354 ret = -EINVAL;
2355 goto fail;
2356 }
2357 return 0;
2358fail:
2359 return ret;
2360}
2361EXPORT_SYMBOL(drm_dp_check_act_status);
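The enable-side mirror of the teardown sketch above: the driver pushes the new payload state with drm_dp_update_payload_part1(), starts the stream so the allocation change (and ACT) goes out, polls completion with this helper, then finishes with drm_dp_update_payload_part2(). A sketch:

static int foo_mst_commit_payloads(struct foo_dp *foo)
{
	int ret;

	ret = drm_dp_update_payload_part1(&foo->mst_mgr);
	if (ret)
		return ret;

	/* hardware-specific: enable the transcoder / send the ACT here */

	ret = drm_dp_check_act_status(&foo->mst_mgr);
	if (ret)
		return ret;

	return drm_dp_update_payload_part2(&foo->mst_mgr);
}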
2362
2363/**
2364 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2365 * @clock: dot clock for the mode
2366 * @bpp: bpp for the mode.
2367 *
2368 * This uses the formula in the spec to calculate the PBN value for a mode.
2369 */
2370int drm_dp_calc_pbn_mode(int clock, int bpp)
2371{
2372 fixed20_12 pix_bw;
2373 fixed20_12 fbpp;
2374 fixed20_12 result;
2375 fixed20_12 margin, tmp;
2376 u32 res;
2377
2378 pix_bw.full = dfixed_const(clock);
2379 fbpp.full = dfixed_const(bpp);
2380 tmp.full = dfixed_const(8);
2381 fbpp.full = dfixed_div(fbpp, tmp);
2382
2383 result.full = dfixed_mul(pix_bw, fbpp);
2384 margin.full = dfixed_const(54);
2385 tmp.full = dfixed_const(64);
2386 margin.full = dfixed_div(margin, tmp);
2387 result.full = dfixed_div(result, margin);
2388
2389 margin.full = dfixed_const(1006);
2390 tmp.full = dfixed_const(1000);
2391 margin.full = dfixed_div(margin, tmp);
2392 result.full = dfixed_mul(result, margin);
2393
2394 result.full = dfixed_div(result, tmp);
2395 result.full = dfixed_ceil(result);
2396 res = dfixed_trunc(result);
2397 return res;
2398}
2399EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
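In effect the routine computes PBN = ceil(clock_kHz * bpp/8 * 64/54 * 1.006 / 1000). Worked through for the first self-test case below: 154000 kHz at 30 bpp gives 154000 * 30 / 8 = 577500, scaled by 64/54 ≈ 684444.4 for the timeslot overhead, by the spec's 1.006 margin ≈ 688551, divided by 1000 ≈ 688.6, and rounded up to 689 PBN.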
2400
2401static int test_calc_pbn_mode(void)
2402{
2403 int ret;
2404 ret = drm_dp_calc_pbn_mode(154000, 30);
2405 if (ret != 689)
2406 return -EINVAL;
2407 ret = drm_dp_calc_pbn_mode(234000, 30);
2408 if (ret != 1047)
2409 return -EINVAL;
2410 return 0;
2411}
2412
2413/* we want to kick the TX after we've ack the up/down IRQs. */
2414static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2415{
2416 queue_work(system_long_wq, &mgr->tx_work);
2417}
2418
2419static void drm_dp_mst_dump_mstb(struct seq_file *m,
2420 struct drm_dp_mst_branch *mstb)
2421{
2422 struct drm_dp_mst_port *port;
2423 int tabs = mstb->lct;
2424 char prefix[10];
2425 int i;
2426
2427 for (i = 0; i < tabs; i++)
2428 prefix[i] = '\t';
2429 prefix[i] = '\0';
2430
2431 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2432 list_for_each_entry(port, &mstb->ports, next) {
2433 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
2434 if (port->mstb)
2435 drm_dp_mst_dump_mstb(m, port->mstb);
2436 }
2437}
2438
2439static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2440 char *buf)
2441{
2442 int ret;
2443 int i;
2444 for (i = 0; i < 4; i++) {
2445 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2446 if (ret != 16)
2447 break;
2448 }
2449 if (i == 4)
2450 return true;
2451 return false;
2452}
2453
2454/**
 * drm_dp_mst_dump_topology() - dump topology to seq file.
2456 * @m: seq_file to dump output to
2457 * @mgr: manager to dump current topology for.
2458 *
2459 * helper to dump MST topology to a seq file for debugfs.
2460 */
2461void drm_dp_mst_dump_topology(struct seq_file *m,
2462 struct drm_dp_mst_topology_mgr *mgr)
2463{
2464 int i;
2465 struct drm_dp_mst_port *port;
2466 mutex_lock(&mgr->lock);
2467 if (mgr->mst_primary)
2468 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2469
2470 /* dump VCPIs */
2471 mutex_unlock(&mgr->lock);
2472
2473 mutex_lock(&mgr->payload_lock);
2474 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
2475
2476 for (i = 0; i < mgr->max_payloads; i++) {
2477 if (mgr->proposed_vcpis[i]) {
2478 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2479 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
2480 } else
			seq_printf(m, "vcpi %d: unused\n", i);
2482 }
2483 for (i = 0; i < mgr->max_payloads; i++) {
2484 seq_printf(m, "payload %d: %d, %d, %d\n",
2485 i,
2486 mgr->payloads[i].payload_state,
2487 mgr->payloads[i].start_slot,
2488 mgr->payloads[i].num_slots);
2489
2490
2491 }
2492 mutex_unlock(&mgr->payload_lock);
2493
2494 mutex_lock(&mgr->lock);
2495 if (mgr->mst_primary) {
2496 u8 buf[64];
2497 bool bret;
2498 int ret;
2499 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2500 seq_printf(m, "dpcd: ");
2501 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2502 seq_printf(m, "%02x ", buf[i]);
2503 seq_printf(m, "\n");
2504 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2505 seq_printf(m, "faux/mst: ");
2506 for (i = 0; i < 2; i++)
2507 seq_printf(m, "%02x ", buf[i]);
2508 seq_printf(m, "\n");
2509 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2510 seq_printf(m, "mst ctrl: ");
2511 for (i = 0; i < 1; i++)
2512 seq_printf(m, "%02x ", buf[i]);
2513 seq_printf(m, "\n");
2514
2515 bret = dump_dp_payload_table(mgr, buf);
		if (bret) {
2517 seq_printf(m, "payload table: ");
2518 for (i = 0; i < 63; i++)
2519 seq_printf(m, "%02x ", buf[i]);
2520 seq_printf(m, "\n");
2521 }
2522
2523 }
2524
2525 mutex_unlock(&mgr->lock);
2526
2527}
2528EXPORT_SYMBOL(drm_dp_mst_dump_topology);
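Hooking this into debugfs is a one-line seq_file show function; a sketch (wiring the file up in the driver's debugfs init is omitted):

static int foo_mst_topology_show(struct seq_file *m, void *data)
{
	struct foo_dp *foo = m->private;

	drm_dp_mst_dump_topology(m, &foo->mst_mgr);
	return 0;
}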
2529
2530static void drm_dp_tx_work(struct work_struct *work)
2531{
2532 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2533
2534 mutex_lock(&mgr->qlock);
2535 if (mgr->tx_down_in_progress)
2536 process_single_down_tx_qlock(mgr);
2537 mutex_unlock(&mgr->qlock);
2538}
2539
2540/**
 * drm_dp_mst_topology_mgr_init() - initialise a topology manager
2542 * @mgr: manager struct to initialise
2543 * @dev: device providing this structure - for i2c addition.
2544 * @aux: DP helper aux channel to talk to this device
2545 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2546 * @max_payloads: maximum number of payloads this GPU can source
2547 * @conn_base_id: the connector object ID the MST device is connected to.
2548 *
2549 * Return 0 for success, or negative error code on failure
2550 */
2551int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2552 struct device *dev, struct drm_dp_aux *aux,
2553 int max_dpcd_transaction_bytes,
2554 int max_payloads, int conn_base_id)
2555{
2556 mutex_init(&mgr->lock);
2557 mutex_init(&mgr->qlock);
2558 mutex_init(&mgr->payload_lock);
2559 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2560 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2561 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2562 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2563 init_waitqueue_head(&mgr->tx_waitq);
2564 mgr->dev = dev;
2565 mgr->aux = aux;
2566 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2567 mgr->max_payloads = max_payloads;
2568 mgr->conn_base_id = conn_base_id;
2569 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2570 if (!mgr->payloads)
2571 return -ENOMEM;
2572 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2573 if (!mgr->proposed_vcpis)
2574 return -ENOMEM;
2575 set_bit(0, &mgr->payload_mask);
2576 test_calc_pbn_mode();
2577 return 0;
2578}
2579EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
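Callers typically embed the manager in their connector state and initialise it at driver load; note the driver must also install its drm_dp_mst_topology_cbs (used for hotplug notification) before enabling MST. A sketch with made-up hardware limits:

static const struct drm_dp_mst_topology_cbs foo_mst_cbs; /* driver-defined */

static int foo_dp_mst_init(struct foo_dp *foo, struct drm_device *dev,
			   int conn_base_id)
{
	int ret;

	/* 16-byte AUX transfers / 4 streams are illustrative limits */
	ret = drm_dp_mst_topology_mgr_init(&foo->mst_mgr, dev->dev,
					   &foo->aux, 16, 4, conn_base_id);
	if (ret)
		return ret;

	foo->mst_mgr.cbs = &foo_mst_cbs;
	return 0;
}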
2580
2581/**
2582 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2583 * @mgr: manager to destroy
2584 */
2585void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2586{
2587 mutex_lock(&mgr->payload_lock);
2588 kfree(mgr->payloads);
2589 mgr->payloads = NULL;
2590 kfree(mgr->proposed_vcpis);
2591 mgr->proposed_vcpis = NULL;
2592 mutex_unlock(&mgr->payload_lock);
2593 mgr->dev = NULL;
2594 mgr->aux = NULL;
2595}
2596EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
2597
2598/* I2C device */
2599static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2600 int num)
2601{
2602 struct drm_dp_aux *aux = adapter->algo_data;
2603 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2604 struct drm_dp_mst_branch *mstb;
2605 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2606 unsigned int i;
2607 bool reading = false;
2608 struct drm_dp_sideband_msg_req_body msg;
2609 struct drm_dp_sideband_msg_tx *txmsg = NULL;
2610 int ret;
2611
2612 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2613 if (!mstb)
2614 return -EREMOTEIO;
2615
2616 /* construct i2c msg */
2617 /* see if last msg is a read */
2618 if (msgs[num - 1].flags & I2C_M_RD)
2619 reading = true;
2620
2621 if (!reading) {
2622 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
2623 ret = -EIO;
2624 goto out;
2625 }
2626
2627 msg.req_type = DP_REMOTE_I2C_READ;
2628 msg.u.i2c_read.num_transactions = num - 1;
2629 msg.u.i2c_read.port_number = port->port_num;
2630 for (i = 0; i < num - 1; i++) {
2631 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2632 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2633 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2634 }
2635 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2636 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2637
2638 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2639 if (!txmsg) {
2640 ret = -ENOMEM;
2641 goto out;
2642 }
2643
2644 txmsg->dst = mstb;
2645 drm_dp_encode_sideband_req(&msg, txmsg);
2646
2647 drm_dp_queue_down_tx(mgr, txmsg);
2648
2649 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2650 if (ret > 0) {
2651
2652 if (txmsg->reply.reply_type == 1) { /* got a NAK back */
2653 ret = -EREMOTEIO;
2654 goto out;
2655 }
2656 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
2657 ret = -EIO;
2658 goto out;
2659 }
2660 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
2661 ret = num;
2662 }
2663out:
2664 kfree(txmsg);
2665 drm_dp_put_mst_branch_device(mstb);
2666 return ret;
2667}
2668
2669static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
2670{
2671 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
2672 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
2673 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
2674 I2C_FUNC_10BIT_ADDR;
2675}
2676
2677static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
2678 .functionality = drm_dp_mst_i2c_functionality,
2679 .master_xfer = drm_dp_mst_i2c_xfer,
2680};
2681
2682/**
2683 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
2684 * @aux: DisplayPort AUX channel
2685 *
2686 * Returns 0 on success or a negative error code on failure.
2687 */
2688static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
2689{
2690 aux->ddc.algo = &drm_dp_mst_i2c_algo;
2691 aux->ddc.algo_data = aux;
2692 aux->ddc.retries = 3;
2693
2694 aux->ddc.class = I2C_CLASS_DDC;
2695 aux->ddc.owner = THIS_MODULE;
2696 aux->ddc.dev.parent = aux->dev;
2697 aux->ddc.dev.of_node = aux->dev->of_node;
2698
2699 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
2700 sizeof(aux->ddc.name));
2701
2702 return i2c_add_adapter(&aux->ddc);
2703}
2704
2705/**
2706 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
2707 * @aux: DisplayPort AUX channel
2708 */
2709static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
2710{
2711 i2c_del_adapter(&aux->ddc);
2712}