2 * Copyright © 2014 Red Hat
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
23 #include <linux/delay.h>
24 #include <linux/errno.h>
25 #include <linux/i2c.h>
26 #include <linux/init.h>
27 #include <linux/kernel.h>
28 #include <linux/sched.h>
29 #include <linux/seq_file.h>
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_dp_mst_helper.h>
34 #include <drm/drm_drv.h>
35 #include <drm/drm_print.h>
36 #include <drm/drm_probe_helper.h>
38 #include "drm_crtc_helper_internal.h"
39 #include "drm_dp_mst_topology_internal.h"
44 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
45 * protocol. The helpers contain a topology manager and bandwidth manager.
46 * The helpers encapsulate the sending and receiving of sideband msgs.
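*
* As a rough, driver-agnostic sketch of how these helpers tend to be wired up
* (names prefixed with my_ are illustrative driver state, not part of this
* API): the driver initializes one topology manager per MST-capable port,
* enables MST mode once an MST sink is detected, and forwards ESI interrupts
* to the manager::
*
*     ret = drm_dp_mst_topology_mgr_init(&my_port->mst_mgr, dev,
*                                        &my_port->dp_aux, 16, 6,
*                                        my_conn_base_id);
*     if (ret)
*         goto err;
*
*     ret = drm_dp_mst_topology_mgr_set_mst(&my_port->mst_mgr, true);
*
*     drm_dp_mst_hpd_irq(&my_port->mst_mgr, esi, &handled);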
48 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
51 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
53 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
55 struct drm_dp_payload *payload);
57 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
58 struct drm_dp_mst_port *port,
59 int offset, int size, u8 *bytes);
60 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
61 struct drm_dp_mst_port *port,
62 int offset, int size, u8 *bytes);
64 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
65 struct drm_dp_mst_branch *mstb);
66 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
67 struct drm_dp_mst_branch *mstb,
68 struct drm_dp_mst_port *port);
69 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
72 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
73 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
74 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
76 #define DBG_PREFIX "[dp_mst]"
78 #define DP_STR(x) [DP_ ## x] = #x
80 static const char *drm_dp_mst_req_type_str(u8 req_type)
82 static const char * const req_type_str[] = {
83 DP_STR(GET_MSG_TRANSACTION_VERSION),
85 DP_STR(CONNECTION_STATUS_NOTIFY),
86 DP_STR(ENUM_PATH_RESOURCES),
87 DP_STR(ALLOCATE_PAYLOAD),
88 DP_STR(QUERY_PAYLOAD),
89 DP_STR(RESOURCE_STATUS_NOTIFY),
90 DP_STR(CLEAR_PAYLOAD_ID_TABLE),
91 DP_STR(REMOTE_DPCD_READ),
92 DP_STR(REMOTE_DPCD_WRITE),
93 DP_STR(REMOTE_I2C_READ),
94 DP_STR(REMOTE_I2C_WRITE),
96 DP_STR(POWER_DOWN_PHY),
97 DP_STR(SINK_EVENT_NOTIFY),
98 DP_STR(QUERY_STREAM_ENC_STATUS),
101 if (req_type >= ARRAY_SIZE(req_type_str) ||
102 !req_type_str[req_type])
105 return req_type_str[req_type];
109 #define DP_STR(x) [DP_NAK_ ## x] = #x
111 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
113 static const char * const nak_reason_str[] = {
114 DP_STR(WRITE_FAILURE),
115 DP_STR(INVALID_READ),
119 DP_STR(LINK_FAILURE),
120 DP_STR(NO_RESOURCES),
123 DP_STR(ALLOCATE_FAIL),
126 if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
127 !nak_reason_str[nak_reason])
130 return nak_reason_str[nak_reason];
134 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
136 static const char *drm_dp_mst_sideband_tx_state_str(int state)
138 static const char * const sideband_reason_str[] = {
146 if (state >= ARRAY_SIZE(sideband_reason_str) ||
147 !sideband_reason_str[state])
150 return sideband_reason_str[state];
154 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
159 for (i = 0; i < lct; i++) {
161 unpacked_rad[i] = rad[i / 2] >> 4;
163 unpacked_rad[i] = rad[i / 2] & 0xf;
166 /* TODO: Eventually add something to printk so we can format the rad
169 return snprintf(out, len, "%*phC", lct, unpacked_rad);
172 /* sideband msg handling */
173 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
178 int number_of_bits = num_nibbles * 4;
181 while (number_of_bits != 0) {
184 remainder |= (data[array_index] & bitmask) >> bitshift;
192 if ((remainder & 0x10) == 0x10)
197 while (number_of_bits != 0) {
200 if ((remainder & 0x10) != 0)
207 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
212 int number_of_bits = number_of_bytes * 8;
215 while (number_of_bits != 0) {
218 remainder |= (data[array_index] & bitmask) >> bitshift;
226 if ((remainder & 0x100) == 0x100)
231 while (number_of_bits != 0) {
234 if ((remainder & 0x100) != 0)
238 return remainder & 0xff;
240 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
243 size += (hdr->lct / 2);
247 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
253 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
254 for (i = 0; i < (hdr->lct / 2); i++)
255 buf[idx++] = hdr->rad[i];
256 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
257 (hdr->msg_len & 0x3f);
258 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
260 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
261 buf[idx - 1] |= (crc4 & 0xf);
266 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
267 u8 *buf, int buflen, u8 *hdrlen)
276 len += ((buf[0] & 0xf0) >> 4) / 2;
279 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
281 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
282 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
286 hdr->lct = (buf[0] & 0xf0) >> 4;
287 hdr->lcr = (buf[0] & 0xf);
289 for (i = 0; i < (hdr->lct / 2); i++)
290 hdr->rad[i] = buf[idx++];
291 hdr->broadcast = (buf[idx] >> 7) & 0x1;
292 hdr->path_msg = (buf[idx] >> 6) & 0x1;
293 hdr->msg_len = buf[idx] & 0x3f;
295 hdr->somt = (buf[idx] >> 7) & 0x1;
296 hdr->eomt = (buf[idx] >> 6) & 0x1;
297 hdr->seqno = (buf[idx] >> 4) & 0x1;
304 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
305 struct drm_dp_sideband_msg_tx *raw)
310 buf[idx++] = req->req_type & 0x7f;
312 switch (req->req_type) {
313 case DP_ENUM_PATH_RESOURCES:
314 case DP_POWER_DOWN_PHY:
315 case DP_POWER_UP_PHY:
316 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
319 case DP_ALLOCATE_PAYLOAD:
320 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
321 (req->u.allocate_payload.number_sdp_streams & 0xf);
323 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
325 buf[idx] = (req->u.allocate_payload.pbn >> 8);
327 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
329 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
330 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
331 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
334 if (req->u.allocate_payload.number_sdp_streams & 1) {
335 i = req->u.allocate_payload.number_sdp_streams - 1;
336 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
340 case DP_QUERY_PAYLOAD:
341 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
343 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
346 case DP_REMOTE_DPCD_READ:
347 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
348 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
350 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
352 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
354 buf[idx] = (req->u.dpcd_read.num_bytes);
358 case DP_REMOTE_DPCD_WRITE:
359 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
360 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
362 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
364 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
366 buf[idx] = (req->u.dpcd_write.num_bytes);
368 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
369 idx += req->u.dpcd_write.num_bytes;
371 case DP_REMOTE_I2C_READ:
372 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
373 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
375 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
376 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
378 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
380 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
381 idx += req->u.i2c_read.transactions[i].num_bytes;
383 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
384 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
387 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
389 buf[idx] = (req->u.i2c_read.num_bytes_read);
393 case DP_REMOTE_I2C_WRITE:
394 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
396 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
398 buf[idx] = (req->u.i2c_write.num_bytes);
400 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
401 idx += req->u.i2c_write.num_bytes;
406 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
408 /* Decode a sideband request we've encoded, mainly used for debugging */
410 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
411 struct drm_dp_sideband_msg_req_body *req)
413 const u8 *buf = raw->msg;
416 req->req_type = buf[idx++] & 0x7f;
417 switch (req->req_type) {
418 case DP_ENUM_PATH_RESOURCES:
419 case DP_POWER_DOWN_PHY:
420 case DP_POWER_UP_PHY:
421 req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
423 case DP_ALLOCATE_PAYLOAD:
425 struct drm_dp_allocate_payload *a =
426 &req->u.allocate_payload;
428 a->number_sdp_streams = buf[idx] & 0xf;
429 a->port_number = (buf[idx] >> 4) & 0xf;
431 WARN_ON(buf[++idx] & 0x80);
432 a->vcpi = buf[idx] & 0x7f;
434 a->pbn = buf[++idx] << 8;
435 a->pbn |= buf[++idx];
438 for (i = 0; i < a->number_sdp_streams; i++) {
439 a->sdp_stream_sink[i] =
440 (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
444 case DP_QUERY_PAYLOAD:
445 req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
446 WARN_ON(buf[++idx] & 0x80);
447 req->u.query_payload.vcpi = buf[idx] & 0x7f;
449 case DP_REMOTE_DPCD_READ:
451 struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
453 r->port_number = (buf[idx] >> 4) & 0xf;
455 r->dpcd_address = (buf[idx] << 16) & 0xf0000;
456 r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
457 r->dpcd_address |= buf[++idx] & 0xff;
459 r->num_bytes = buf[++idx];
462 case DP_REMOTE_DPCD_WRITE:
464 struct drm_dp_remote_dpcd_write *w =
467 w->port_number = (buf[idx] >> 4) & 0xf;
469 w->dpcd_address = (buf[idx] << 16) & 0xf0000;
470 w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
471 w->dpcd_address |= buf[++idx] & 0xff;
473 w->num_bytes = buf[++idx];
475 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
481 case DP_REMOTE_I2C_READ:
483 struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
484 struct drm_dp_remote_i2c_read_tx *tx;
487 r->num_transactions = buf[idx] & 0x3;
488 r->port_number = (buf[idx] >> 4) & 0xf;
489 for (i = 0; i < r->num_transactions; i++) {
490 tx = &r->transactions[i];
492 tx->i2c_dev_id = buf[++idx] & 0x7f;
493 tx->num_bytes = buf[++idx];
494 tx->bytes = kmemdup(&buf[++idx],
501 idx += tx->num_bytes;
502 tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
503 tx->i2c_transaction_delay = buf[idx] & 0xf;
507 for (i = 0; i < r->num_transactions; i++)
512 r->read_i2c_device_id = buf[++idx] & 0x7f;
513 r->num_bytes_read = buf[++idx];
516 case DP_REMOTE_I2C_WRITE:
518 struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
520 w->port_number = (buf[idx] >> 4) & 0xf;
521 w->write_i2c_device_id = buf[++idx] & 0x7f;
522 w->num_bytes = buf[++idx];
523 w->bytes = kmemdup(&buf[++idx], w->num_bytes,
533 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
536 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
537 int indent, struct drm_printer *printer)
541 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
542 if (req->req_type == DP_LINK_ADDRESS) {
543 /* No contents to print */
544 P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
548 P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
551 switch (req->req_type) {
552 case DP_ENUM_PATH_RESOURCES:
553 case DP_POWER_DOWN_PHY:
554 case DP_POWER_UP_PHY:
555 P("port=%d\n", req->u.port_num.port_number);
557 case DP_ALLOCATE_PAYLOAD:
558 P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
559 req->u.allocate_payload.port_number,
560 req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
561 req->u.allocate_payload.number_sdp_streams,
562 req->u.allocate_payload.number_sdp_streams,
563 req->u.allocate_payload.sdp_stream_sink);
565 case DP_QUERY_PAYLOAD:
566 P("port=%d vcpi=%d\n",
567 req->u.query_payload.port_number,
568 req->u.query_payload.vcpi);
570 case DP_REMOTE_DPCD_READ:
571 P("port=%d dpcd_addr=%05x len=%d\n",
572 req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
573 req->u.dpcd_read.num_bytes);
575 case DP_REMOTE_DPCD_WRITE:
576 P("port=%d addr=%05x len=%d: %*ph\n",
577 req->u.dpcd_write.port_number,
578 req->u.dpcd_write.dpcd_address,
579 req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
580 req->u.dpcd_write.bytes);
582 case DP_REMOTE_I2C_READ:
583 P("port=%d num_tx=%d id=%d size=%d:\n",
584 req->u.i2c_read.port_number,
585 req->u.i2c_read.num_transactions,
586 req->u.i2c_read.read_i2c_device_id,
587 req->u.i2c_read.num_bytes_read);
590 for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
591 const struct drm_dp_remote_i2c_read_tx *rtx =
592 &req->u.i2c_read.transactions[i];
594 P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
595 i, rtx->i2c_dev_id, rtx->num_bytes,
596 rtx->no_stop_bit, rtx->i2c_transaction_delay,
597 rtx->num_bytes, rtx->bytes);
600 case DP_REMOTE_I2C_WRITE:
601 P("port=%d id=%d size=%d: %*ph\n",
602 req->u.i2c_write.port_number,
603 req->u.i2c_write.write_i2c_device_id,
604 req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
605 req->u.i2c_write.bytes);
613 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
616 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
617 const struct drm_dp_sideband_msg_tx *txmsg)
619 struct drm_dp_sideband_msg_req_body req;
624 drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
626 drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
627 txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
628 drm_dp_mst_sideband_tx_state_str(txmsg->state),
629 txmsg->path_msg, buf);
631 ret = drm_dp_decode_sideband_req(txmsg, &req);
633 drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
636 drm_dp_dump_sideband_msg_req_body(&req, 1, p);
638 switch (req.req_type) {
639 case DP_REMOTE_DPCD_WRITE:
640 kfree(req.u.dpcd_write.bytes);
642 case DP_REMOTE_I2C_READ:
643 for (i = 0; i < req.u.i2c_read.num_transactions; i++)
644 kfree(req.u.i2c_read.transactions[i].bytes);
646 case DP_REMOTE_I2C_WRITE:
647 kfree(req.u.i2c_write.bytes);
652 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
655 crc4 = drm_dp_msg_data_crc4(msg, len);
659 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
660 struct drm_dp_sideband_msg_tx *raw)
665 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
670 /* this adds a chunk of msg to the builder to get the final msg */
671 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
672 u8 *replybuf, u8 replybuflen, bool hdr)
679 struct drm_dp_sideband_msg_hdr recv_hdr;
680 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
682 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
687 * ignore out-of-order messages or messages that are part of a
690 if (!recv_hdr.somt && !msg->have_somt)
693 /* get length contained in this portion */
694 msg->curchunk_len = recv_hdr.msg_len;
695 msg->curchunk_hdrlen = hdrlen;
697 /* we have already gotten an somt - don't bother parsing */
698 if (recv_hdr.somt && msg->have_somt)
702 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
703 msg->have_somt = true;
706 msg->have_eomt = true;
708 /* copy the bytes for the remainder of this header chunk */
709 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
710 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
712 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
713 msg->curchunk_idx += replybuflen;
716 if (msg->curchunk_idx >= msg->curchunk_len) {
718 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
719 /* copy chunk into bigger msg */
720 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
721 msg->curlen += msg->curchunk_len - 1;
726 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
727 struct drm_dp_sideband_msg_reply_body *repmsg)
731 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
733 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
735 if (idx > raw->curlen)
737 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
738 if (raw->msg[idx] & 0x80)
739 repmsg->u.link_addr.ports[i].input_port = 1;
741 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
742 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
745 if (idx > raw->curlen)
747 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
748 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
749 if (repmsg->u.link_addr.ports[i].input_port == 0)
750 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
752 if (idx > raw->curlen)
754 if (repmsg->u.link_addr.ports[i].input_port == 0) {
755 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
757 if (idx > raw->curlen)
759 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
761 if (idx > raw->curlen)
763 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
764 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
768 if (idx > raw->curlen)
774 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
778 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
779 struct drm_dp_sideband_msg_reply_body *repmsg)
782 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
784 if (idx > raw->curlen)
786 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
788 if (idx > raw->curlen)
791 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
794 DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
798 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
799 struct drm_dp_sideband_msg_reply_body *repmsg)
802 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
804 if (idx > raw->curlen)
808 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
812 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
813 struct drm_dp_sideband_msg_reply_body *repmsg)
817 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
819 if (idx > raw->curlen)
821 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
824 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
827 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
831 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
832 struct drm_dp_sideband_msg_reply_body *repmsg)
835 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
837 if (idx > raw->curlen)
839 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
841 if (idx > raw->curlen)
843 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
845 if (idx > raw->curlen)
849 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
853 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
854 struct drm_dp_sideband_msg_reply_body *repmsg)
857 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
859 if (idx > raw->curlen)
861 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
863 if (idx > raw->curlen)
865 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
867 if (idx > raw->curlen)
871 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
875 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
876 struct drm_dp_sideband_msg_reply_body *repmsg)
879 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
881 if (idx > raw->curlen)
883 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
885 if (idx > raw->curlen)
889 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
893 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
894 struct drm_dp_sideband_msg_reply_body *repmsg)
898 repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
900 if (idx > raw->curlen) {
901 DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
908 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
909 struct drm_dp_sideband_msg_reply_body *msg)
911 memset(msg, 0, sizeof(*msg));
912 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
913 msg->req_type = (raw->msg[0] & 0x7f);
915 if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
916 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
917 msg->u.nak.reason = raw->msg[17];
918 msg->u.nak.nak_data = raw->msg[18];
922 switch (msg->req_type) {
923 case DP_LINK_ADDRESS:
924 return drm_dp_sideband_parse_link_address(raw, msg);
925 case DP_QUERY_PAYLOAD:
926 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
927 case DP_REMOTE_DPCD_READ:
928 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
929 case DP_REMOTE_DPCD_WRITE:
930 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
931 case DP_REMOTE_I2C_READ:
932 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
933 case DP_ENUM_PATH_RESOURCES:
934 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
935 case DP_ALLOCATE_PAYLOAD:
936 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
937 case DP_POWER_DOWN_PHY:
938 case DP_POWER_UP_PHY:
939 return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
941 DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
942 drm_dp_mst_req_type_str(msg->req_type));
947 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
948 struct drm_dp_sideband_msg_req_body *msg)
952 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
954 if (idx > raw->curlen)
957 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
959 if (idx > raw->curlen)
962 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
963 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
964 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
965 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
966 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
970 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
974 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
975 struct drm_dp_sideband_msg_req_body *msg)
979 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
981 if (idx > raw->curlen)
984 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
986 if (idx > raw->curlen)
989 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
993 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
997 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
998 struct drm_dp_sideband_msg_req_body *msg)
1000 memset(msg, 0, sizeof(*msg));
1001 msg->req_type = (raw->msg[0] & 0x7f);
1003 switch (msg->req_type) {
1004 case DP_CONNECTION_STATUS_NOTIFY:
1005 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
1006 case DP_RESOURCE_STATUS_NOTIFY:
1007 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
1009 DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
1010 drm_dp_mst_req_type_str(msg->req_type));
1015 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
1017 struct drm_dp_sideband_msg_req_body req;
1019 req.req_type = DP_REMOTE_DPCD_WRITE;
1020 req.u.dpcd_write.port_number = port_num;
1021 req.u.dpcd_write.dpcd_address = offset;
1022 req.u.dpcd_write.num_bytes = num_bytes;
1023 req.u.dpcd_write.bytes = bytes;
1024 drm_dp_encode_sideband_req(&req, msg);
1029 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
1031 struct drm_dp_sideband_msg_req_body req;
1033 req.req_type = DP_LINK_ADDRESS;
1034 drm_dp_encode_sideband_req(&req, msg);
1038 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
1040 struct drm_dp_sideband_msg_req_body req;
1042 req.req_type = DP_ENUM_PATH_RESOURCES;
1043 req.u.port_num.port_number = port_num;
1044 drm_dp_encode_sideband_req(&req, msg);
1045 msg->path_msg = true;
1049 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
1050 u8 vcpi, uint16_t pbn,
1051 u8 number_sdp_streams,
1052 u8 *sdp_stream_sink)
1054 struct drm_dp_sideband_msg_req_body req;
1055 memset(&req, 0, sizeof(req));
1056 req.req_type = DP_ALLOCATE_PAYLOAD;
1057 req.u.allocate_payload.port_number = port_num;
1058 req.u.allocate_payload.vcpi = vcpi;
1059 req.u.allocate_payload.pbn = pbn;
1060 req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
1061 memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
1062 number_sdp_streams);
1063 drm_dp_encode_sideband_req(&req, msg);
1064 msg->path_msg = true;
1068 static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
1069 int port_num, bool power_up)
1071 struct drm_dp_sideband_msg_req_body req;
1074 req.req_type = DP_POWER_UP_PHY;
1076 req.req_type = DP_POWER_DOWN_PHY;
1078 req.u.port_num.port_number = port_num;
1079 drm_dp_encode_sideband_req(&req, msg);
1080 msg->path_msg = true;
1084 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1085 struct drm_dp_vcpi *vcpi)
1089 mutex_lock(&mgr->payload_lock);
1090 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
1091 if (ret > mgr->max_payloads) {
1093 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
1097 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
1098 if (vcpi_ret > mgr->max_payloads) {
1100 DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
1104 set_bit(ret, &mgr->payload_mask);
1105 set_bit(vcpi_ret, &mgr->vcpi_mask);
1106 vcpi->vcpi = vcpi_ret + 1;
1107 mgr->proposed_vcpis[ret - 1] = vcpi;
1109 mutex_unlock(&mgr->payload_lock);
1113 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
1120 mutex_lock(&mgr->payload_lock);
1121 DRM_DEBUG_KMS("putting payload %d\n", vcpi);
1122 clear_bit(vcpi - 1, &mgr->vcpi_mask);
1124 for (i = 0; i < mgr->max_payloads; i++) {
1125 if (mgr->proposed_vcpis[i] &&
1126 mgr->proposed_vcpis[i]->vcpi == vcpi) {
1127 mgr->proposed_vcpis[i] = NULL;
1128 clear_bit(i + 1, &mgr->payload_mask);
1131 mutex_unlock(&mgr->payload_lock);
1134 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
1135 struct drm_dp_sideband_msg_tx *txmsg)
1140 * All updates to txmsg->state are protected by mgr->qlock, and the two
1141 * cases we check here are terminal states. For those the barriers
1142 * provided by the wake_up/wait_event pair are enough.
1144 state = READ_ONCE(txmsg->state);
1145 return (state == DRM_DP_SIDEBAND_TX_RX ||
1146 state == DRM_DP_SIDEBAND_TX_TIMEOUT);
1149 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
1150 struct drm_dp_sideband_msg_tx *txmsg)
1152 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1155 ret = wait_event_timeout(mgr->tx_waitq,
1156 check_txmsg_state(mgr, txmsg),
1158 mutex_lock(&mstb->mgr->qlock);
1160 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
1165 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
1167 /* dump some state */
1171 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
1172 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
1173 list_del(&txmsg->next);
1176 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
1177 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
1178 mstb->tx_slots[txmsg->seqno] = NULL;
1182 if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
1183 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
1185 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
1187 mutex_unlock(&mgr->qlock);
1192 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
1194 struct drm_dp_mst_branch *mstb;
1196 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
1202 memcpy(mstb->rad, rad, lct / 2);
1203 INIT_LIST_HEAD(&mstb->ports);
1204 kref_init(&mstb->topology_kref);
1205 kref_init(&mstb->malloc_kref);
1209 static void drm_dp_free_mst_branch_device(struct kref *kref)
1211 struct drm_dp_mst_branch *mstb =
1212 container_of(kref, struct drm_dp_mst_branch, malloc_kref);
1214 if (mstb->port_parent)
1215 drm_dp_mst_put_port_malloc(mstb->port_parent);
1221 * DOC: Branch device and port refcounting
1223 * Topology refcount overview
1224 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
1226 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
1227 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
1228 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
1230 * Topology refcounts are not exposed to drivers, and are handled internally
1231 * by the DP MST helpers. The helpers use them in order to prevent the
1232 * in-memory topology state from being changed in the middle of critical
1233 * operations like changing the internal state of payload allocations. This
1234 * means each branch and port will be considered to be connected to the rest
1235 * of the topology until its topology refcount reaches zero. Additionally,
1236 * for ports this means that their associated &struct drm_connector will stay
1237 * registered with userspace until the port's refcount reaches 0.
1239 * Malloc refcount overview
1240 * ~~~~~~~~~~~~~~~~~~~~~~~~
1242 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
1243 * drm_dp_mst_branch allocated even after all of its topology references have
1244 * been dropped, so that the driver or MST helpers can safely access each
1245 * branch's last known state before it was disconnected from the topology.
1246 * When the malloc refcount of a port or branch reaches 0, the memory
1247 * allocation containing the &struct drm_dp_mst_branch or &struct
1248 * drm_dp_mst_port respectively will be freed.
1250 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
1251 * to drivers. As of writing this documentation, there are no drivers that
1252 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
1253 * helpers. Exposing this API to drivers in a race-free manner would take more
1254 * tweaking of the refcounting scheme, however patches are welcome provided
1255 * there is a legitimate driver usecase for this.
1257 * Refcount relationships in a topology
1258 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
1260 * Let's take a look at why the relationship between topology and malloc
1261 * refcounts is designed the way it is.
1263 * .. kernel-figure:: dp-mst/topology-figure-1.dot
1265 * An example of topology and malloc refs in a DP MST topology with two
1266 * active payloads. Topology refcount increments are indicated by solid
1267 * lines, and malloc refcount increments are indicated by dashed lines.
1268 * Each starts from the branch which incremented the refcount, and ends at
1269 * the branch to which the refcount belongs, i.e. the arrow points the
1270 * same way as the C pointers used to reference a structure.
1272 * As you can see in the above figure, every branch increments the topology
1273 * refcount of its children, and increments the malloc refcount of its
1274 * parent. Additionally, every payload increments the malloc refcount of its
1275 * assigned port by 1.
1277 * So, what would happen if MSTB #3 from the above figure was unplugged from
1278 * the system, but the driver hadn't yet removed payload #2 from port #3? The
1279 * topology would start to look like the figure below.
1281 * .. kernel-figure:: dp-mst/topology-figure-2.dot
1283 * Ports and branch devices which have been released from memory are
1284 * colored grey, and references which have been removed are colored red.
1286 * Whenever a port or branch device's topology refcount reaches zero, it will
1287 * decrement the topology refcounts of all its children, the malloc refcount
1288 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
1289 * #4, this means they both have been disconnected from the topology and freed
1290 * from memory. But, because payload #2 is still holding a reference to port
1291 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
1292 * is still accessible from memory. This also means port #3 has not yet
1293 * decremented the malloc refcount of MSTB #3, so its &struct
1294 * drm_dp_mst_branch will also stay allocated in memory until port #3's
1295 * malloc refcount reaches 0.
1297 * This relationship is necessary because in order to release payload #2, we
1298 * need to be able to figure out the last relative of port #3 that's still
1299 * connected to the topology. In this case, we would travel up the topology as
1302 * .. kernel-figure:: dp-mst/topology-figure-3.dot
1304 * And finally, remove payload #2 by communicating with port #2 through
1305 * sideband transactions.
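*
* Driver-side example
* ~~~~~~~~~~~~~~~~~~~
*
* As a rough sketch (struct my_mst_connector and its helpers are made-up
* driver types, not part of this API), a driver's
* &drm_dp_mst_topology_cbs.add_connector hook would typically pin the port
* with a malloc reference for as long as its connector refers to it::
*
*     static struct drm_connector *
*     my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
*                          struct drm_dp_mst_port *port, const char *path)
*     {
*         struct my_mst_connector *conn = my_mst_connector_alloc(mgr, path);
*
*         if (!conn)
*             return NULL;
*
*         conn->port = port;
*         drm_dp_mst_get_port_malloc(port);
*
*         return &conn->base;
*     }
*
* with a matching drm_dp_mst_put_port_malloc() in the connector's destroy
* hook once the connector is torn down.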
1309 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
1311 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
1313 * Increments &drm_dp_mst_branch.malloc_kref. When
1314 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1315 * will be released and @mstb may no longer be used.
1317 * See also: drm_dp_mst_put_mstb_malloc()
1320 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
1322 kref_get(&mstb->malloc_kref);
1323 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
1327 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
1329 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
1331 * Decrements &drm_dp_mst_branch.malloc_kref. When
1332 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
1333 * will be released and @mstb may no longer be used.
1335 * See also: drm_dp_mst_get_mstb_malloc()
1338 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
1340 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
1341 kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
1344 static void drm_dp_free_mst_port(struct kref *kref)
1346 struct drm_dp_mst_port *port =
1347 container_of(kref, struct drm_dp_mst_port, malloc_kref);
1349 drm_dp_mst_put_mstb_malloc(port->parent);
1354 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
1355 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
1357 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1358 * reaches 0, the memory allocation for @port will be released and @port may
1359 * no longer be used.
1361 * Because @port could potentially be freed at any time by the DP MST helpers
1362 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
1363 * function, drivers that wish to make use of &struct drm_dp_mst_port should
1364 * ensure that they grab at least one main malloc reference to their MST ports
1365 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
1366 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
1368 * See also: drm_dp_mst_put_port_malloc()
1371 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
1373 kref_get(&port->malloc_kref);
1374 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
1376 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
1379 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
1380 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
1382 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
1383 * reaches 0, the memory allocation for @port will be released and @port may
1384 * no longer be used.
1386 * See also: drm_dp_mst_get_port_malloc()
1389 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
1391 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
1392 kref_put(&port->malloc_kref, drm_dp_free_mst_port);
1394 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
1396 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
1398 struct drm_dp_mst_branch *mstb =
1399 container_of(kref, struct drm_dp_mst_branch, topology_kref);
1400 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
1401 struct drm_dp_mst_port *port, *tmp;
1402 bool wake_tx = false;
1404 mutex_lock(&mgr->lock);
1405 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
1406 list_del(&port->next);
1407 drm_dp_mst_topology_put_port(port);
1409 mutex_unlock(&mgr->lock);
1411 /* drop any tx slots msg */
1412 mutex_lock(&mstb->mgr->qlock);
1413 if (mstb->tx_slots[0]) {
1414 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1415 mstb->tx_slots[0] = NULL;
1418 if (mstb->tx_slots[1]) {
1419 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1420 mstb->tx_slots[1] = NULL;
1423 mutex_unlock(&mstb->mgr->qlock);
1426 wake_up_all(&mstb->mgr->tx_waitq);
1428 drm_dp_mst_put_mstb_malloc(mstb);
1432 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
1433 * branch device unless it's zero
1434 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
1436 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
1437 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
1438 * reached 0). Holding a topology reference implies that a malloc reference
1439 * will be held to @mstb as long as the user holds the topology reference.
1441 * Care should be taken to ensure that the user has at least one malloc
1442 * reference to @mstb. If you already have a topology reference to @mstb, you
1443 * should use drm_dp_mst_topology_get_mstb() instead.
1446 * drm_dp_mst_topology_get_mstb()
1447 * drm_dp_mst_topology_put_mstb()
1450 * * 1: A topology reference was grabbed successfully
1451 * * 0: @mstb is no longer in the topology, no reference was grabbed
1453 static int __must_check
1454 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
1456 int ret = kref_get_unless_zero(&mstb->topology_kref);
1459 DRM_DEBUG("mstb %p (%d)\n", mstb,
1460 kref_read(&mstb->topology_kref));
1466 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
1468 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
1470 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
1471 * not it's already reached 0. This is only valid to use in scenarios where
1472 * you are already guaranteed to have at least one active topology reference
1473 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
1476 * drm_dp_mst_topology_try_get_mstb()
1477 * drm_dp_mst_topology_put_mstb()
1479 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
1481 WARN_ON(kref_read(&mstb->topology_kref) == 0);
1482 kref_get(&mstb->topology_kref);
1483 DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
1487 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
1489 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
1491 * Releases a topology reference from @mstb by decrementing
1492 * &drm_dp_mst_branch.topology_kref.
1495 * drm_dp_mst_topology_try_get_mstb()
1496 * drm_dp_mst_topology_get_mstb()
1499 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
1501 DRM_DEBUG("mstb %p (%d)\n",
1502 mstb, kref_read(&mstb->topology_kref) - 1);
1503 kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
1506 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
1508 struct drm_dp_mst_branch *mstb;
1511 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1512 case DP_PEER_DEVICE_SST_SINK:
1513 /* remove i2c over sideband */
1514 drm_dp_mst_unregister_i2c_bus(&port->aux);
1516 case DP_PEER_DEVICE_MST_BRANCHING:
1519 drm_dp_mst_topology_put_mstb(mstb);
1524 static void drm_dp_destroy_port(struct kref *kref)
1526 struct drm_dp_mst_port *port =
1527 container_of(kref, struct drm_dp_mst_port, topology_kref);
1528 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
1531 kfree(port->cached_edid);
1534 * The only time we don't have a connector
1535 * on an output port is if the connector init
1538 if (port->connector) {
1539 /* we can't destroy the connector here, as
1540 * we might be holding the mode_config.mutex
1541 * from an EDID retrieval */
1543 mutex_lock(&mgr->destroy_connector_lock);
1544 list_add(&port->next, &mgr->destroy_connector_list);
1545 mutex_unlock(&mgr->destroy_connector_lock);
1546 schedule_work(&mgr->destroy_connector_work);
1549 /* no need to clean up the vcpi here -
1550 * without a connector we never set one up */
1551 drm_dp_port_teardown_pdt(port, port->pdt);
1552 port->pdt = DP_PEER_DEVICE_NONE;
1554 drm_dp_mst_put_port_malloc(port);
1558 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
1559 * port unless it's zero
1560 * @port: &struct drm_dp_mst_port to increment the topology refcount of
1562 * Attempts to grab a topology reference to @port, if it hasn't yet been
1563 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
1564 * 0). Holding a topology reference implies that a malloc reference will be
1565 * held to @port as long as the user holds the topology reference.
1567 * Care should be taken to ensure that the user has at least one malloc
1568 * reference to @port. If you already have a topology reference to @port, you
1569 * should use drm_dp_mst_topology_get_port() instead.
1572 * drm_dp_mst_topology_get_port()
1573 * drm_dp_mst_topology_put_port()
1576 * * 1: A topology reference was grabbed successfully
1577 * * 0: @port is no longer in the topology, no reference was grabbed
1579 static int __must_check
1580 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
1582 int ret = kref_get_unless_zero(&port->topology_kref);
1585 DRM_DEBUG("port %p (%d)\n", port,
1586 kref_read(&port->topology_kref));
1592 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
1593 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
1595 * Increments &drm_dp_mst_port.topology_kref without checking whether or
1596 * not it's already reached 0. This is only valid to use in scenarios where
1597 * you are already guaranteed to have at least one active topology reference
1598 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
1601 * drm_dp_mst_topology_try_get_port()
1602 * drm_dp_mst_topology_put_port()
1604 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
1606 WARN_ON(kref_read(&port->topology_kref) == 0);
1607 kref_get(&port->topology_kref);
1608 DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
1612 * drm_dp_mst_topology_put_port() - release a topology reference to a port
1613 * @port: The &struct drm_dp_mst_port to release the topology reference from
1615 * Releases a topology reference from @port by decrementing
1616 * &drm_dp_mst_port.topology_kref.
1619 * drm_dp_mst_topology_try_get_port()
1620 * drm_dp_mst_topology_get_port()
1622 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
1624 DRM_DEBUG("port %p (%d)\n",
1625 port, kref_read(&port->topology_kref) - 1);
1626 kref_put(&port->topology_kref, drm_dp_destroy_port);
1629 static struct drm_dp_mst_branch *
1630 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
1631 struct drm_dp_mst_branch *to_find)
1633 struct drm_dp_mst_port *port;
1634 struct drm_dp_mst_branch *rmstb;
1636 if (to_find == mstb)
1639 list_for_each_entry(port, &mstb->ports, next) {
1641 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1642 port->mstb, to_find);
1650 static struct drm_dp_mst_branch *
1651 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
1652 struct drm_dp_mst_branch *mstb)
1654 struct drm_dp_mst_branch *rmstb = NULL;
1656 mutex_lock(&mgr->lock);
1657 if (mgr->mst_primary) {
1658 rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
1659 mgr->mst_primary, mstb);
1661 if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
1664 mutex_unlock(&mgr->lock);
1668 static struct drm_dp_mst_port *
1669 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
1670 struct drm_dp_mst_port *to_find)
1672 struct drm_dp_mst_port *port, *mport;
1674 list_for_each_entry(port, &mstb->ports, next) {
1675 if (port == to_find)
1679 mport = drm_dp_mst_topology_get_port_validated_locked(
1680 port->mstb, to_find);
1688 static struct drm_dp_mst_port *
1689 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
1690 struct drm_dp_mst_port *port)
1692 struct drm_dp_mst_port *rport = NULL;
1694 mutex_lock(&mgr->lock);
1695 if (mgr->mst_primary) {
1696 rport = drm_dp_mst_topology_get_port_validated_locked(
1697 mgr->mst_primary, port);
1699 if (rport && !drm_dp_mst_topology_try_get_port(rport))
1702 mutex_unlock(&mgr->lock);
1706 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
1708 struct drm_dp_mst_port *port;
1711 list_for_each_entry(port, &mstb->ports, next) {
1712 if (port->port_num == port_num) {
1713 ret = drm_dp_mst_topology_try_get_port(port);
1714 return ret ? port : NULL;
1722 * calculate a new RAD for this MST branch device
1723 * if parent has an LCT of 2 then it has 1 nibble of RAD,
1724 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
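* e.g. a port with port_num 3 whose parent has LCT 2 and rad[0] = 0x10
* (first hop through port 1) ends up with LCT 3 and rad[0] = 0x13 - the
* first hop in the upper nibble, the second hop in the lower nibble.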
1726 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
1729 int parent_lct = port->parent->lct;
1731 int idx = (parent_lct - 1) / 2;
1732 if (parent_lct > 1) {
1733 memcpy(rad, port->parent->rad, idx + 1);
1734 shift = (parent_lct % 2) ? 4 : 0;
1738 rad[idx] |= port->port_num << shift;
1739 return parent_lct + 1;
1743 * returns true if the caller should send a link address request to the new mstb
1745 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
1749 bool send_link = false;
1750 switch (port->pdt) {
1751 case DP_PEER_DEVICE_DP_LEGACY_CONV:
1752 case DP_PEER_DEVICE_SST_SINK:
1753 /* add i2c over sideband */
1754 ret = drm_dp_mst_register_i2c_bus(&port->aux);
1756 case DP_PEER_DEVICE_MST_BRANCHING:
1757 lct = drm_dp_calculate_rad(port, rad);
1759 port->mstb = drm_dp_add_mst_branch_device(lct, rad);
1761 port->mstb->mgr = port->mgr;
1762 port->mstb->port_parent = port;
1764 * Make sure this port's memory allocation stays
1765 * around until its child MSTB releases it
1767 drm_dp_mst_get_port_malloc(port);
1777 * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
1778 * @aux: Fake sideband AUX CH
1779 * @offset: address of the (first) register to read
1780 * @buffer: buffer to store the register values
1781 * @size: number of bytes in @buffer
1783 * Performs the same functionality for remote devices via
1784 * sideband messaging as drm_dp_dpcd_read() does for local
1785 * devices via actual AUX CH.
1787 * Return: Number of bytes read, or negative error code on failure.
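*
* A minimal usage sketch, reading the remote sink count through the port's
* fake AUX channel (error handling elided; DP_SINK_COUNT is just an example
* register)::
*
*     u8 count, sinks;
*
*     if (drm_dp_dpcd_readb(&port->aux, DP_SINK_COUNT, &count) == 1)
*         sinks = DP_GET_SINK_COUNT(count);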
1789 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
1790 unsigned int offset, void *buffer, size_t size)
1792 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1795 return drm_dp_send_dpcd_read(port->mgr, port,
1796 offset, size, buffer);
1800 * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
1801 * @aux: Fake sideband AUX CH
1802 * @offset: address of the (first) register to write
1803 * @buffer: buffer containing the values to write
1804 * @size: number of bytes in @buffer
1806 * Performs the same functionality for remote devices via
1807 * sideband messaging as drm_dp_dpcd_write() does for local
1808 * devices via actual AUX CH.
1810 * Return: 0 on success, negative error code on failure.
1812 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
1813 unsigned int offset, void *buffer, size_t size)
1815 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
1818 return drm_dp_send_dpcd_write(port->mgr, port,
1819 offset, size, buffer);
1822 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
1826 memcpy(mstb->guid, guid, 16);
1828 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
1829 if (mstb->port_parent) {
1830 ret = drm_dp_send_dpcd_write(
1838 ret = drm_dp_dpcd_write(
1847 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
1850 size_t proppath_size)
1854 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1855 for (i = 0; i < (mstb->lct - 1); i++) {
1856 int shift = (i % 2) ? 0 : 4;
1857 int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
1858 snprintf(temp, sizeof(temp), "-%d", port_num);
1859 strlcat(proppath, temp, proppath_size);
1861 snprintf(temp, sizeof(temp), "-%d", pnum);
1862 strlcat(proppath, temp, proppath_size);
1866 * drm_dp_mst_connector_late_register() - Late MST connector registration
1867 * @connector: The MST connector
1868 * @port: The MST port for this connector
1870 * Helper to register the remote aux device for this MST port. Drivers should
1871 * call this from their mst connector's late_register hook to enable MST aux
1874 * Return: 0 on success, negative error code on failure.
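*
* A rough sketch (my_mst_connector and to_my_mst_connector are illustrative
* driver-side helpers, not part of this API)::
*
*     static int my_mst_connector_late_register(struct drm_connector *connector)
*     {
*         struct my_mst_connector *c = to_my_mst_connector(connector);
*
*         return drm_dp_mst_connector_late_register(connector, c->port);
*     }
*
* typically paired with a call to drm_dp_mst_connector_early_unregister()
* from the connector's &drm_connector_funcs.early_unregister hook.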
1876 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
1877 struct drm_dp_mst_port *port)
1879 DRM_DEBUG_KMS("registering %s remote bus for %s\n",
1880 port->aux.name, connector->kdev->kobj.name);
1882 port->aux.dev = connector->kdev;
1883 return drm_dp_aux_register_devnode(&port->aux);
1885 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
1888 * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
1889 * @connector: The MST connector
1890 * @port: The MST port for this connector
1892 * Helper to unregister the remote aux device for this MST port, registered by
1893 * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
1894 * connector's early_unregister hook.
1896 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
1897 struct drm_dp_mst_port *port)
1899 DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
1900 port->aux.name, connector->kdev->kobj.name);
1901 drm_dp_aux_unregister_devnode(&port->aux);
1903 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
1906 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
1907 struct drm_device *dev,
1908 struct drm_dp_link_addr_reply_port *port_msg)
1910 struct drm_dp_mst_port *port;
1912 bool created = false;
1916 port = drm_dp_get_port(mstb, port_msg->port_number);
1918 port = kzalloc(sizeof(*port), GFP_KERNEL);
1921 kref_init(&port->topology_kref);
1922 kref_init(&port->malloc_kref);
1923 port->parent = mstb;
1924 port->port_num = port_msg->port_number;
1925 port->mgr = mstb->mgr;
1926 port->aux.name = "DPMST";
1927 port->aux.dev = dev->dev;
1928 port->aux.is_remote = true;
1931 * Make sure the memory allocation for our parent branch stays
1932 * around until our own memory allocation is released
1934 drm_dp_mst_get_mstb_malloc(mstb);
1938 old_pdt = port->pdt;
1939 old_ddps = port->ddps;
1942 port->pdt = port_msg->peer_device_type;
1943 port->input = port_msg->input_port;
1944 port->mcs = port_msg->mcs;
1945 port->ddps = port_msg->ddps;
1946 port->ldps = port_msg->legacy_device_plug_status;
1947 port->dpcd_rev = port_msg->dpcd_revision;
1948 port->num_sdp_streams = port_msg->num_sdp_streams;
1949 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1951 /* manage mstb port lists with mgr lock - take a reference
1954 mutex_lock(&mstb->mgr->lock);
1955 drm_dp_mst_topology_get_port(port);
1956 list_add(&port->next, &mstb->ports);
1957 mutex_unlock(&mstb->mgr->lock);
1960 if (old_ddps != port->ddps) {
1963 drm_dp_send_enum_path_resources(mstb->mgr,
1967 port->available_pbn = 0;
1971 if (old_pdt != port->pdt && !port->input) {
1972 drm_dp_port_teardown_pdt(port, old_pdt);
1974 ret = drm_dp_port_setup_pdt(port);
1976 drm_dp_send_link_address(mstb->mgr, port->mstb);
1979 if (created && !port->input) {
1982 build_mst_prop_path(mstb, port->port_num, proppath,
1984 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
1987 if (!port->connector) {
1988 /* remove it from the port list */
1989 mutex_lock(&mstb->mgr->lock);
1990 list_del(&port->next);
1991 mutex_unlock(&mstb->mgr->lock);
1992 /* drop port list reference */
1993 drm_dp_mst_topology_put_port(port);
1996 if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
1997 port->pdt == DP_PEER_DEVICE_SST_SINK) &&
1998 port->port_num >= DP_MST_LOGICAL_PORT_0) {
1999 port->cached_edid = drm_get_edid(port->connector,
2001 drm_connector_set_tile_property(port->connector);
2003 (*mstb->mgr->cbs->register_connector)(port->connector);
2007 /* put reference to this port */
2008 drm_dp_mst_topology_put_port(port);
2012 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
2013 struct drm_dp_connection_status_notify *conn_stat)
2015 struct drm_dp_mst_port *port;
2018 bool dowork = false;
2019 port = drm_dp_get_port(mstb, conn_stat->port_number);
2023 old_ddps = port->ddps;
2024 old_pdt = port->pdt;
2025 port->pdt = conn_stat->peer_device_type;
2026 port->mcs = conn_stat->message_capability_status;
2027 port->ldps = conn_stat->legacy_device_plug_status;
2028 port->ddps = conn_stat->displayport_device_plug_status;
2030 if (old_ddps != port->ddps) {
2034 port->available_pbn = 0;
2037 if (old_pdt != port->pdt && !port->input) {
2038 drm_dp_port_teardown_pdt(port, old_pdt);
2040 if (drm_dp_port_setup_pdt(port))
2044 drm_dp_mst_topology_put_port(port);
2046 queue_work(system_long_wq, &mstb->mgr->work);
2050 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
2053 struct drm_dp_mst_branch *mstb;
2054 struct drm_dp_mst_port *port;
2056 /* find the port by iterating down */
2058 mutex_lock(&mgr->lock);
2059 mstb = mgr->mst_primary;
2064 for (i = 0; i < lct - 1; i++) {
2065 int shift = (i % 2) ? 0 : 4;
2066 int port_num = (rad[i / 2] >> shift) & 0xf;
2068 list_for_each_entry(port, &mstb->ports, next) {
2069 if (port->port_num == port_num) {
2072 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
2080 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2084 mutex_unlock(&mgr->lock);
2088 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
2089 struct drm_dp_mst_branch *mstb,
2090 const uint8_t *guid)
2092 struct drm_dp_mst_branch *found_mstb;
2093 struct drm_dp_mst_port *port;
2095 if (memcmp(mstb->guid, guid, 16) == 0)
2099 list_for_each_entry(port, &mstb->ports, next) {
2103 found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
2112 static struct drm_dp_mst_branch *
2113 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
2114 const uint8_t *guid)
2116 struct drm_dp_mst_branch *mstb;
2119 /* find the port by iterating down */
2120 mutex_lock(&mgr->lock);
2122 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
2124 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2129 mutex_unlock(&mgr->lock);
2133 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2134 struct drm_dp_mst_branch *mstb)
2136 struct drm_dp_mst_port *port;
2137 struct drm_dp_mst_branch *mstb_child;
2138 if (!mstb->link_address_sent)
2139 drm_dp_send_link_address(mgr, mstb);
2141 list_for_each_entry(port, &mstb->ports, next) {
2148 if (!port->available_pbn)
2149 drm_dp_send_enum_path_resources(mgr, mstb, port);
2152 mstb_child = drm_dp_mst_topology_get_mstb_validated(
2155 drm_dp_check_and_send_link_address(mgr, mstb_child);
2156 drm_dp_mst_topology_put_mstb(mstb_child);
2162 static void drm_dp_mst_link_probe_work(struct work_struct *work)
2164 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
2165 struct drm_dp_mst_branch *mstb;
2168 mutex_lock(&mgr->lock);
2169 mstb = mgr->mst_primary;
2171 ret = drm_dp_mst_topology_try_get_mstb(mstb);
2175 mutex_unlock(&mgr->lock);
2177 drm_dp_check_and_send_link_address(mgr, mstb);
2178 drm_dp_mst_topology_put_mstb(mstb);
2182 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
2187 if (memchr_inv(guid, 0, 16))
2190 salt = get_jiffies_64();
2192 memcpy(&guid[0], &salt, sizeof(u64));
2193 memcpy(&guid[8], &salt, sizeof(u64));
2198 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
2200 struct drm_dp_sideband_msg_req_body req;
2202 req.req_type = DP_REMOTE_DPCD_READ;
2203 req.u.dpcd_read.port_number = port_num;
2204 req.u.dpcd_read.dpcd_address = offset;
2205 req.u.dpcd_read.num_bytes = num_bytes;
2206 drm_dp_encode_sideband_req(&req, msg);
2211 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
2212 bool up, u8 *msg, int len)
2215 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
2216 int tosend, total, offset;
2223 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
2225 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
2228 if (ret != tosend) {
2229 if (ret == -EIO && retries < 5) {
2233 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
2239 } while (total > 0);
2243 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
2244 struct drm_dp_sideband_msg_tx *txmsg)
2246 struct drm_dp_mst_branch *mstb = txmsg->dst;
2249 	/* pick a free msg slot; bail out if both slots are full */
2250 if (txmsg->seqno == -1) {
2251 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
2252 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
2255 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
2256 txmsg->seqno = mstb->last_seqno;
2257 mstb->last_seqno ^= 1;
2258 } else if (mstb->tx_slots[0] == NULL)
2262 mstb->tx_slots[txmsg->seqno] = txmsg;
2265 req_type = txmsg->msg[0] & 0x7f;
2266 if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
2267 req_type == DP_RESOURCE_STATUS_NOTIFY)
2271 hdr->path_msg = txmsg->path_msg;
2272 hdr->lct = mstb->lct;
2273 hdr->lcr = mstb->lct - 1;
2275 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
2276 hdr->seqno = txmsg->seqno;
2280 * process a single block of the next message in the sideband queue
2282 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2283 struct drm_dp_sideband_msg_tx *txmsg,
2287 struct drm_dp_sideband_msg_hdr hdr;
2288 int len, space, idx, tosend;
2291 memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
2293 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
2295 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
2298 	/* make hdr from dst mst - for replies use the original seqno,
2299 	 * otherwise assign one */
2300 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
2304 /* amount left to send in this message */
2305 len = txmsg->cur_len - txmsg->cur_offset;
2307 	/* 48 bytes max per sideband msg chunk, minus 1 byte for the data CRC and the header bytes */
2308 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
2310 tosend = min(len, space);
2311 if (len == txmsg->cur_len)
2317 hdr.msg_len = tosend + 1;
2318 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
2319 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
2320 /* add crc at end */
2321 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
2324 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
2325 if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
2326 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2328 drm_printf(&p, "sideband msg failed to send\n");
2329 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2333 txmsg->cur_offset += tosend;
2334 if (txmsg->cur_offset == txmsg->cur_len) {
2335 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
2341 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
2343 struct drm_dp_sideband_msg_tx *txmsg;
2346 WARN_ON(!mutex_is_locked(&mgr->qlock));
2348 /* construct a chunk from the first msg in the tx_msg queue */
2349 if (list_empty(&mgr->tx_msg_downq))
2352 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
2353 ret = process_single_tx_qlock(mgr, txmsg, false);
2355 		/* txmsg is sent; it should be in the slots now */
2356 list_del(&txmsg->next);
2358 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2359 list_del(&txmsg->next);
2360 if (txmsg->seqno != -1)
2361 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2362 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
2363 wake_up_all(&mgr->tx_waitq);
2367 /* called holding qlock */
2368 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
2369 struct drm_dp_sideband_msg_tx *txmsg)
2373 	/* construct a chunk from the given up reply msg */
2374 ret = process_single_tx_qlock(mgr, txmsg, true);
2377 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
2379 if (txmsg->seqno != -1) {
2380 WARN_ON((unsigned int)txmsg->seqno >
2381 ARRAY_SIZE(txmsg->dst->tx_slots));
2382 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
2386 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
2387 struct drm_dp_sideband_msg_tx *txmsg)
2389 mutex_lock(&mgr->qlock);
2390 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
2392 if (drm_debug_enabled(DRM_UT_DP)) {
2393 struct drm_printer p = drm_debug_printer(DBG_PREFIX);
2395 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
2398 if (list_is_singular(&mgr->tx_msg_downq))
2399 process_single_down_tx_qlock(mgr);
2400 mutex_unlock(&mgr->qlock);
2404 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
2406 struct drm_dp_link_addr_reply_port *port_reply;
2409 for (i = 0; i < reply->nports; i++) {
2410 port_reply = &reply->ports[i];
2411 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
2413 port_reply->input_port,
2414 port_reply->peer_device_type,
2415 port_reply->port_number,
2416 port_reply->dpcd_revision,
2419 port_reply->legacy_device_plug_status,
2420 port_reply->num_sdp_streams,
2421 port_reply->num_sdp_stream_sinks);
2425 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
2426 struct drm_dp_mst_branch *mstb)
2428 struct drm_dp_sideband_msg_tx *txmsg;
2429 struct drm_dp_link_address_ack_reply *reply;
2432 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2437 len = build_link_address(txmsg);
2439 mstb->link_address_sent = true;
2440 drm_dp_queue_down_tx(mgr, txmsg);
2442 /* FIXME: Actually do some real error handling here */
2443 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2445 DRM_ERROR("Sending link address failed with %d\n", ret);
2448 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2449 DRM_ERROR("link address NAK received\n");
2454 reply = &txmsg->reply.u.link_addr;
2455 DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
2456 drm_dp_dump_link_address(reply);
2458 drm_dp_check_mstb_guid(mstb, reply->guid);
2460 for (i = 0; i < reply->nports; i++)
2461 drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
2464 drm_kms_helper_hotplug_event(mgr->dev);
2468 mstb->link_address_sent = false;
2473 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
2474 struct drm_dp_mst_branch *mstb,
2475 struct drm_dp_mst_port *port)
2477 struct drm_dp_enum_path_resources_ack_reply *path_res;
2478 struct drm_dp_sideband_msg_tx *txmsg;
2482 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2487 len = build_enum_path_resources(txmsg, port->port_num);
2489 drm_dp_queue_down_tx(mgr, txmsg);
2491 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2493 path_res = &txmsg->reply.u.path_resources;
2495 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
2496 DRM_DEBUG_KMS("enum path resources nak received\n");
2498 if (port->port_num != path_res->port_number)
2499 DRM_ERROR("got incorrect port in response\n");
2501 DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
2502 path_res->port_number,
2503 path_res->full_payload_bw_number,
2504 path_res->avail_payload_bw_number);
2505 port->available_pbn =
2506 path_res->avail_payload_bw_number;
2514 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
2516 if (!mstb->port_parent)
2519 if (mstb->port_parent->mstb != mstb)
2520 return mstb->port_parent;
2522 return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
2526 * Searches upwards in the topology starting from mstb to try to find the
2527 * closest available parent of mstb that's still connected to the rest of the
2528 * topology. This can be used in order to perform operations like releasing
2529 * payloads, where the branch device which owned the payload may no longer be
2530  * around and thus would require that the payload on the last living relative be removed instead.
2533 static struct drm_dp_mst_branch *
2534 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
2535 struct drm_dp_mst_branch *mstb,
2538 struct drm_dp_mst_branch *rmstb = NULL;
2539 struct drm_dp_mst_port *found_port;
2541 mutex_lock(&mgr->lock);
2542 if (!mgr->mst_primary)
2546 found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
2550 if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
2551 rmstb = found_port->parent;
2552 *port_num = found_port->port_num;
2554 /* Search again, starting from this parent */
2555 mstb = found_port->parent;
2559 mutex_unlock(&mgr->lock);
2563 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
2564 struct drm_dp_mst_port *port,
2568 struct drm_dp_sideband_msg_tx *txmsg;
2569 struct drm_dp_mst_branch *mstb;
2570 int len, ret, port_num;
2571 u8 sinks[DRM_DP_MAX_SDP_STREAMS];
2574 port_num = port->port_num;
2575 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2577 mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
2585 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2591 for (i = 0; i < port->num_sdp_streams; i++)
2595 len = build_allocate_payload(txmsg, port_num,
2597 pbn, port->num_sdp_streams, sinks);
2599 drm_dp_queue_down_tx(mgr, txmsg);
2602 * FIXME: there is a small chance that between getting the last
2603 * connected mstb and sending the payload message, the last connected
2604 * mstb could also be removed from the topology. In the future, this
2605 * needs to be fixed by restarting the
2606 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
2607 * timeout if the topology is still connected to the system.
2609 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2611 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2618 drm_dp_mst_topology_put_mstb(mstb);
2622 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2623 struct drm_dp_mst_port *port, bool power_up)
2625 struct drm_dp_sideband_msg_tx *txmsg;
2628 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2632 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2634 drm_dp_mst_topology_put_port(port);
2638 txmsg->dst = port->parent;
2639 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2640 drm_dp_queue_down_tx(mgr, txmsg);
2642 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2644 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2650 drm_dp_mst_topology_put_port(port);
2654 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2656 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2658 struct drm_dp_payload *payload)
2662 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2664 payload->payload_state = 0;
2667 payload->payload_state = DP_PAYLOAD_LOCAL;
2671 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2672 struct drm_dp_mst_port *port,
2674 struct drm_dp_payload *payload)
2677 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2680 payload->payload_state = DP_PAYLOAD_REMOTE;
2684 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2685 struct drm_dp_mst_port *port,
2687 struct drm_dp_payload *payload)
2689 DRM_DEBUG_KMS("\n");
2690 /* it's okay for these to fail */
2692 drm_dp_payload_send_msg(mgr, port, id, 0);
2695 drm_dp_dpcd_write_payload(mgr, id, payload);
2696 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2700 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2702 struct drm_dp_payload *payload)
2704 payload->payload_state = 0;
2709 * drm_dp_update_payload_part1() - Execute payload update part 1
2710 * @mgr: manager to use.
2712 * This iterates over all proposed virtual channels, and tries to
2713 * allocate space in the link for them. For 0->slots transitions,
2714 * this step just writes the VCPI to the MST device. For slots->0
2715 * transitions, this writes the updated VCPIs and removes the
2716 * remote VC payloads.
2718  * after calling this the driver should generate ACT and payload packets.
2721 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2723 struct drm_dp_payload req_payload;
2724 struct drm_dp_mst_port *port;
2728 mutex_lock(&mgr->payload_lock);
2729 for (i = 0; i < mgr->max_payloads; i++) {
2730 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2731 struct drm_dp_payload *payload = &mgr->payloads[i];
2732 bool put_port = false;
2734 		/* solve the current payloads - compare to the hw ones
2735 		 * and update the hw view */
2736 req_payload.start_slot = cur_slots;
2738 port = container_of(vcpi, struct drm_dp_mst_port,
2741 /* Validated ports don't matter if we're releasing
2744 if (vcpi->num_slots) {
2745 port = drm_dp_mst_topology_get_port_validated(
2748 mutex_unlock(&mgr->payload_lock);
2754 req_payload.num_slots = vcpi->num_slots;
2755 req_payload.vcpi = vcpi->vcpi;
2758 req_payload.num_slots = 0;
2761 payload->start_slot = req_payload.start_slot;
2762 /* work out what is required to happen with this payload */
2763 if (payload->num_slots != req_payload.num_slots) {
2765 /* need to push an update for this payload */
2766 if (req_payload.num_slots) {
2767 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2769 payload->num_slots = req_payload.num_slots;
2770 payload->vcpi = req_payload.vcpi;
2772 } else if (payload->num_slots) {
2773 payload->num_slots = 0;
2774 drm_dp_destroy_payload_step1(mgr, port,
2777 req_payload.payload_state =
2778 payload->payload_state;
2779 payload->start_slot = 0;
2781 payload->payload_state = req_payload.payload_state;
2783 cur_slots += req_payload.num_slots;
2786 drm_dp_mst_topology_put_port(port);
2789 for (i = 0; i < mgr->max_payloads; i++) {
2790 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
2793 DRM_DEBUG_KMS("removing payload %d\n", i);
2794 for (j = i; j < mgr->max_payloads - 1; j++) {
2795 mgr->payloads[j] = mgr->payloads[j + 1];
2796 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2798 if (mgr->proposed_vcpis[j] &&
2799 mgr->proposed_vcpis[j]->num_slots) {
2800 set_bit(j + 1, &mgr->payload_mask);
2802 clear_bit(j + 1, &mgr->payload_mask);
2806 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2807 sizeof(struct drm_dp_payload));
2808 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2809 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2811 mutex_unlock(&mgr->payload_lock);
2815 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2818 * drm_dp_update_payload_part2() - Execute payload update part 2
2819 * @mgr: manager to use.
2821 * This iterates over all proposed virtual channels, and tries to
2822 * allocate space in the link for them. For 0->slots transitions,
2823 * this step writes the remote VC payload commands. For slots->0
2824 * this just resets some internal state.
2826 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2828 struct drm_dp_mst_port *port;
2831 mutex_lock(&mgr->payload_lock);
2832 for (i = 0; i < mgr->max_payloads; i++) {
2834 if (!mgr->proposed_vcpis[i])
2837 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2839 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2840 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2841 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2842 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2843 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2846 mutex_unlock(&mgr->payload_lock);
2850 mutex_unlock(&mgr->payload_lock);
2853 EXPORT_SYMBOL(drm_dp_update_payload_part2);
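/*
 * Illustrative sketch of how a driver typically drives the two payload
 * update steps above around a modeset commit. This is not part of this
 * file; error handling is omitted and the ordering around the driver's own
 * pipe/transcoder programming is driver specific:
 *
 *	drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
 *	drm_dp_update_payload_part1(mgr);
 *	// driver-specific: enable the stream and trigger ACT
 *	drm_dp_check_act_status(mgr);
 *	drm_dp_update_payload_part2(mgr);
 *
 * On teardown the driver resets the allocation with
 * drm_dp_mst_reset_vcpi_slots(), runs both payload update steps again to
 * remove the remote payload, and finally calls drm_dp_mst_deallocate_vcpi().
 */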
2855 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2856 struct drm_dp_mst_port *port,
2857 int offset, int size, u8 *bytes)
2861 struct drm_dp_sideband_msg_tx *txmsg;
2862 struct drm_dp_mst_branch *mstb;
2864 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2868 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2874 len = build_dpcd_read(txmsg, port->port_num, offset, size);
2875 txmsg->dst = port->parent;
2877 drm_dp_queue_down_tx(mgr, txmsg);
2879 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2883 /* DPCD read should never be NACKed */
2884 if (txmsg->reply.reply_type == 1) {
2885 DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
2886 mstb, port->port_num, offset, size);
2891 if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
2896 ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
2898 memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
2903 drm_dp_mst_topology_put_mstb(mstb);
2908 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2909 struct drm_dp_mst_port *port,
2910 int offset, int size, u8 *bytes)
2914 struct drm_dp_sideband_msg_tx *txmsg;
2915 struct drm_dp_mst_branch *mstb;
2917 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2921 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2927 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2930 drm_dp_queue_down_tx(mgr, txmsg);
2932 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2934 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2941 drm_dp_mst_topology_put_mstb(mstb);
2945 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2947 struct drm_dp_sideband_msg_reply_body reply;
2949 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2950 reply.req_type = req_type;
2951 drm_dp_encode_sideband_reply(&reply, msg);
2955 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2956 struct drm_dp_mst_branch *mstb,
2957 int req_type, int seqno, bool broadcast)
2959 struct drm_dp_sideband_msg_tx *txmsg;
2961 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2966 txmsg->seqno = seqno;
2967 drm_dp_encode_up_ack_reply(txmsg, req_type);
2969 mutex_lock(&mgr->qlock);
2971 process_single_up_tx_qlock(mgr, txmsg);
2973 mutex_unlock(&mgr->qlock);
2979 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
2981 if (dp_link_bw == 0 || dp_link_count == 0)
2982 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2983 dp_link_bw, dp_link_count);
2985 return dp_link_bw * dp_link_count / 2;
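/*
 * Example values, for illustration: DP_LINK_BW_5_4 is 0x14 (20 decimal), so
 * an HBR2 link trained at 4 lanes yields 20 * 4 / 2 = 40, i.e. 40 PBN of
 * bandwidth per VC payload time slot. A zero link rate or lane count would
 * make every later DIV_ROUND_UP(pbn, mgr->pbn_div) invalid, hence the check
 * above.
 */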
2989 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2990 * @mgr: manager to set state for
2991 * @mst_state: true to enable MST on this connector - false to disable.
2993 * This is called by the driver when it detects an MST capable device plugged
2994 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2996 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2999 struct drm_dp_mst_branch *mstb = NULL;
3001 mutex_lock(&mgr->lock);
3002 if (mst_state == mgr->mst_state)
3005 mgr->mst_state = mst_state;
3006 /* set the device into MST mode */
3008 WARN_ON(mgr->mst_primary);
3011 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3012 if (ret != DP_RECEIVER_CAP_SIZE) {
3013 DRM_DEBUG_KMS("failed to read DPCD\n");
3017 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
3018 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
3019 if (mgr->pbn_div == 0) {
3024 /* add initial branch device at LCT 1 */
3025 mstb = drm_dp_add_mst_branch_device(1, NULL);
3032 /* give this the main reference */
3033 mgr->mst_primary = mstb;
3034 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
3036 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3037 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3043 struct drm_dp_payload reset_pay;
3044 reset_pay.start_slot = 0;
3045 reset_pay.num_slots = 0x3f;
3046 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3049 queue_work(system_long_wq, &mgr->work);
3053 /* disable MST on the device */
3054 mstb = mgr->mst_primary;
3055 mgr->mst_primary = NULL;
3056 /* this can fail if the device is gone */
3057 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
3059 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
3060 mgr->payload_mask = 0;
3061 set_bit(0, &mgr->payload_mask);
3066 mutex_unlock(&mgr->lock);
3068 drm_dp_mst_topology_put_mstb(mstb);
3072 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
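/*
 * Illustrative sketch of a driver toggling MST mode from its detect path.
 * Not part of this file; the dp->mst_mgr naming is purely hypothetical:
 *
 *	// after the driver has read DP_MSTM_CAP from the sink's DPCD
 *	if (sink_has_mst)
 *		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);
 *	else
 *		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
 */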
3075 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
3076 * @mgr: manager to suspend
3078 * This function tells the MST device that we can't handle UP messages
3079 * anymore. This should stop it from sending any since we are suspended.
3081 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
3083 mutex_lock(&mgr->lock);
3084 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3085 DP_MST_EN | DP_UPSTREAM_IS_SRC);
3086 mutex_unlock(&mgr->lock);
3087 flush_work(&mgr->work);
3088 flush_work(&mgr->destroy_connector_work);
3090 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
3093 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
3094 * @mgr: manager to resume
3096  * This will fetch DPCD and see if the device is still there;
3097  * if it is, it will rewrite the MSTM control bits, and return.
3099  * If the device fails, this returns -1 and the driver should do
3100  * a full MST reprobe, in case we were undocked.
3102 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
3106 mutex_lock(&mgr->lock);
3108 if (mgr->mst_primary) {
3112 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
3113 if (sret != DP_RECEIVER_CAP_SIZE) {
3114 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3119 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3120 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3122 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
3127 /* Some hubs forget their guids after they resume */
3128 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
3130 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
3134 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
3141 mutex_unlock(&mgr->lock);
3144 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
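/*
 * Illustrative driver suspend/resume flow around the two helpers above,
 * assuming a hypothetical dp->mst_mgr and dp->is_mst flag:
 *
 *	// system suspend
 *	if (dp->is_mst)
 *		drm_dp_mst_topology_mgr_suspend(&dp->mst_mgr);
 *
 *	// system resume
 *	if (dp->is_mst && drm_dp_mst_topology_mgr_resume(&dp->mst_mgr) < 0) {
 *		// hub vanished or was replaced while asleep: fall back to
 *		// a full detection cycle / MST reprobe
 *		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
 *	}
 */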
3146 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
3150 int replylen, origlen, curreply;
3152 struct drm_dp_sideband_msg_rx *msg;
3153 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
3154 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
3156 len = min(mgr->max_dpcd_transaction_bytes, 16);
3157 ret = drm_dp_dpcd_read(mgr->aux, basereg,
3160 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
3163 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
3165 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
3168 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
3173 while (replylen > 0) {
3174 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
3175 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
3178 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
3183 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
3185 DRM_DEBUG_KMS("failed to build sideband msg\n");
3195 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
3197 struct drm_dp_sideband_msg_tx *txmsg;
3198 struct drm_dp_mst_branch *mstb;
3199 struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
3202 if (!drm_dp_get_one_sb_msg(mgr, false))
3203 goto clear_down_rep_recv;
3205 if (!mgr->down_rep_recv.have_eomt)
3208 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3210 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3212 goto clear_down_rep_recv;
3215 /* find the message */
3217 mutex_lock(&mgr->qlock);
3218 txmsg = mstb->tx_slots[slot];
3219 	/* the slot entry is cleared further down, once the reply has been parsed */
3220 mutex_unlock(&mgr->qlock);
3223 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
3224 mstb, hdr->seqno, hdr->lct, hdr->rad[0],
3225 mgr->down_rep_recv.msg[0]);
3229 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
3231 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
3232 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
3233 txmsg->reply.req_type,
3234 drm_dp_mst_req_type_str(txmsg->reply.req_type),
3235 txmsg->reply.u.nak.reason,
3236 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
3237 txmsg->reply.u.nak.nak_data);
3239 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3240 drm_dp_mst_topology_put_mstb(mstb);
3242 mutex_lock(&mgr->qlock);
3243 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
3244 mstb->tx_slots[slot] = NULL;
3245 mutex_unlock(&mgr->qlock);
3247 wake_up_all(&mgr->tx_waitq);
3252 drm_dp_mst_topology_put_mstb(mstb);
3253 clear_down_rep_recv:
3254 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3259 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3261 struct drm_dp_sideband_msg_req_body msg;
3262 struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
3263 struct drm_dp_mst_branch *mstb = NULL;
3267 if (!drm_dp_get_one_sb_msg(mgr, true))
3270 if (!mgr->up_req_recv.have_eomt)
3273 if (!hdr->broadcast) {
3274 mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
3276 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3283 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3285 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY)
3286 guid = msg.u.conn_stat.guid;
3287 else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY)
3288 guid = msg.u.resource_stat.guid;
3292 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno,
3296 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
3298 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
3304 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3305 drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat);
3307 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
3308 msg.u.conn_stat.port_number,
3309 msg.u.conn_stat.legacy_device_plug_status,
3310 msg.u.conn_stat.displayport_device_plug_status,
3311 msg.u.conn_stat.message_capability_status,
3312 msg.u.conn_stat.input_port,
3313 msg.u.conn_stat.peer_device_type);
3315 drm_kms_helper_hotplug_event(mgr->dev);
3316 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3317 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
3318 msg.u.resource_stat.port_number,
3319 msg.u.resource_stat.available_pbn);
3322 drm_dp_mst_topology_put_mstb(mstb);
3324 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3329 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
3330 * @mgr: manager to notify irq for.
3331 * @esi: 4 bytes from SINK_COUNT_ESI
3332 * @handled: whether the hpd interrupt was consumed or not
3334 * This should be called from the driver when it detects a short IRQ,
3335 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
3336  * topology manager will process the sideband messages received as a result of this.
3339 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
3346 if (sc != mgr->sink_count) {
3347 mgr->sink_count = sc;
3351 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
3352 ret = drm_dp_mst_handle_down_rep(mgr);
3356 if (esi[1] & DP_UP_REQ_MSG_RDY) {
3357 ret |= drm_dp_mst_handle_up_req(mgr);
3361 drm_dp_mst_kick_tx(mgr);
3364 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
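/*
 * Illustrative short-HPD handler sketch, modelled on what typical drivers
 * do; the dp->mst_mgr / dp->aux names are hypothetical and error handling
 * is trimmed:
 *
 *	u8 esi[DP_DPRX_ESI_LEN];
 *	bool handled = false;
 *
 *	drm_dp_dpcd_read(dp->aux, DP_SINK_COUNT_ESI, esi, DP_DPRX_ESI_LEN);
 *	drm_dp_mst_hpd_irq(&dp->mst_mgr, esi, &handled);
 *	if (handled)
 *		// ack the serviced IRQ vector bits back to the sink
 *		drm_dp_dpcd_write(dp->aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
 */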
3367 * drm_dp_mst_detect_port() - get connection status for an MST port
3368 * @connector: DRM connector for this port
3369 * @mgr: manager for this port
3370 * @port: unverified pointer to a port
3372 * This returns the current connection state for a port. It validates the
3373 * port pointer still exists so the caller doesn't require a reference
3375 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
3376 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3378 enum drm_connector_status status = connector_status_disconnected;
3380 /* we need to search for the port in the mgr in case it's gone */
3381 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3383 return connector_status_disconnected;
3388 switch (port->pdt) {
3389 case DP_PEER_DEVICE_NONE:
3390 case DP_PEER_DEVICE_MST_BRANCHING:
3393 case DP_PEER_DEVICE_SST_SINK:
3394 status = connector_status_connected;
3395 /* for logical ports - cache the EDID */
3396 if (port->port_num >= 8 && !port->cached_edid) {
3397 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
3400 case DP_PEER_DEVICE_DP_LEGACY_CONV:
3402 status = connector_status_connected;
3406 drm_dp_mst_topology_put_port(port);
3409 EXPORT_SYMBOL(drm_dp_mst_detect_port);
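/*
 * Illustrative use from an MST connector's .detect hook; the mst_connector
 * wrapper and to_mst_connector() helper are hypothetical, not defined here:
 *
 *	static enum drm_connector_status mst_detect(struct drm_connector *c,
 *						    bool force)
 *	{
 *		struct mst_connector *mc = to_mst_connector(c);
 *
 *		return drm_dp_mst_detect_port(c, mc->mgr, mc->port);
 *	}
 */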
3412 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
3413 * @mgr: manager for this port
3414 * @port: unverified pointer to a port.
3416 * This returns whether the port supports audio or not.
3418 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
3419 struct drm_dp_mst_port *port)
3423 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3426 ret = port->has_audio;
3427 drm_dp_mst_topology_put_port(port);
3430 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3433 * drm_dp_mst_get_edid() - get EDID for an MST port
3434 * @connector: toplevel connector to get EDID for
3435 * @mgr: manager for this port
3436 * @port: unverified pointer to a port.
3438  * This returns an EDID for the port connected to a connector.
3439  * It validates the pointer still exists so the caller doesn't require a reference.
3442 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3444 struct edid *edid = NULL;
3446 /* we need to search for the port in the mgr in case it's gone */
3447 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3451 if (port->cached_edid)
3452 edid = drm_edid_duplicate(port->cached_edid);
3454 edid = drm_get_edid(connector, &port->aux.ddc);
3456 port->has_audio = drm_detect_monitor_audio(edid);
3457 drm_dp_mst_topology_put_port(port);
3460 EXPORT_SYMBOL(drm_dp_mst_get_edid);
3463 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3464 * @mgr: manager to use
3465 * @pbn: payload bandwidth to convert into slots.
3467 * Calculate the number of VCPI slots that will be required for the given PBN
3468  * value. This function is deprecated, and should not be used in atomic drivers.
3472 * The total slots required for this port, or error.
3474 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3479 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3481 /* max. time slots - one slot for MTP header */
3486 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
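/*
 * Worked example, for illustration: a 1080p60 stream at 24 bpp needs
 * roughly 532 PBN (see drm_dp_calc_pbn_mode() below). On an HBR2 x4 link
 * mgr->pbn_div is 40, so DIV_ROUND_UP(532, 40) = 14 time slots out of the
 * 63 usable per MTP.
 */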
3488 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3489 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3493 /* max. time slots - one slot for MTP header */
3498 vcpi->aligned_pbn = slots * mgr->pbn_div;
3499 vcpi->num_slots = slots;
3501 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3508 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3509 * @state: global atomic state
3510 * @mgr: MST topology manager for the port
3511 * @port: port to find vcpi slots for
3512 * @pbn: bandwidth required for the mode in PBN
3514 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3515 * may have had. Any atomic drivers which support MST must call this function
3516 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3517 * current VCPI allocation for the new state, but only when
3518 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3519 * to ensure compatibility with userspace applications that still use the
3520 * legacy modesetting UAPI.
3522 * Allocations set by this function are not checked against the bandwidth
3523 * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3525 * Additionally, it is OK to call this function multiple times on the same
3526  * @port as needed. It is not OK, however, to call this function and
3527 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3530 * drm_dp_atomic_release_vcpi_slots()
3531 * drm_dp_mst_atomic_check()
3534 * Total slots in the atomic state assigned for this port, or a negative error
3535 * code if the port no longer exists
3537 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3538 struct drm_dp_mst_topology_mgr *mgr,
3539 struct drm_dp_mst_port *port, int pbn)
3541 struct drm_dp_mst_topology_state *topology_state;
3542 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3543 int prev_slots, req_slots, ret;
3545 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3546 if (IS_ERR(topology_state))
3547 return PTR_ERR(topology_state);
3549 /* Find the current allocation for this port, if any */
3550 list_for_each_entry(pos, &topology_state->vcpis, next) {
3551 if (pos->port == port) {
3553 prev_slots = vcpi->vcpi;
3556 * This should never happen, unless the driver tries
3557 			 * releasing and allocating the same VCPI allocation, which is an error.
3560 if (WARN_ON(!prev_slots)) {
3561 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3572 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3574 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3575 port->connector->base.id, port->connector->name,
3576 port, prev_slots, req_slots);
3578 /* Add the new allocation to the state */
3580 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3584 drm_dp_mst_get_port_malloc(port);
3586 list_add(&vcpi->next, &topology_state->vcpis);
3588 vcpi->vcpi = req_slots;
3593 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
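/*
 * Illustrative atomic_check sketch for an MST encoder, showing where the
 * helper above is meant to be called from. Names other than the exported
 * helpers are hypothetical and error handling is trimmed:
 *
 *	if (crtc_state->mode_changed || crtc_state->connectors_changed) {
 *		pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 *		slots = drm_dp_atomic_find_vcpi_slots(state, &dp->mst_mgr,
 *						      mst_port, pbn);
 *		if (slots < 0)
 *			return slots;
 *	}
 *
 * The matching release path calls drm_dp_atomic_release_vcpi_slots(), and
 * the driver's &drm_mode_config_funcs.atomic_check should end with
 * drm_dp_mst_atomic_check() so the per-manager bandwidth limits are
 * enforced.
 */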
3596 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3597 * @state: global atomic state
3598 * @mgr: MST topology manager for the port
3599 * @port: The port to release the VCPI slots from
3601 * Releases any VCPI slots that have been allocated to a port in the atomic
3602 * state. Any atomic drivers which support MST must call this function in
3603 * their &drm_connector_helper_funcs.atomic_check() callback when the
3604 * connector will no longer have VCPI allocated (e.g. because its CRTC was
3605 * removed) when it had VCPI allocated in the previous atomic state.
3607 * It is OK to call this even if @port has been removed from the system.
3608 * Additionally, it is OK to call this function multiple times on the same
3609  * @port as needed. It is not OK, however, to call this function and
3610 * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3614 * drm_dp_atomic_find_vcpi_slots()
3615 * drm_dp_mst_atomic_check()
3618 * 0 if all slots for this port were added back to
3619 * &drm_dp_mst_topology_state.avail_slots or negative error code
3621 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3622 struct drm_dp_mst_topology_mgr *mgr,
3623 struct drm_dp_mst_port *port)
3625 struct drm_dp_mst_topology_state *topology_state;
3626 struct drm_dp_vcpi_allocation *pos;
3629 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3630 if (IS_ERR(topology_state))
3631 return PTR_ERR(topology_state);
3633 list_for_each_entry(pos, &topology_state->vcpis, next) {
3634 if (pos->port == port) {
3639 if (WARN_ON(!found)) {
3640 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3641 port, &topology_state->base);
3645 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3647 drm_dp_mst_put_port_malloc(port);
3653 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
3656 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3657 * @mgr: manager for this port
3658 * @port: port to allocate a virtual channel for.
3659 * @pbn: payload bandwidth number to request
3660 * @slots: returned number of slots for this PBN.
3662 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3663 struct drm_dp_mst_port *port, int pbn, int slots)
3667 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3674 if (port->vcpi.vcpi > 0) {
3675 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3676 port->vcpi.vcpi, port->vcpi.pbn, pbn);
3677 if (pbn == port->vcpi.pbn) {
3678 drm_dp_mst_topology_put_port(port);
3683 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3685 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3686 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3689 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3690 pbn, port->vcpi.num_slots);
3692 /* Keep port allocated until its payload has been removed */
3693 drm_dp_mst_get_port_malloc(port);
3694 drm_dp_mst_topology_put_port(port);
3699 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
3701 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3704 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3708 slots = port->vcpi.num_slots;
3709 drm_dp_mst_topology_put_port(port);
3712 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3715 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3716 * @mgr: manager for this port
3717 * @port: unverified pointer to a port.
3719  * This just resets the number of slots for the port's VCPI for later programming.
3721 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3724 * A port with VCPI will remain allocated until its VCPI is
3725 * released, no verified ref needed
3728 port->vcpi.num_slots = 0;
3730 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3733 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3734 * @mgr: manager for this port
3735 * @port: port to deallocate vcpi for
3737 * This can be called unconditionally, regardless of whether
3738 * drm_dp_mst_allocate_vcpi() succeeded or not.
3740 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3741 struct drm_dp_mst_port *port)
3743 if (!port->vcpi.vcpi)
3746 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3747 port->vcpi.num_slots = 0;
3749 port->vcpi.aligned_pbn = 0;
3750 port->vcpi.vcpi = 0;
3751 drm_dp_mst_put_port_malloc(port);
3753 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3755 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3756 int id, struct drm_dp_payload *payload)
3758 u8 payload_alloc[3], status;
3762 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3763 DP_PAYLOAD_TABLE_UPDATED);
3765 payload_alloc[0] = id;
3766 payload_alloc[1] = payload->start_slot;
3767 payload_alloc[2] = payload->num_slots;
3769 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3771 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3776 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3778 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3782 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3785 usleep_range(10000, 20000);
3788 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3799 * drm_dp_check_act_status() - Check ACT handled status.
3800 * @mgr: manager to use
3802 * Check the payload status bits in the DPCD for ACT handled completion.
3804 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3811 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3814 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3818 if (status & DP_PAYLOAD_ACT_HANDLED)
3823 } while (count < 30);
3825 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3826 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3834 EXPORT_SYMBOL(drm_dp_check_act_status);
3837 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3838 * @clock: dot clock for the mode
3839 * @bpp: bpp for the mode.
3841 * This uses the formula in the spec to calculate the PBN value for a mode.
3843 int drm_dp_calc_pbn_mode(int clock, int bpp)
3846 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3847 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on a
3848 	 * common multiplier to render an integer PBN for all link rate/lane
3849 	 * count combinations
3851 * peak_kbps *= (1006/1000)
3852 * peak_kbps *= (64/54)
3853 * peak_kbps *= 8 convert to bytes
3855 return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
3856 8 * 54 * 1000 * 1000);
3858 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
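/*
 * Worked example of the formula above, for illustration: a 1920x1080@60
 * mode has a dot clock of 148500 kHz. At 24 bpp,
 * 148500 * 24 * 64 * 1006 / (8 * 54 * 1000 * 1000) = 531.17, which rounds
 * up to 532 PBN.
 */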
3860 /* we want to kick the TX after we've ACKed the up/down IRQs. */
3861 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3863 queue_work(system_long_wq, &mgr->tx_work);
3866 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3867 struct drm_dp_mst_branch *mstb)
3869 struct drm_dp_mst_port *port;
3870 int tabs = mstb->lct;
3874 for (i = 0; i < tabs; i++)
3878 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3879 list_for_each_entry(port, &mstb->ports, next) {
3880 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
3882 drm_dp_mst_dump_mstb(m, port->mstb);
3886 #define DP_PAYLOAD_TABLE_SIZE 64
3888 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3893 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3894 if (drm_dp_dpcd_read(mgr->aux,
3895 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3902 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3903 struct drm_dp_mst_port *port, char *name,
3906 struct edid *mst_edid;
3908 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3909 drm_edid_get_monitor_name(mst_edid, name, namelen);
3913 * drm_dp_mst_dump_topology(): dump topology to seq file.
3914 * @m: seq_file to dump output to
3915 * @mgr: manager to dump current topology for.
3917 * helper to dump MST topology to a seq file for debugfs.
3919 void drm_dp_mst_dump_topology(struct seq_file *m,
3920 struct drm_dp_mst_topology_mgr *mgr)
3923 struct drm_dp_mst_port *port;
3925 mutex_lock(&mgr->lock);
3926 if (mgr->mst_primary)
3927 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3930 mutex_unlock(&mgr->lock);
3932 mutex_lock(&mgr->payload_lock);
3933 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3936 for (i = 0; i < mgr->max_payloads; i++) {
3937 if (mgr->proposed_vcpis[i]) {
3940 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3941 fetch_monitor_name(mgr, port, name, sizeof(name));
3942 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3943 port->port_num, port->vcpi.vcpi,
3944 port->vcpi.num_slots,
3945 (*name != 0) ? name : "Unknown");
3947 seq_printf(m, "vcpi %d:unused\n", i);
3949 for (i = 0; i < mgr->max_payloads; i++) {
3950 seq_printf(m, "payload %d: %d, %d, %d\n",
3952 mgr->payloads[i].payload_state,
3953 mgr->payloads[i].start_slot,
3954 mgr->payloads[i].num_slots);
3958 mutex_unlock(&mgr->payload_lock);
3960 mutex_lock(&mgr->lock);
3961 if (mgr->mst_primary) {
3962 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3965 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3966 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3967 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3968 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3969 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3970 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3972 /* dump the standard OUI branch header */
3973 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3974 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3975 for (i = 0x3; i < 0x8 && buf[i]; i++)
3976 seq_printf(m, "%c", buf[i]);
3977 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3978 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3979 if (dump_dp_payload_table(mgr, buf))
3980 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3983 mutex_unlock(&mgr->lock);
3986 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
3988 static void drm_dp_tx_work(struct work_struct *work)
3990 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
3992 mutex_lock(&mgr->qlock);
3993 if (!list_empty(&mgr->tx_msg_downq))
3994 process_single_down_tx_qlock(mgr);
3995 mutex_unlock(&mgr->qlock);
3998 static void drm_dp_destroy_connector_work(struct work_struct *work)
4000 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
4001 struct drm_dp_mst_port *port;
4002 bool send_hotplug = false;
4004 * Not a regular list traverse as we have to drop the destroy
4005 * connector lock before destroying the connector, to avoid AB->BA
4006 * ordering between this lock and the config mutex.
4009 mutex_lock(&mgr->destroy_connector_lock);
4010 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
4012 mutex_unlock(&mgr->destroy_connector_lock);
4015 list_del(&port->next);
4016 mutex_unlock(&mgr->destroy_connector_lock);
4018 mgr->cbs->destroy_connector(mgr, port->connector);
4020 drm_dp_port_teardown_pdt(port, port->pdt);
4021 port->pdt = DP_PEER_DEVICE_NONE;
4023 drm_dp_mst_put_port_malloc(port);
4024 send_hotplug = true;
4027 drm_kms_helper_hotplug_event(mgr->dev);
4030 static struct drm_private_state *
4031 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
4033 struct drm_dp_mst_topology_state *state, *old_state =
4034 to_dp_mst_topology_state(obj->state);
4035 struct drm_dp_vcpi_allocation *pos, *vcpi;
4037 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
4041 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
4043 INIT_LIST_HEAD(&state->vcpis);
4045 list_for_each_entry(pos, &old_state->vcpis, next) {
4046 /* Prune leftover freed VCPI allocations */
4050 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
4054 drm_dp_mst_get_port_malloc(vcpi->port);
4055 list_add(&vcpi->next, &state->vcpis);
4058 return &state->base;
4061 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
4062 drm_dp_mst_put_port_malloc(pos->port);
4070 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
4071 struct drm_private_state *state)
4073 struct drm_dp_mst_topology_state *mst_state =
4074 to_dp_mst_topology_state(state);
4075 struct drm_dp_vcpi_allocation *pos, *tmp;
4077 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
4078 /* We only keep references to ports with non-zero VCPIs */
4080 drm_dp_mst_put_port_malloc(pos->port);
4088 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
4089 struct drm_dp_mst_topology_state *mst_state)
4091 struct drm_dp_vcpi_allocation *vcpi;
4092 int avail_slots = 63, payload_count = 0;
4094 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
4095 		/* Releasing VCPI is always OK, even if the port is gone */
4097 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
4102 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
4103 vcpi->port, vcpi->vcpi);
4105 avail_slots -= vcpi->vcpi;
4106 if (avail_slots < 0) {
4107 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
4108 vcpi->port, mst_state,
4109 avail_slots + vcpi->vcpi);
4113 if (++payload_count > mgr->max_payloads) {
4114 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
4115 mgr, mst_state, mgr->max_payloads);
4119 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
4120 mgr, mst_state, avail_slots,
4127 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
4128 * atomic update is valid
4129 * @state: Pointer to the new &struct drm_dp_mst_topology_state
4131 * Checks the given topology state for an atomic update to ensure that it's
4132 * valid. This includes checking whether there's enough bandwidth to support
4133 * the new VCPI allocations in the atomic update.
4135 * Any atomic drivers supporting DP MST must make sure to call this after
4136 * checking the rest of their state in their
4137 * &drm_mode_config_funcs.atomic_check() callback.
4140 * drm_dp_atomic_find_vcpi_slots()
4141 * drm_dp_atomic_release_vcpi_slots()
4145 * 0 if the new state is valid, negative error code otherwise.
4147 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
4149 struct drm_dp_mst_topology_mgr *mgr;
4150 struct drm_dp_mst_topology_state *mst_state;
4153 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
4154 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
4161 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
4163 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
4164 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
4165 .atomic_destroy_state = drm_dp_mst_destroy_state,
4167 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
4170 * drm_atomic_get_mst_topology_state: get MST topology state
4172 * @state: global atomic state
4173 * @mgr: MST topology manager, also the private object in this case
4175  * This function wraps drm_atomic_get_private_obj_state() passing in the MST atomic
4176 * state vtable so that the private object state returned is that of a MST
4177 * topology object. Also, drm_atomic_get_private_obj_state() expects the caller
4178  * to take care of the locking, so we warn if the connection_mutex is not held.
4182 * The MST topology state or error pointer.
4184 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
4185 struct drm_dp_mst_topology_mgr *mgr)
4187 struct drm_device *dev = mgr->dev;
4189 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4190 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
4192 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
4195 * drm_dp_mst_topology_mgr_init - initialise a topology manager
4196 * @mgr: manager struct to initialise
4197 * @dev: device providing this structure - for i2c addition.
4198 * @aux: DP helper aux channel to talk to this device
4199 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
4200 * @max_payloads: maximum number of payloads this GPU can source
4201 * @conn_base_id: the connector object ID the MST device is connected to.
4203 * Return 0 for success, or negative error code on failure
4205 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
4206 struct drm_device *dev, struct drm_dp_aux *aux,
4207 int max_dpcd_transaction_bytes,
4208 int max_payloads, int conn_base_id)
4210 struct drm_dp_mst_topology_state *mst_state;
4212 mutex_init(&mgr->lock);
4213 mutex_init(&mgr->qlock);
4214 mutex_init(&mgr->payload_lock);
4215 mutex_init(&mgr->destroy_connector_lock);
4216 INIT_LIST_HEAD(&mgr->tx_msg_downq);
4217 INIT_LIST_HEAD(&mgr->destroy_connector_list);
4218 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
4219 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
4220 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
4221 init_waitqueue_head(&mgr->tx_waitq);
4224 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
4225 mgr->max_payloads = max_payloads;
4226 mgr->conn_base_id = conn_base_id;
4227 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
4228 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
4230 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
4233 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
4234 if (!mgr->proposed_vcpis)
4236 set_bit(0, &mgr->payload_mask);
4238 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
4239 if (mst_state == NULL)
4242 mst_state->mgr = mgr;
4243 INIT_LIST_HEAD(&mst_state->vcpis);
4245 drm_atomic_private_obj_init(dev, &mgr->base,
4247 &drm_dp_mst_topology_state_funcs);
4251 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
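/*
 * Illustrative initialisation sketch; the surrounding driver structure and
 * the "16" limits below are hypothetical values, not requirements:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, dp->aux,
 *					   16,	// max DPCD transaction bytes
 *					   16,	// max payloads the HW can source
 *					   connector->base.id);
 *	if (ret)
 *		return ret;
 *
 * The manager is torn down again with drm_dp_mst_topology_mgr_destroy().
 */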
4254 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
4255 * @mgr: manager to destroy
4257 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
4259 drm_dp_mst_topology_mgr_set_mst(mgr, false);
4260 flush_work(&mgr->work);
4261 flush_work(&mgr->destroy_connector_work);
4262 mutex_lock(&mgr->payload_lock);
4263 kfree(mgr->payloads);
4264 mgr->payloads = NULL;
4265 kfree(mgr->proposed_vcpis);
4266 mgr->proposed_vcpis = NULL;
4267 mutex_unlock(&mgr->payload_lock);
4270 drm_atomic_private_obj_fini(&mgr->base);
4273 mutex_destroy(&mgr->destroy_connector_lock);
4274 mutex_destroy(&mgr->payload_lock);
4275 mutex_destroy(&mgr->qlock);
4276 mutex_destroy(&mgr->lock);
4278 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
4280 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
4284 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
4287 for (i = 0; i < num - 1; i++) {
4288 if (msgs[i].flags & I2C_M_RD ||
4293 return msgs[num - 1].flags & I2C_M_RD &&
4294 msgs[num - 1].len <= 0xff;
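/*
 * For illustration, the classic EDID fetch is the kind of transfer this
 * check accepts: one short write that sets the EDID offset, followed by a
 * terminating read, e.g.
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = block },
 *	};
 *
 * Anything with more than DP_REMOTE_I2C_READ_MAX_TRANSACTIONS leading
 * writes, a read in the middle, or a final read longer than 255 bytes is
 * rejected and the caller fails the transfer.
 */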
4298 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
4301 struct drm_dp_aux *aux = adapter->algo_data;
4302 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
4303 struct drm_dp_mst_branch *mstb;
4304 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
4306 struct drm_dp_sideband_msg_req_body msg;
4307 struct drm_dp_sideband_msg_tx *txmsg = NULL;
4310 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
4314 if (!remote_i2c_read_ok(msgs, num)) {
4315 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
4320 memset(&msg, 0, sizeof(msg));
4321 msg.req_type = DP_REMOTE_I2C_READ;
4322 msg.u.i2c_read.num_transactions = num - 1;
4323 msg.u.i2c_read.port_number = port->port_num;
4324 for (i = 0; i < num - 1; i++) {
4325 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
4326 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
4327 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
4328 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
4330 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
4331 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
4333 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
4340 drm_dp_encode_sideband_req(&msg, txmsg);
4342 drm_dp_queue_down_tx(mgr, txmsg);
4344 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
4347 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
4351 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
4355 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
4360 drm_dp_mst_topology_put_mstb(mstb);
4364 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
4366 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
4367 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
4368 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
4369 I2C_FUNC_10BIT_ADDR;
4372 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
4373 .functionality = drm_dp_mst_i2c_functionality,
4374 .master_xfer = drm_dp_mst_i2c_xfer,
4378 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
4379 * @aux: DisplayPort AUX channel
4381 * Returns 0 on success or a negative error code on failure.
4383 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
4385 aux->ddc.algo = &drm_dp_mst_i2c_algo;
4386 aux->ddc.algo_data = aux;
4387 aux->ddc.retries = 3;
4389 aux->ddc.class = I2C_CLASS_DDC;
4390 aux->ddc.owner = THIS_MODULE;
4391 aux->ddc.dev.parent = aux->dev;
4392 aux->ddc.dev.of_node = aux->dev->of_node;
4394 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4395 sizeof(aux->ddc.name));
4397 return i2c_add_adapter(&aux->ddc);
4401 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4402 * @aux: DisplayPort AUX channel
4404 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4406 i2c_del_adapter(&aux->ddc);