1 // SPDX-License-Identifier: GPL-2.0
/*
 * Cadence MHDP8546 DP bridge driver.
 *
 * Copyright (C) 2020 Cadence Design Systems, Inc.
 *
 * TODO:
 * - Implement optimized mailbox communication using mailbox interrupts
 * - Add support for power management
 * - Add support for features like audio, MST and fast link training
 * - Implement request_fw_cancel to handle HW_STATE
 * - Fix asynchronous loading of firmware implementation
 * - Add DRM helper function for cdns_mhdp_lower_link_rate
 */
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
32 #include <linux/of_device.h>
33 #include <linux/phy/phy.h>
34 #include <linux/phy/phy-dp.h>
35 #include <linux/platform_device.h>
36 #include <linux/slab.h>
37 #include <linux/wait.h>
39 #include <drm/display/drm_dp_helper.h>
40 #include <drm/display/drm_hdcp_helper.h>
41 #include <drm/drm_atomic.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_atomic_state_helper.h>
44 #include <drm/drm_bridge.h>
45 #include <drm/drm_connector.h>
46 #include <drm/drm_edid.h>
47 #include <drm/drm_modeset_helper_vtables.h>
48 #include <drm/drm_print.h>
49 #include <drm/drm_probe_helper.h>
51 #include <asm/unaligned.h>
53 #include "cdns-mhdp8546-core.h"
54 #include "cdns-mhdp8546-hdcp.h"
55 #include "cdns-mhdp8546-j721e.h"
/*
 * Pop one byte from the mailbox RX FIFO, polling the EMPTY flag first.
 * Caller must hold mbox_mutex (enforced by the WARN_ON below).
 * NOTE(review): listing gap — local declarations, the poll-timeout error
 * return and the closing brace are missing from this view.
 */
57 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
61 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
/* Wait until the RX FIFO is non-empty before reading the data register. */
63 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
64 empty, !empty, MAILBOX_RETRY_US,
/* Only the low byte of RX_DATA is meaningful. */
69 return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
/*
 * Push one byte into the mailbox TX FIFO, polling the FULL flag first.
 * Caller must hold mbox_mutex.
 * NOTE(review): listing gap — declarations, timeout error path and the
 * return/closing brace are missing from this view.
 */
72 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
76 WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
/* Wait for room in the TX FIFO. */
78 ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
79 full, !full, MAILBOX_RETRY_US,
84 writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
/*
 * Read and validate a 4-byte mailbox response header:
 * header[0] = opcode, header[1] = module id, header[2..3] = BE16 payload size.
 * On mismatch the stale payload is drained so the mailbox stays in sync.
 * NOTE(review): listing gap — the req_size parameter line, error checks and
 * return statements are missing from this view.
 */
89 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
90 u8 module_id, u8 opcode,
97 /* read the header of the message */
98 for (i = 0; i < sizeof(header); i++) {
99 ret = cdns_mhdp_mailbox_read(mhdp);
106 mbox_size = get_unaligned_be16(header + 2);
108 if (opcode != header[0] || module_id != header[1] ||
109 req_size != mbox_size) {
111 * If the message in mailbox is not what we want, we need to
112 * clear the mailbox by reading its contents.
114 for (i = 0; i < mbox_size; i++)
115 if (cdns_mhdp_mailbox_read(mhdp) < 0)
/*
 * Read buff_size payload bytes from the mailbox into buff.
 * NOTE(review): listing gap — error handling of each read and the success
 * return are missing from this view.
 */
124 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
125 u8 *buff, u16 buff_size)
130 for (i = 0; i < buff_size; i++) {
131 ret = cdns_mhdp_mailbox_read(mhdp);
/*
 * Send a mailbox message: a 4-byte header (opcode, module id, BE16 size)
 * followed by `size` payload bytes from `message`.
 * NOTE(review): listing gap — header[0] assignment, per-byte error checks
 * and the final return are missing from this view.
 */
141 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
142 u8 opcode, u16 size, u8 *message)
148 header[1] = module_id;
149 put_unaligned_be16(size, header + 2);
151 for (i = 0; i < sizeof(header); i++) {
152 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
157 for (i = 0; i < size; i++) {
158 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
/*
 * Read a 32-bit controller register via the GENERAL_REGISTER_READ mailbox
 * command. The response echoes the BE32 address followed by the BE32 value;
 * the echoed address is verified against the request.
 * NOTE(review): listing gap — goto-labels / early-exit error paths between
 * the numbered lines are missing from this view.
 */
167 int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
172 put_unaligned_be32(addr, msg);
174 mutex_lock(&mhdp->mbox_mutex);
176 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
177 GENERAL_REGISTER_READ,
182 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
183 GENERAL_REGISTER_READ,
188 ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
192 /* Returned address value should be the same as requested */
193 if (memcmp(msg, resp, sizeof(msg))) {
/* Value follows the 4-byte echoed address in the response buffer. */
198 *value = get_unaligned_be32(resp + 4);
201 mutex_unlock(&mhdp->mbox_mutex);
203 dev_err(mhdp->dev, "Failed to read register\n");
/*
 * Write a 32-bit value to a controller register via DPTX_WRITE_REGISTER:
 * message is BE16 address + BE32 value. No response is read.
 * NOTE(review): listing gap — the msg declaration and final return are
 * missing from this view.
 */
211 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
216 put_unaligned_be16(addr, msg);
217 put_unaligned_be32(val, msg + 2);
219 mutex_lock(&mhdp->mbox_mutex);
221 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
222 DPTX_WRITE_REGISTER, sizeof(msg), msg);
224 mutex_unlock(&mhdp->mbox_mutex);
/*
 * Read-modify-write of a register bit-field via DPTX_WRITE_FIELD: message
 * is BE16 address, start bit, bit count, then the BE32 value.
 * NOTE(review): listing gap — the field[3] (bits_no) assignment and final
 * return are missing from this view.
 */
230 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
231 u8 start_bit, u8 bits_no, u32 val)
236 put_unaligned_be16(addr, field);
237 field[2] = start_bit;
239 put_unaligned_be32(val, field + 4);
241 mutex_lock(&mhdp->mbox_mutex);
243 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
244 DPTX_WRITE_FIELD, sizeof(field), field);
246 mutex_unlock(&mhdp->mbox_mutex);
/*
 * Read `len` bytes of sink DPCD starting at `addr` through the firmware
 * DPTX_READ_DPCD command (BE16 length + BE24 address request). The response
 * header bytes are consumed into `reg` before the payload lands in `data`.
 * NOTE(review): listing gap — error-exit paths and the final return are
 * missing from this view.
 */
252 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
253 u32 addr, u8 *data, u16 len)
258 put_unaligned_be16(len, msg);
259 put_unaligned_be24(addr, msg + 2);
261 mutex_lock(&mhdp->mbox_mutex);
263 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
264 DPTX_READ_DPCD, sizeof(msg), msg);
268 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
274 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
278 ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
281 mutex_unlock(&mhdp->mbox_mutex);
/*
 * Write a single DPCD byte via DPTX_WRITE_DPCD (BE16 count = 1, BE24
 * address, value). The echoed address in the response is verified.
 * NOTE(review): listing gap — the msg[5] = value assignment, error exits
 * and the final return are missing from this view.
 */
287 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
292 put_unaligned_be16(1, msg);
293 put_unaligned_be24(addr, msg + 2);
296 mutex_lock(&mhdp->mbox_mutex);
298 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
299 DPTX_WRITE_DPCD, sizeof(msg), msg);
303 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
304 DPTX_WRITE_DPCD, sizeof(reg));
308 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
/* Firmware echoes the address it wrote; mismatch means a desync. */
312 if (addr != get_unaligned_be24(reg + 2))
316 mutex_unlock(&mhdp->mbox_mutex);
319 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
/*
 * Switch firmware between FW_ACTIVE and FW_STANDBY with a hand-built
 * GENERAL_MAIN_CONTROL message (bypassing cdns_mhdp_mailbox_send), then
 * read back the firmware state.
 * NOTE(review): listing gap — msg[2]/msg[3] (size bytes), loop error
 * checks and the state validation are missing from this view.
 */
324 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
329 msg[0] = GENERAL_MAIN_CONTROL;
330 msg[1] = MB_MODULE_ID_GENERAL;
333 msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
335 mutex_lock(&mhdp->mbox_mutex);
337 for (i = 0; i < sizeof(msg); i++) {
338 ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
343 /* read the firmware state */
344 ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
351 mutex_unlock(&mhdp->mbox_mutex);
354 dev_err(mhdp->dev, "set firmware active failed\n");
/*
 * Query the firmware for the current HPD line state (DPTX_HPD_STATE).
 * Returns the status byte on success; both the success and error paths
 * unlock mbox_mutex (L142 vs L144).
 * NOTE(review): listing gap — goto targets, the status return and error
 * return are missing from this view.
 */
359 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
364 mutex_lock(&mhdp->mbox_mutex);
366 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
367 DPTX_HPD_STATE, 0, NULL);
371 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
377 ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
381 mutex_unlock(&mhdp->mbox_mutex);
383 dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
389 mutex_unlock(&mhdp->mbox_mutex);
/*
 * drm_do_get_edid() callback: fetch one EDID block from the sink through
 * the firmware DPTX_GET_EDID command. Retries up to 4 times; the 2-byte
 * response header (returned length, block/2) is checked against the request.
 * NOTE(review): listing gap — msg[] setup for block/segment, error exits
 * and the final return are missing from this view.
 */
395 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
396 unsigned int block, size_t length)
398 struct cdns_mhdp_device *mhdp = data;
399 u8 msg[2], reg[2], i;
402 mutex_lock(&mhdp->mbox_mutex);
404 for (i = 0; i < 4; i++) {
408 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
409 DPTX_GET_EDID, sizeof(msg), msg);
413 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
415 sizeof(reg) + length);
419 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
423 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
/* Success when the firmware echoes the expected length and block index. */
427 if (reg[0] == length && reg[1] == block / 2)
431 mutex_unlock(&mhdp->mbox_mutex);
434 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
/*
 * Read and decode the latched HPD event byte (DPTX_READ_EVENT): reports
 * TO_HIGH / TO_LOW / PULSE transitions and the current HPD_STATE bit.
 * NOTE(review): listing gap — the event declaration, error exits and the
 * return of the event value are missing from this view.
 */
441 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
446 mutex_lock(&mhdp->mbox_mutex);
448 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
449 DPTX_READ_EVENT, 0, NULL);
453 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
454 DPTX_READ_EVENT, sizeof(event));
458 ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
460 mutex_unlock(&mhdp->mbox_mutex);
465 dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
466 (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
467 (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
468 (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
469 (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
/*
 * Drive one link-training adjustment step through firmware: send the
 * per-lane drive settings (payload = nlanes, BE16 delay, lanes_data),
 * then parse the DPCD-read-style response to fill link_status[]. The
 * echoed DPCD address must be DP_LANE0_1_STATUS.
 * NOTE(review): listing gap — payload declaration/payload[0] assignment,
 * error exits and the final return are missing from this view.
 */
475 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
476 unsigned int udelay, const u8 *lanes_data,
477 u8 link_status[DP_LINK_STATUS_SIZE])
480 u8 hdr[5]; /* For DPCD read response header */
484 if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
485 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
491 put_unaligned_be16(udelay, payload + 1);
492 memcpy(payload + 3, lanes_data, nlanes);
494 mutex_lock(&mhdp->mbox_mutex);
496 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
498 sizeof(payload), payload);
502 /* Yes, read the DPCD read command response */
503 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
505 sizeof(hdr) + DP_LINK_STATUS_SIZE);
509 ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
513 addr = get_unaligned_be24(hdr + 2);
514 if (addr != DP_LANE0_1_STATUS)
517 ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
518 DP_LINK_STATUS_SIZE);
521 mutex_unlock(&mhdp->mbox_mutex);
524 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
/*
 * NOTE(review): the kernel-doc opener and several statements (early
 * return for DPCD < 1.1, readb/writeb error returns, final return) are
 * missing from this listing.
 */
530 * cdns_mhdp_link_power_up() - power up a DisplayPort link
531 * @aux: DisplayPort AUX channel
532 * @link: pointer to a structure containing the link configuration
534 * Returns 0 on success or a negative error code on failure.
537 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
542 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
543 if (link->revision < 0x11)
546 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
/* Set sink power state to D0 (normal operation). */
550 value &= ~DP_SET_POWER_MASK;
551 value |= DP_SET_POWER_D0;
553 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
558 * According to the DP 1.1 specification, a "Sink Device must exit the
559 * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
560 * Control Field" (register 0x600).
562 usleep_range(1000, 2000);
/*
 * NOTE(review): kernel-doc opener and several statements (early return,
 * error returns, final return) are missing from this listing.
 */
568 * cdns_mhdp_link_power_down() - power down a DisplayPort link
569 * @aux: DisplayPort AUX channel
570 * @link: pointer to a structure containing the link configuration
572 * Returns 0 on success or a negative error code on failure.
575 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
576 struct cdns_mhdp_link *link)
581 /* DP_SET_POWER register is only available on DPCD v1.1 and later */
582 if (link->revision < 0x11)
585 err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
/* Put the sink into the D3 power-saving state. */
589 value &= ~DP_SET_POWER_MASK;
590 value |= DP_SET_POWER_D3;
592 err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
/*
 * NOTE(review): kernel-doc opener, the values[] declaration, the error
 * check after drm_dp_dpcd_write() and the final return are missing from
 * this listing.
 */
600 * cdns_mhdp_link_configure() - configure a DisplayPort link
601 * @aux: DisplayPort AUX channel
602 * @link: pointer to a structure containing the link configuration
604 * Returns 0 on success or a negative error code on failure.
607 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
608 struct cdns_mhdp_link *link)
/* Program DP_LINK_BW_SET / DP_LANE_COUNT_SET in one 2-byte write. */
613 values[0] = drm_dp_link_rate_to_bw_code(link->rate);
614 values[1] = link->num_lanes;
616 if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
617 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
619 err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
/* Highest link rate supported by both host and sink (closing brace not shown). */
626 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
628 return min(mhdp->host.link_rate, mhdp->sink.link_rate);
/* Highest lane count supported by both host and sink (closing brace not shown). */
631 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
633 return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
/* Highest training pattern (TPS) supported by both sides: fls() of the
 * intersected support bitmasks. Closing brace not shown in this listing. */
636 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
638 return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
/* True when spread-spectrum clocking can be enabled on this link. */
641 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
643 /* Check if SSC is supported by both sides */
644 return mhdp->host.ssc && mhdp->sink.ssc;
/*
 * Map the cached mhdp->plugged flag to a DRM connector status.
 * NOTE(review): the `if (mhdp->plugged)` guard between L243 and L244 is
 * missing from this listing.
 */
647 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
649 dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
652 return connector_status_connected;
654 return connector_status_disconnected;
/*
 * Read and sanity-check the firmware/library version registers. Pre-2.x
 * firmware stored a repository revision instead of a structured version;
 * lib_ver < 33984 identifies that legacy scheme.
 * NOTE(review): fw_ver/lib_ver declarations, the legacy-branch version
 * assignments and return statements are missing from this listing.
 */
657 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
659 u32 major_num, minor_num, revision;
662 fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
663 | readl(mhdp->regs + CDNS_VER_L);
665 lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
666 | readl(mhdp->regs + CDNS_LIB_L_ADDR);
668 if (lib_ver < 33984) {
670 * Older FW versions with major number 1, used to store FW
671 * version information by storing repository revision number
672 * in registers. This is for identifying these FW versions.
676 if (fw_ver == 26098) {
678 } else if (lib_ver == 0 && fw_ver == 0) {
681 dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
686 /* To identify newer FW versions with major number 2 onwards. */
687 major_num = fw_ver / 10000;
688 minor_num = (fw_ver / 100) % 100;
689 revision = (fw_ver % 10000) % 100;
692 dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
/*
 * Load the firmware image into IMEM, start the uCPU, wait for the
 * KEEP_ALIVE heartbeat, verify the firmware version, clear latched event
 * registers, activate the firmware, and (under start_lock) mark the HW
 * ready and unmask the SW-event interrupt if a bridge is attached.
 * NOTE(review): several error-return statements between the numbered
 * lines are missing from this listing.
 */
697 static int cdns_mhdp_fw_activate(const struct firmware *fw,
698 struct cdns_mhdp_device *mhdp)
703 /* Release uCPU reset and stall it. */
704 writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
706 memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
708 /* Leave debug mode, release stall */
709 writel(0, mhdp->regs + CDNS_APB_CTRL);
712 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
713 * Updated each sched "tick" (~2ms)
715 ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
716 reg & CDNS_KEEP_ALIVE_MASK, 500,
717 CDNS_KEEP_ALIVE_TIMEOUT);
720 "device didn't give any life sign: reg %d\n", reg);
724 ret = cdns_mhdp_check_fw_version(mhdp);
728 /* Init events to 0 as it's not cleared by FW at boot but on read */
729 readl(mhdp->regs + CDNS_SW_EVENT0);
730 readl(mhdp->regs + CDNS_SW_EVENT1);
731 readl(mhdp->regs + CDNS_SW_EVENT2);
732 readl(mhdp->regs + CDNS_SW_EVENT3);
735 ret = cdns_mhdp_set_firmware_active(mhdp, true);
739 spin_lock(&mhdp->start_lock);
741 mhdp->hw_state = MHDP_HW_READY;
744 * Here we must keep the lock while enabling the interrupts
745 * since it would otherwise be possible that interrupt enable
746 * code is executed after the bridge is detached. The similar
747 * situation is not possible in attach()/detach() callbacks
748 * since the hw_state changes from MHDP_HW_READY to
749 * MHDP_HW_STOPPED happens only due to driver removal when
750 * bridge should already be detached.
752 if (mhdp->bridge_attached)
753 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
754 mhdp->regs + CDNS_APB_INT_MASK);
756 spin_unlock(&mhdp->start_lock);
/* Wake anyone blocked in the bridge attach path waiting for FW load. */
758 wake_up(&mhdp->fw_load_wq);
759 dev_dbg(mhdp->dev, "DP FW activated\n");
/*
 * request_firmware_nowait() completion callback: activate the firmware,
 * release it, then — if a bridge is attached — deliver a hotplug event
 * (legacy connector path) or an HPD notification.
 * NOTE(review): the error-handling/return after cdns_mhdp_fw_activate()
 * and the `else` joining L317/L318 are missing from this listing.
 */
764 static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
766 struct cdns_mhdp_device *mhdp = context;
767 bool bridge_attached;
770 dev_dbg(mhdp->dev, "firmware callback\n");
772 if (!fw || !fw->data) {
773 dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
777 ret = cdns_mhdp_fw_activate(fw, mhdp);
779 release_firmware(fw);
785 * XXX how to make sure the bridge is still attached when
786 * calling drm_kms_helper_hotplug_event() after releasing
787 * the lock? We should not hold the spin lock when
788 * calling drm_kms_helper_hotplug_event() since it may
789 * cause a dead lock. FB-dev console calls detect from the
790 * same thread just down the call stack started here.
792 spin_lock(&mhdp->start_lock);
793 bridge_attached = mhdp->bridge_attached;
794 spin_unlock(&mhdp->start_lock);
795 if (bridge_attached) {
796 if (mhdp->connector.dev)
797 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
799 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
/*
 * Kick off asynchronous firmware loading; cdns_mhdp_fw_cb() completes the
 * bring-up. NOTE(review): the error check and return statements are
 * missing from this listing.
 */
803 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
807 ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
808 GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
810 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
/*
 * drm_dp_aux transfer hook: only native AUX reads/writes are supported
 * (firmware owns the low-level AUX channel). Writes go byte-by-byte via
 * cdns_mhdp_dpcd_write(); reads via cdns_mhdp_dpcd_read().
 * NOTE(review): the unsupported-request return, loop braces, error
 * returns and the final size return are missing from this listing.
 */
818 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
819 struct drm_dp_aux_msg *msg)
821 struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
824 if (msg->request != DP_AUX_NATIVE_WRITE &&
825 msg->request != DP_AUX_NATIVE_READ)
828 if (msg->request == DP_AUX_NATIVE_WRITE) {
829 const u8 *buf = msg->buffer;
832 for (i = 0; i < msg->size; ++i) {
833 ret = cdns_mhdp_dpcd_write(mhdp,
834 msg->address + i, buf[i]);
839 "Failed to write DPCD addr %u\n",
845 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
846 msg->buffer, msg->size);
849 "Failed to read DPCD addr %u\n",
/*
 * Prepare sink, controller and PHY for link training: disable any active
 * training pattern, reset PHY config, program enhanced framing / lane
 * enables, write link rate+lanes to the sink, configure the PHY (rate,
 * lanes, SSC, zeroed swing/pre-emphasis), then start TPS1 with
 * scrambling disabled on both PHY and sink.
 * NOTE(review): phy_configure() error return and the final return are
 * missing from this listing.
 */
859 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
861 union phy_configure_opts phy_cfg;
865 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
866 DP_TRAINING_PATTERN_DISABLE);
868 /* Reset PHY configuration */
869 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
870 if (!mhdp->host.scrambler)
871 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
873 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
875 cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
876 mhdp->sink.enhanced & mhdp->host.enhanced);
878 cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
879 CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
881 cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
/* PHY API wants link rate in Mb/s; mhdp->link.rate is in 10 kb/s units. */
882 phy_cfg.dp.link_rate = mhdp->link.rate / 100;
883 phy_cfg.dp.lanes = mhdp->link.num_lanes;
885 memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
886 memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
888 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
889 phy_cfg.dp.set_lanes = true;
890 phy_cfg.dp.set_rate = true;
891 phy_cfg.dp.set_voltages = true;
892 ret = phy_configure(mhdp->phy, &phy_cfg);
894 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
899 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
900 CDNS_PHY_COMMON_CONFIG |
901 CDNS_PHY_TRAINING_EN |
902 CDNS_PHY_TRAINING_TYPE(1) |
903 CDNS_PHY_SCRAMBLER_BYPASS);
905 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
906 DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
/*
 * Convert the sink's per-lane adjustment requests from link_status[] into
 * TRAINING_LANEx_SET values (lanes_data[]) and PHY settings (phy_cfg),
 * clamping to the host limits and to the DP rule that voltage-swing +
 * pre-emphasis levels must not exceed 3 combined.
 */
911 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
912 u8 link_status[DP_LINK_STATUS_SIZE],
913 u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
914 union phy_configure_opts *phy_cfg)
916 u8 adjust, max_pre_emph, max_volt_swing;
917 u8 set_volt, set_pre;
920 max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
921 << DP_TRAIN_PRE_EMPHASIS_SHIFT;
922 max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
924 for (i = 0; i < mhdp->link.num_lanes; i++) {
925 /* Check if Voltage swing and pre-emphasis are within limits */
926 adjust = drm_dp_get_adjust_request_voltage(link_status, i);
927 set_volt = min(adjust, max_volt_swing);
929 adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
930 set_pre = min(adjust, max_pre_emph)
931 >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
934 * Voltage swing level and pre-emphasis level combination is
935 * not allowed: leaving pre-emphasis as-is, and adjusting
938 if (set_volt + set_pre > 3)
939 set_volt = 3 - set_pre;
941 phy_cfg->dp.voltage[i] = set_volt;
942 lanes_data[i] = set_volt;
/* Flag MAX_SWING / MAX_PRE_EMPHASIS so the sink stops asking for more. */
944 if (set_volt == max_volt_swing)
945 lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
947 phy_cfg->dp.pre[i] = set_pre;
948 lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
950 if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
951 lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
/*
 * Overwrite the voltage-swing adjust-request field for one lane inside a
 * cached link_status[] buffer (lane 0/1 share one byte; odd lanes use the
 * LANE1 shift). Listing is missing the function's static/void prefix line
 * context and closing brace.
 */
956 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
957 unsigned int lane, u8 volt)
959 unsigned int s = ((lane & 1) ?
960 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
961 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
962 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
964 link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
965 link_status[idx] |= volt << s;
/*
 * Overwrite the pre-emphasis adjust-request field for one lane inside a
 * cached link_status[] buffer; mirror of
 * cdns_mhdp_set_adjust_request_voltage() above.
 */
969 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
970 unsigned int lane, u8 pre_emphasis)
972 unsigned int s = ((lane & 1) ?
973 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
974 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
975 unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
977 link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
978 link_status[idx] |= pre_emphasis << s;
/*
 * During the EQ phase, rewrite the sink's adjust requests in-place so the
 * next iteration never exceeds host limits, honoring the FORCE_VOLT_SWING
 * / FORCE_PRE_EMPHASIS overrides.
 * NOTE(review): the min()/clamping arguments on the wrapped call lines
 * are missing from this listing.
 */
981 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
982 u8 link_status[DP_LINK_STATUS_SIZE])
984 u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
985 u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
989 for (i = 0; i < mhdp->link.num_lanes; i++) {
990 volt = drm_dp_get_adjust_request_voltage(link_status, i);
991 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
993 cdns_mhdp_set_adjust_request_voltage(link_status, i,
995 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
996 cdns_mhdp_set_adjust_request_voltage(link_status, i,
998 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
999 cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
/*
 * Debug helper: print link-training status with per-lane voltage-swing and
 * pre-emphasis rendered as "a/b/c/d" strings (truncated to num_lanes).
 */
1004 static void cdns_mhdp_print_lt_status(const char *prefix,
1005 struct cdns_mhdp_device *mhdp,
1006 union phy_configure_opts *phy_cfg)
1008 char vs[8] = "0/0/0/0";
1009 char pe[8] = "0/0/0/0";
1012 for (i = 0; i < mhdp->link.num_lanes; i++) {
1013 vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1014 pe[i * 2] = '0' + phy_cfg->dp.pre[i];
/* Terminate right after the last used lane digit. */
1017 vs[i * 2 - 1] = '\0';
1018 pe[i * 2 - 1] = '\0';
1020 dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1022 mhdp->link.num_lanes, mhdp->link.rate / 100,
/*
 * Channel-equalization phase: enable TPS[eq_tps] in the PHY and sink (TPS4
 * uses scrambling, lower patterns disable it), then iterate: compute lane
 * adjustments, push them to the PHY and firmware, and re-read link status.
 * Succeeds when channel EQ is OK; bails out if clock recovery is lost or
 * after 5 short failures.
 * NOTE(review): the eq_tps parameter line, loop opener, and the final
 * true/false returns are missing from this listing.
 */
1026 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
1028 unsigned int training_interval)
1030 u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
1031 u8 link_status[DP_LINK_STATUS_SIZE];
1032 union phy_configure_opts phy_cfg;
1037 dev_dbg(mhdp->dev, "Starting EQ phase\n");
1039 /* Enable link training TPS[eq_tps] in PHY */
1040 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
1041 CDNS_PHY_TRAINING_TYPE(eq_tps);
1043 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1044 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1046 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1047 (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
1048 CDNS_DP_TRAINING_PATTERN_4);
1050 drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1053 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1055 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1056 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1057 phy_cfg.dp.set_lanes = false;
1058 phy_cfg.dp.set_rate = false;
1059 phy_cfg.dp.set_voltages = true;
1060 ret = phy_configure(mhdp->phy, &phy_cfg);
1062 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1067 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
1068 training_interval, lanes_data, link_status);
/* CR must stay locked throughout EQ; losing it aborts the phase. */
1070 r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
1074 if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
1075 cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
1080 fail_counter_short++;
1082 cdns_mhdp_adjust_requested_eq(mhdp, link_status);
1083 } while (fail_counter_short < 5);
1086 cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
/*
 * During the CR phase, rewrite the sink's per-lane adjust requests with
 * the previously validated (clamped) values, or force the host maximum
 * when the FORCE_* flags are set.
 */
1091 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1092 u8 link_status[DP_LINK_STATUS_SIZE],
1093 u8 *req_volt, u8 *req_pre)
1095 const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1096 const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1099 for (i = 0; i < mhdp->link.num_lanes; i++) {
1102 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1103 max_volt : req_volt[i];
1104 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1106 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1107 max_pre : req_pre[i];
1108 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
/*
 * Evaluate one clock-recovery iteration: report whether CR locked
 * (*cr_done), whether the sink requested the same settings we already
 * drive (*same_before_adjust — stalemate detection), and whether the
 * combined swing+pre-emphasis budget is exhausted (*max_swing_reached,
 * DP 1.4 §3.1.5.2). Clamped per-lane requests are returned via req_volt /
 * req_pre.
 * NOTE(review): the req_pre parameter line and some loop interior lines
 * are missing from this listing.
 */
1113 void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
1114 bool *same_before_adjust, bool *max_swing_reached,
1115 u8 before_cr[CDNS_DP_MAX_NUM_LANES],
1116 u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
1119 const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1120 const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1121 bool same_pre, same_volt;
1125 *same_before_adjust = false;
1126 *max_swing_reached = false;
1127 *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
1129 for (i = 0; i < mhdp->link.num_lanes; i++) {
1130 adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
1131 req_volt[i] = min(adjust, max_volt);
1133 adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
1134 DP_TRAIN_PRE_EMPHASIS_SHIFT;
1135 req_pre[i] = min(adjust, max_pre);
1137 same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
1138 req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1139 same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
1141 if (same_pre && same_volt)
1142 *same_before_adjust = true;
1144 /* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
1145 if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
1146 *max_swing_reached = true;
/*
 * Clock-recovery phase: initialize training (TPS1), then loop — compute
 * lane settings, program the PHY, run one firmware adjust step (100 us),
 * and validate. Exits true on CR lock; aborts on max-swing, 5 consecutive
 * stalemates, or 10 total failed iterations.
 * NOTE(review): loop opener, several goto/error paths and the final
 * true/false returns are missing from this listing.
 */
1152 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1154 u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1155 fail_counter_short = 0, fail_counter_cr_long = 0;
1156 u8 link_status[DP_LINK_STATUS_SIZE];
1158 union phy_configure_opts phy_cfg;
1161 dev_dbg(mhdp->dev, "Starting CR phase\n");
1163 ret = cdns_mhdp_link_training_init(mhdp);
1167 drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1170 u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1171 u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1172 bool same_before_adjust, max_swing_reached;
1174 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1176 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1177 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1178 phy_cfg.dp.set_lanes = false;
1179 phy_cfg.dp.set_rate = false;
1180 phy_cfg.dp.set_voltages = true;
1181 ret = phy_configure(mhdp->phy, &phy_cfg);
1183 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
/* 100 us is the shortest AUX read interval allowed during CR. */
1188 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1189 lanes_data, link_status);
1191 cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1192 &max_swing_reached, lanes_data,
1194 requested_adjust_volt_swing,
1195 requested_adjust_pre_emphasis);
1197 if (max_swing_reached) {
1198 dev_err(mhdp->dev, "CR: max swing reached\n");
1203 cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1208 /* Not all CR_DONE bits set */
1209 fail_counter_cr_long++;
1211 if (same_before_adjust) {
1212 fail_counter_short++;
1216 fail_counter_short = 0;
1218 * Voltage swing/pre-emphasis adjust requested
1221 cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1222 requested_adjust_volt_swing,
1223 requested_adjust_pre_emphasis);
1224 } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1227 cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
/*
 * Step the link rate down one notch (8.1 -> 5.4 -> 2.7 -> 1.62 Gb/s) for
 * training fallback. NOTE(review): the `break` statements and default
 * case between the labels are missing from this listing.
 */
1232 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1234 switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1235 case DP_LINK_BW_2_7:
1236 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1238 case DP_LINK_BW_5_4:
1239 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1241 case DP_LINK_BW_8_1:
1242 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
/*
 * Full link training with fallback: retry CR at lower rates first, then
 * fewer lanes; retry EQ with fewer lanes first, then lower rates. On
 * success, restore scrambling, enable the framer with the final lane
 * count, and reset the PHY training config. The error path (from ~L589)
 * also resets the PHY and disables the sink training pattern.
 * NOTE(review): loop/goto structure lines and return statements are
 * missing from this listing. Also, '®32' on the numbered line 1308 is
 * mojibake of '&reg32' (HTML-entity corruption) — must be restored
 * before this can compile.
 */
1247 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1248 unsigned int training_interval)
1251 const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1255 if (!cdns_mhdp_link_training_cr(mhdp)) {
1256 if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1259 "Reducing link rate during CR phase\n");
1260 cdns_mhdp_lower_link_rate(&mhdp->link);
1263 } else if (mhdp->link.num_lanes > 1) {
1265 "Reducing lanes number during CR phase\n");
1266 mhdp->link.num_lanes >>= 1;
1267 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1273 "Link training failed during CR phase\n");
1277 if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1281 if (mhdp->link.num_lanes > 1) {
1283 "Reducing lanes number during EQ phase\n");
1284 mhdp->link.num_lanes >>= 1;
1287 } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1290 "Reducing link rate during EQ phase\n");
1291 cdns_mhdp_lower_link_rate(&mhdp->link);
1292 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1297 dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1301 dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1302 mhdp->link.num_lanes, mhdp->link.rate / 100);
1304 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1305 mhdp->host.scrambler ? 0 :
1306 DP_LINK_SCRAMBLING_DISABLE);
1308 ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, ®32);
1311 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1315 reg32 &= ~GENMASK(1, 0);
1316 reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1317 reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1318 reg32 |= CDNS_DP_FRAMER_EN;
1319 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1321 /* Reset PHY config */
1322 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1323 if (!mhdp->host.scrambler)
1324 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1325 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1329 /* Reset PHY config */
1330 reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1331 if (!mhdp->host.scrambler)
1332 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1333 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1335 drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1336 DP_TRAINING_PATTERN_DISABLE);
/*
 * Convert the DPCD TRAINING_AUX_RD_INTERVAL code to microseconds:
 * code N (1..4) -> 4000 << (N-1) us. NOTE(review): the interval==0 branch
 * (400 us) and error return are missing from this listing.
 */
1341 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1347 return 4000 << (interval - 1);
1349 "wrong training interval returned by DPCD: %d\n", interval);
/*
 * Populate mhdp->host capabilities from PHY attributes, with safe
 * defaults: 4 lanes and HBR3 when the PHY reports nothing, full
 * swing/pre-emphasis range, all TPS patterns, enhanced framing, and
 * scrambling on / SSC off.
 * NOTE(review): the rate-unit conversion statement under the
 * "PHY uses Mb/s" comment is missing from this listing.
 */
1353 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1355 unsigned int link_rate;
1357 /* Get source capabilities based on PHY attributes */
1359 mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1360 if (!mhdp->host.lanes_cnt)
1361 mhdp->host.lanes_cnt = 4;
1363 link_rate = mhdp->phy->attrs.max_link_rate;
1365 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1367 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1370 mhdp->host.link_rate = link_rate;
1371 mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1372 mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1373 mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1374 CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1375 CDNS_SUPPORT_TPS(4);
1376 mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1377 mhdp->host.fast_link = false;
1378 mhdp->host.enhanced = true;
1379 mhdp->host.scrambler = true;
1380 mhdp->host.ssc = false;
/*
 * Populate mhdp->sink capabilities from the already-parsed link fields
 * and the raw DPCD receiver-capability block: SSC (MAX_DOWNSPREAD),
 * TPS3/TPS4 support, and no-AUX-handshake fast link training.
 */
1383 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1384 u8 dpcd[DP_RECEIVER_CAP_SIZE])
1386 mhdp->sink.link_rate = mhdp->link.rate;
1387 mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1388 mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1389 DP_LINK_CAP_ENHANCED_FRAMING);
1391 /* Set SSC support */
1392 mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1393 DP_MAX_DOWNSPREAD_0_5);
1395 /* Set TPS support */
1396 mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1397 if (drm_dp_tps3_supported(dpcd))
1398 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1399 if (drm_dp_tps4_supported(dpcd))
1400 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1402 /* Set fast link support */
1403 mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1404 DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
/*
 * Bring the DP link up: read receiver caps (using the extended cap field
 * at DP_DP13_DPCD_REV when advertised), power up the sink, intersect
 * host/sink capabilities, disable the framer for training, program
 * downspread + 8b/10b coding, then run link training with the
 * DPCD-advertised AUX read interval. Sets mhdp->link_up on success.
 * Must be called with link_mutex held (see WARN_ON).
 * NOTE(review): the ext_cap_chk read target, the else-branch addr
 * assignment, fast-link handling and return statements are missing from
 * this listing.
 */
1407 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1409 u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1410 u32 resp, interval, interval_us;
1415 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1417 drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1420 if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1421 addr = DP_DP13_DPCD_REV;
1425 err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1427 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1431 mhdp->link.revision = dpcd[0];
1432 mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1433 mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1435 if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1436 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1438 dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1439 cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1441 cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1443 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1444 mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1446 /* Disable framer for link training */
1447 err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1450 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1455 resp &= ~CDNS_DP_FRAMER_EN;
1456 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1458 /* Spread AMP if required, enable 8b/10b coding */
1459 amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1460 amp[1] = DP_SET_ANSI_8B10B;
1461 drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
/* NOTE(review): '&' here looks like it should be '&&' upstream — bitwise
 * AND of two bools happens to work but reads as a typo; confirm. */
1463 if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1464 dev_err(mhdp->dev, "fastlink not supported\n");
1468 interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1469 interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1471 cdns_mhdp_link_training(mhdp, interval_us)) {
1472 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1476 mhdp->link_up = true;
/*
 * cdns_mhdp_link_down() - power down the sink and mark the link as down.
 * Must be called with link_mutex held.
 */
1481 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
1483 WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1486 cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
1488 mhdp->link_up = false;
/*
 * cdns_mhdp_get_edid() - read the sink's EDID block-by-block via the
 * firmware mailbox (cdns_mhdp_get_edid_block). Returns NULL on failure.
 */
1491 static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
1492 struct drm_connector *connector)
1497 return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
/*
 * cdns_mhdp_get_modes() - connector .get_modes hook
 *
 * Reads the EDID, publishes it on the connector and adds the probed modes.
 * Also warns (without failing) when the display's color formats or bpc do
 * not cover the single format this driver currently outputs.
 */
1500 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1502 struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1509 edid = cdns_mhdp_get_edid(mhdp, connector);
1511 dev_err(mhdp->dev, "Failed to read EDID\n");
1515 drm_connector_update_edid_property(connector, edid);
1516 num_modes = drm_add_edid_modes(connector, edid);
1520 * HACK: Warn about unsupported display formats until we deal
1521 * with them correctly.
1523 if (connector->display_info.color_formats &&
1524 !(connector->display_info.color_formats &
1525 mhdp->display_fmt.color_format))
1527 "%s: No supported color_format found (0x%08x)\n",
1528 __func__, connector->display_info.color_formats);
1530 if (connector->display_info.bpc &&
1531 connector->display_info.bpc < mhdp->display_fmt.bpc)
1532 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1533 __func__, connector->display_info.bpc,
1534 mhdp->display_fmt.bpc);
/* Connector .detect_ctx hook: delegate hot-plug detection to the device. */
1539 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1540 struct drm_modeset_acquire_ctx *ctx,
1543 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1545 return cdns_mhdp_detect(mhdp);
/*
 * cdns_mhdp_get_bpp() - bits per pixel for the given display format.
 * RGB444/YCbCr444 carry 3 full components per pixel; YCbCr420 averages
 * 1.5 components per pixel due to chroma subsampling.
 */
1548 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1555 switch (fmt->color_format) {
1556 case DRM_COLOR_FORMAT_RGB444:
1557 case DRM_COLOR_FORMAT_YCBCR444:
1560 case DRM_COLOR_FORMAT_YCBCR422:
1563 case DRM_COLOR_FORMAT_YCBCR420:
1564 bpp = fmt->bpc * 3 / 2;
/*
 * cdns_mhdp_bandwidth_ok() - check that @mode fits within the link budget
 * of @lanes lanes at @rate. Returns false (with a debug message) when the
 * required bandwidth exceeds the available one.
 */
1574 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1575 const struct drm_display_mode *mode,
1576 unsigned int lanes, unsigned int rate)
1578 u32 max_bw, req_bw, bpp;
1581 * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1582 * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1583 * value thus equals the bandwidth in 10kb/s units, which matches the
1584 * units of the rate parameter.
1587 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1588 req_bw = mode->clock * bpp / 8;
1589 max_bw = lanes * rate;
1590 if (req_bw > max_bw) {
1592 "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1593 mode->name, req_bw, max_bw);
/*
 * cdns_mhdp_mode_valid() - connector .mode_valid hook.
 * Rejects modes that exceed the bandwidth of the currently trained link;
 * link_mutex guards against a concurrent retrain changing rate/lanes.
 */
1602 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1603 struct drm_display_mode *mode)
1605 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1607 mutex_lock(&mhdp->link_mutex);
1609 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1611 mutex_unlock(&mhdp->link_mutex);
1612 return MODE_CLOCK_HIGH;
1615 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_connector_atomic_check() - connector .atomic_check hook
 *
 * Only meaningful when HDCP is supported. Forces a modeset (mode_changed)
 * when the desired content-protection state actually changes, following
 * the standard DRM HDCP state-machine rules: a content-type change
 * re-requests DESIRED, and losing the CRTC downgrades ENABLED to DESIRED.
 */
1619 static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
1620 struct drm_atomic_state *state)
1622 struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1623 struct drm_connector_state *old_state, *new_state;
1624 struct drm_crtc_state *crtc_state;
1627 if (!mhdp->hdcp_supported)
1630 old_state = drm_atomic_get_old_connector_state(state, conn);
1631 new_state = drm_atomic_get_new_connector_state(state, conn);
1632 old_cp = old_state->content_protection;
1633 new_cp = new_state->content_protection;
1635 if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
1636 new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1637 new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1641 if (!new_state->crtc) {
1642 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1643 new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
/* No change, or the DESIRED->ENABLED transition driven by the HDCP
 * worker: nothing to do. */
1647 if (old_cp == new_cp ||
1648 (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1649 new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1653 crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
1654 crtc_state->mode_changed = true;
/* Connector helper vtable (used when the driver creates its own connector). */
1659 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
1660 .detect_ctx = cdns_mhdp_connector_detect,
1661 .get_modes = cdns_mhdp_get_modes,
1662 .mode_valid = cdns_mhdp_mode_valid,
1663 .atomic_check = cdns_mhdp_connector_atomic_check,
/* Connector core vtable: standard atomic helpers, no driver specifics. */
1666 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
1667 .fill_modes = drm_helper_probe_single_connector_modes,
1668 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1669 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1670 .reset = drm_atomic_helper_connector_reset,
1671 .destroy = drm_connector_cleanup,
/*
 * cdns_mhdp_connector_init() - create and wire up the DRM connector
 *
 * Used when the bridge is attached without DRM_BRIDGE_ATTACH_NO_CONNECTOR.
 * Initializes an HPD-polled DisplayPort connector, fixes its bus format to
 * RGB 12-bit (MEDIA_BUS_FMT_RGB121212_1X36), attaches it to the parent
 * encoder and, when HDCP is supported, adds the content-protection
 * property. Requires bridge->encoder to already exist.
 */
1674 static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
1676 u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
1677 struct drm_connector *conn = &mhdp->connector;
1678 struct drm_bridge *bridge = &mhdp->bridge;
1681 if (!bridge->encoder) {
1682 dev_err(mhdp->dev, "Parent encoder object not found");
1686 conn->polled = DRM_CONNECTOR_POLL_HPD;
1688 ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
1689 DRM_MODE_CONNECTOR_DisplayPort);
1691 dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
1695 drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
1697 ret = drm_display_info_set_bus_formats(&conn->display_info,
1702 ret = drm_connector_attach_encoder(conn, bridge->encoder);
1704 dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
1708 if (mhdp->hdcp_supported)
1709 ret = drm_connector_attach_content_protection_property(conn, true);
/*
 * cdns_mhdp_attach() - bridge .attach hook
 *
 * Registers the DP AUX channel, optionally creates the connector, marks the
 * bridge attached under start_lock (so the IRQ handler knows it may report
 * hotplug), and unmasks the SW event interrupt.
 */
1714 static int cdns_mhdp_attach(struct drm_bridge *bridge,
1715 enum drm_bridge_attach_flags flags)
1717 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1721 dev_dbg(mhdp->dev, "%s\n", __func__);
1723 mhdp->aux.drm_dev = bridge->dev;
1724 ret = drm_dp_aux_register(&mhdp->aux);
1728 if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
1729 ret = cdns_mhdp_connector_init(mhdp);
1731 goto aux_unregister;
/* start_lock serializes bridge_attached/hw_state against the IRQ path
 * and the firmware-load callback. */
1734 spin_lock(&mhdp->start_lock);
1736 mhdp->bridge_attached = true;
1737 hw_ready = mhdp->hw_state == MHDP_HW_READY;
1739 spin_unlock(&mhdp->start_lock);
1741 /* Enable SW event interrupts */
1743 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
1744 mhdp->regs + CDNS_APB_INT_MASK);
1748 drm_dp_aux_unregister(&mhdp->aux);
/*
 * cdns_mhdp_configure_video() - program the DP framer for @mode
 *
 * Translates the adjusted display mode and the configured display format
 * into the controller's framer/MSA registers for the current stream:
 * pixel representation (format + bpc), sync polarities, interlace flags,
 * porches, MSA horizontal/vertical timing, MISC0/MISC1 bytes, and finally
 * enables the framer. Pure register programming; no return value.
 */
1752 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1753 const struct drm_display_mode *mode)
1755 unsigned int dp_framer_sp = 0, msa_horizontal_1,
1756 msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1757 misc0 = 0, misc1 = 0, pxl_repr,
1758 front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1760 u8 stream_id = mhdp->stream_id;
1761 u32 bpp, bpc, pxlfmt, framer;
1764 pxlfmt = mhdp->display_fmt.color_format;
1765 bpc = mhdp->display_fmt.bpc;
1768 * If YCBCR supported and stream not SD, use ITU709
1769 * Need to handle ITU version with YCBCR420 when supported
1771 if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
1772 pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
1773 misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1775 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
/* Map color format to framer pixel representation + MSA MISC0 bits. */
1778 case DRM_COLOR_FORMAT_RGB444:
1779 pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1780 misc0 |= DP_COLOR_FORMAT_RGB;
1782 case DRM_COLOR_FORMAT_YCBCR444:
1783 pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1784 misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1786 case DRM_COLOR_FORMAT_YCBCR422:
1787 pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1788 misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1790 case DRM_COLOR_FORMAT_YCBCR420:
1791 pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1794 pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
/* Map bpc to the MSA bit-depth code and framer bpc field. */
1799 misc0 |= DP_TEST_BIT_DEPTH_6;
1800 pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1803 misc0 |= DP_TEST_BIT_DEPTH_8;
1804 pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1807 misc0 |= DP_TEST_BIT_DEPTH_10;
1808 pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1811 misc0 |= DP_TEST_BIT_DEPTH_12;
1812 pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1815 misc0 |= DP_TEST_BIT_DEPTH_16;
1816 pxl_repr |= CDNS_DP_FRAMER_16_BPC;
1820 bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1821 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1822 bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1824 cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
1827 hsync2vsync_pol_ctrl = 0;
1828 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1829 hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1830 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1831 hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1832 cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1833 hsync2vsync_pol_ctrl);
1835 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
1837 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1838 dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1839 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1840 dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1841 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1842 dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1843 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
1845 front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
1846 back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
1847 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
1848 CDNS_DP_FRONT_PORCH(front_porch) |
1849 CDNS_DP_BACK_PORCH(back_porch));
/* Bytes per scanline of active video. */
1851 cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
1852 mode->crtc_hdisplay * bpp / 8);
/* MSA HSYNC_START is measured from the start of the blanking period. */
1854 msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
1855 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
1856 CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
1857 CDNS_DP_MSAH0_HSYNC_START(msa_h0));
1859 hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
1860 msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
1861 CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
1862 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1863 msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
1864 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
1867 msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
1868 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
1869 CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
1870 CDNS_DP_MSAV0_VSYNC_START(msa_v0));
1872 vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
1873 msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
1874 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
1875 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1876 msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
1877 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
1880 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1881 mode->crtc_vtotal % 2 == 0)
1882 misc1 = DP_TEST_INTERLACED;
1883 if (mhdp->display_fmt.y_only)
1884 misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
1885 /* Use VSC SDP for Y420 */
1886 if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
1887 misc1 = CDNS_DP_TEST_VSC_SDP;
1889 cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
1890 misc0 | (misc1 << 8));
1892 cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
1893 CDNS_DP_H_HSYNC_WIDTH(hsync) |
1894 CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
1896 cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
1897 CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
1898 CDNS_DP_V0_VSTART(msa_v0));
1900 dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
1901 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1902 mode->crtc_vtotal % 2 == 0)
1903 dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
1905 cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
1907 cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
1908 (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1909 CDNS_DP_VB_ID_INTERLACED : 0);
1911 ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
1914 "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
/* All timing programmed: enable the framer and leave no-video mode. */
1918 framer |= CDNS_DP_FRAMER_EN;
1919 framer &= ~CDNS_DP_NO_VIDEO_MODE;
1920 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
/*
 * cdns_mhdp_sst_enable() - enable single-stream transport for @mode
 *
 * Computes the transfer-unit valid-symbols (vs) count from the ratio of
 * required to available link bandwidth, derives the line threshold used by
 * the framer, programs the TU and stream-config registers for stream 0,
 * then hands off to cdns_mhdp_configure_video() for the mode timing.
 */
1923 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1924 const struct drm_display_mode *mode)
1926 u32 rate, vs, required_bandwidth, available_bandwidth;
1927 s32 line_thresh1, line_thresh2, line_thresh = 0;
1928 int pxlclock = mode->crtc_clock;
1932 /* Get rate in MSymbols per second per lane */
1933 rate = mhdp->link.rate / 1000;
1935 bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1937 required_bandwidth = pxlclock * bpp / 8;
1938 available_bandwidth = mhdp->link.num_lanes * rate;
1940 vs = tu_size * required_bandwidth / available_bandwidth;
/* line_thresh is computed in 1/32 units (<<5) then scaled back (>>5). */
1946 line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1947 line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1948 line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1949 line_thresh = (line_thresh >> 5) + 2;
1951 mhdp->stream_id = 0;
1953 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1954 CDNS_DP_FRAMER_TU_VS(vs) |
1955 CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1956 CDNS_DP_FRAMER_TU_CNT_RST_EN);
1958 cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1959 line_thresh & GENMASK(5, 0));
1961 cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1962 CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1965 cdns_mhdp_configure_video(mhdp, mode);
/*
 * cdns_mhdp_atomic_enable() - bridge .atomic_enable hook
 *
 * Brings the link up if a sink is plugged, runs the platform enable hook,
 * turns on the stream-0 VIF clock, optionally starts HDCP (dropping
 * link_mutex around the HDCP call to avoid lock inversion with the HDCP
 * worker), verifies bandwidth for the adjusted mode and enables SST.
 * The adjusted mode is duplicated into the bridge state for later use by
 * the HPD/link-status path. On failure, schedules modeset_retry_work so
 * userspace gets a link-status-BAD uevent.
 */
1968 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1969 struct drm_bridge_state *bridge_state)
1971 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1972 struct drm_atomic_state *state = bridge_state->base.state;
1973 struct cdns_mhdp_bridge_state *mhdp_state;
1974 struct drm_crtc_state *crtc_state;
1975 struct drm_connector *connector;
1976 struct drm_connector_state *conn_state;
1977 struct drm_bridge_state *new_state;
1978 const struct drm_display_mode *mode;
1982 dev_dbg(mhdp->dev, "bridge enable\n");
1984 mutex_lock(&mhdp->link_mutex);
1986 if (mhdp->plugged && !mhdp->link_up) {
1987 ret = cdns_mhdp_link_up(mhdp);
1992 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
1993 mhdp->info->ops->enable(mhdp);
1995 /* Enable VIF clock for stream 0 */
1996 ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
1998 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
2002 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2003 resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
2005 connector = drm_atomic_get_new_connector_for_encoder(state,
2007 if (WARN_ON(!connector))
2010 conn_state = drm_atomic_get_new_connector_state(state, connector);
2011 if (WARN_ON(!conn_state))
2014 if (mhdp->hdcp_supported &&
2015 mhdp->hw_state == MHDP_HW_READY &&
2016 conn_state->content_protection ==
2017 DRM_MODE_CONTENT_PROTECTION_DESIRED) {
/* HDCP enable takes its own locks; release link_mutex around it. */
2018 mutex_unlock(&mhdp->link_mutex);
2019 cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
2020 mutex_lock(&mhdp->link_mutex);
2023 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2024 if (WARN_ON(!crtc_state))
2027 mode = &crtc_state->adjusted_mode;
2029 new_state = drm_atomic_get_new_bridge_state(state, bridge);
2030 if (WARN_ON(!new_state))
2033 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2039 cdns_mhdp_sst_enable(mhdp, mode);
2041 mhdp_state = to_cdns_mhdp_bridge_state(new_state);
/* Keep a copy of the active mode for the HPD re-enable path. */
2043 mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
2044 drm_mode_set_name(mhdp_state->current_mode);
2046 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
2048 mhdp->bridge_enabled = true;
2051 mutex_unlock(&mhdp->link_mutex);
2053 schedule_work(&mhdp->modeset_retry_work);
/*
 * cdns_mhdp_atomic_disable() - bridge .atomic_disable hook
 *
 * Reverse of atomic_enable: stop HDCP, disable the framer (enter no-video
 * mode), bring the link down, gate the stream-0 VIF clock and run the
 * platform disable hook. All under link_mutex.
 */
2056 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
2057 struct drm_bridge_state *bridge_state)
2059 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2062 dev_dbg(mhdp->dev, "%s\n", __func__);
2064 mutex_lock(&mhdp->link_mutex);
2066 if (mhdp->hdcp_supported)
2067 cdns_mhdp_hdcp_disable(mhdp);
2069 mhdp->bridge_enabled = false;
2070 cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2071 resp &= ~CDNS_DP_FRAMER_EN;
2072 resp |= CDNS_DP_NO_VIDEO_MODE;
2073 cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2075 cdns_mhdp_link_down(mhdp);
2077 /* Disable VIF clock for stream 0 */
2078 cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2079 cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2080 resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
2082 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2083 mhdp->info->ops->disable(mhdp);
2085 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_detach() - bridge .detach hook: unregister AUX, mark the
 * bridge detached under start_lock, and mask all APB interrupts so the
 * IRQ handler no longer fires hotplug work.
 */
2088 static void cdns_mhdp_detach(struct drm_bridge *bridge)
2090 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2092 dev_dbg(mhdp->dev, "%s\n", __func__);
2094 drm_dp_aux_unregister(&mhdp->aux);
2096 spin_lock(&mhdp->start_lock);
2098 mhdp->bridge_attached = false;
2100 spin_unlock(&mhdp->start_lock);
2102 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
/*
 * Duplicate the bridge state. Note only the base state is copied by the
 * helper; the embedded current_mode pointer starts out NULL in the fresh
 * zeroed allocation.
 */
2105 static struct drm_bridge_state *
2106 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2108 struct cdns_mhdp_bridge_state *state;
2110 state = kzalloc(sizeof(*state), GFP_KERNEL);
2114 __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2116 return &state->base;
/* Destroy a bridge state, freeing the duplicated current_mode if set. */
2120 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2121 struct drm_bridge_state *state)
2123 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2125 cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2127 if (cdns_mhdp_state->current_mode) {
2128 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2129 cdns_mhdp_state->current_mode = NULL;
2132 kfree(cdns_mhdp_state);
/* Allocate and reset a fresh (zeroed) bridge state. */
2135 static struct drm_bridge_state *
2136 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2138 struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2140 cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2141 if (!cdns_mhdp_state)
2144 __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2146 return &cdns_mhdp_state->base;
/*
 * cdns_mhdp_atomic_check() - bridge .atomic_check hook
 *
 * Rejects the commit when the adjusted mode needs more bandwidth than the
 * currently trained link provides; link_mutex guards rate/lane reads.
 */
2149 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2150 struct drm_bridge_state *bridge_state,
2151 struct drm_crtc_state *crtc_state,
2152 struct drm_connector_state *conn_state)
2154 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2155 const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2157 mutex_lock(&mhdp->link_mutex);
2159 if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2161 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2162 __func__, mode->name, mhdp->link.num_lanes,
2163 mhdp->link.rate / 100);
2164 mutex_unlock(&mhdp->link_mutex);
2168 mutex_unlock(&mhdp->link_mutex);
/* Bridge .detect hook: delegate to the common detect implementation. */
2172 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2174 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2176 return cdns_mhdp_detect(mhdp);
/* Bridge .get_edid hook: delegate to the common EDID reader. */
2179 static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
2180 struct drm_connector *connector)
2182 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2184 return cdns_mhdp_get_edid(mhdp, connector);
/*
 * Bridge .hpd_enable hook: unmask the SW event interrupt, but only once
 * the bridge is attached (the IRQ handler must not report hotplug before
 * the DRM device exists).
 */
2187 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
2189 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2191 /* Enable SW event interrupts */
2192 if (mhdp->bridge_attached)
2193 writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
2194 mhdp->regs + CDNS_APB_INT_MASK);
/* Bridge .hpd_disable hook: mask the SW event interrupt. */
2197 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
2199 struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2201 writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
/* Bridge vtable: atomic hooks plus detect/EDID/HPD operations. */
2204 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
2205 .atomic_enable = cdns_mhdp_atomic_enable,
2206 .atomic_disable = cdns_mhdp_atomic_disable,
2207 .atomic_check = cdns_mhdp_atomic_check,
2208 .attach = cdns_mhdp_attach,
2209 .detach = cdns_mhdp_detach,
2210 .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
2211 .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
2212 .atomic_reset = cdns_mhdp_bridge_atomic_reset,
2213 .detect = cdns_mhdp_bridge_detect,
2214 .get_edid = cdns_mhdp_bridge_get_edid,
2215 .hpd_enable = cdns_mhdp_bridge_hpd_enable,
2216 .hpd_disable = cdns_mhdp_bridge_hpd_disable,
/*
 * cdns_mhdp_detect_hpd() - read HPD event + status from the firmware
 * @hpd_pulse: out-parameter, set when the event was an HPD pulse (IRQ-HPD)
 *
 * Returns true when a sink is connected. Firmware read failures are logged
 * and treated as disconnected.
 */
2219 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2221 int hpd_event, hpd_status;
2225 hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2227 /* Getting event bits failed, bail out */
2228 if (hpd_event < 0) {
2229 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2230 __func__, hpd_event);
2234 hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2235 if (hpd_status < 0) {
2236 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2237 __func__, hpd_status);
2241 if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2244 return !!hpd_status;
/*
 * cdns_mhdp_update_link_status() - react to an HPD event
 *
 * Called from the HPD worker with no locks held; takes link_mutex itself.
 * Unplug: tear the link down and reset rate/lanes to host defaults.
 * HPD pulse while still connected: check DPCD link status and bail if the
 * link is still trained (we do not service DP IRQs beyond this). Otherwise
 * retrain and, if the bridge was enabled, re-validate bandwidth and
 * re-enable SST with the mode saved in the bridge state. A non-zero return
 * tells the caller the modeset-retry worker must be scheduled.
 */
2247 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
2249 struct cdns_mhdp_bridge_state *cdns_bridge_state;
2250 struct drm_display_mode *current_mode;
2251 bool old_plugged = mhdp->plugged;
2252 struct drm_bridge_state *state;
2253 u8 status[DP_LINK_STATUS_SIZE];
2257 mutex_lock(&mhdp->link_mutex);
2259 mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
2261 if (!mhdp->plugged) {
2262 cdns_mhdp_link_down(mhdp);
2263 mhdp->link.rate = mhdp->host.link_rate;
2264 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2269 * If we get a HPD pulse event and we were and still are connected,
2270 * check the link status. If link status is ok, there's nothing to do
2271 * as we don't handle DP interrupts. If link status is bad, continue
2272 * with full link setup.
2274 if (hpd_pulse && old_plugged == mhdp->plugged) {
2275 ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
2278 * If everything looks fine, just return, as we don't handle
2282 drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
2283 drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
2286 /* If link is bad, mark link as down so that we do a new LT */
2287 mhdp->link_up = false;
2290 if (!mhdp->link_up) {
2291 ret = cdns_mhdp_link_up(mhdp);
2296 if (mhdp->bridge_enabled) {
2297 state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
2303 cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
2304 if (!cdns_bridge_state) {
2309 current_mode = cdns_bridge_state->current_mode;
2310 if (!current_mode) {
/* The retrained link may have fewer lanes / a lower rate; make sure
 * the active mode still fits before re-enabling the stream. */
2315 if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
2321 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
2322 current_mode->name);
2324 cdns_mhdp_sst_enable(mhdp, current_mode);
2327 mutex_unlock(&mhdp->link_mutex);
/*
 * cdns_mhdp_modeset_retry_fn() - deferred work run after an enable/retrain
 * failure: mark the connector link status BAD and emit a hotplug uevent so
 * userspace performs a fresh modeset.
 */
2331 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2333 struct cdns_mhdp_device *mhdp;
2334 struct drm_connector *conn;
2336 mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2338 conn = &mhdp->connector;
2340 /* Grab the locks before changing connector property */
2341 mutex_lock(&conn->dev->mode_config.mutex);
2344 * Set connector link status to BAD and send a Uevent to notify
2345 * userspace to do a modeset.
2347 drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2348 mutex_unlock(&conn->dev->mode_config.mutex);
2350 /* Send Hotplug uevent so userspace can reprobe */
2351 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
/*
 * cdns_mhdp_irq_handler() - threaded IRQ handler for firmware SW events
 *
 * Dispatches HPD events to the hpd_work worker (only when the bridge is
 * attached, to avoid a NULL bridge->dev oops), and accumulates all other
 * SW event bits into mhdp->sw_events for cdns_mhdp_wait_for_sw_event().
 */
2354 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2356 struct cdns_mhdp_device *mhdp = data;
2357 u32 apb_stat, sw_ev0;
2358 bool bridge_attached;
2360 apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2361 if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2364 sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2367 * Calling drm_kms_helper_hotplug_event() when not attached
2368 * to drm device causes an oops because the drm_bridge->dev
2369 * is NULL. See cdns_mhdp_fw_cb() comments for details about the
2370 * problems related drm_kms_helper_hotplug_event() call.
2372 spin_lock(&mhdp->start_lock);
2373 bridge_attached = mhdp->bridge_attached;
2374 spin_unlock(&mhdp->start_lock);
2376 if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2377 schedule_work(&mhdp->hpd_work);
2380 if (sw_ev0 & ~CDNS_DPTX_HPD) {
2381 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2382 wake_up(&mhdp->sw_events_wq);
/*
 * cdns_mhdp_wait_for_sw_event() - wait (up to 500ms) for @event bit(s)
 * posted by the IRQ handler. Returns the accumulated sw_events value and
 * clears the waited-for bit; logs a debug message on timeout.
 */
2388 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2392 ret = wait_event_timeout(mhdp->sw_events_wq,
2393 mhdp->sw_events & event,
2394 msecs_to_jiffies(500));
2396 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2400 ret = mhdp->sw_events;
2401 mhdp->sw_events &= ~event;
/*
 * cdns_mhdp_hpd_work() - HPD worker: update link status, then notify.
 * With an own connector, a failed update schedules the modeset-retry work
 * and a hotplug uevent is raised; otherwise the result of detect is
 * forwarded to the upstream bridge via drm_bridge_hpd_notify().
 */
2407 static void cdns_mhdp_hpd_work(struct work_struct *work)
2409 struct cdns_mhdp_device *mhdp = container_of(work,
2410 struct cdns_mhdp_device,
2414 ret = cdns_mhdp_update_link_status(mhdp);
2415 if (mhdp->connector.dev) {
2417 schedule_work(&mhdp->modeset_retry_work);
2419 drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2421 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
/*
 * cdns_mhdp_probe() - platform driver probe
 *
 * Allocates the device, acquires clock/regs/PHY resources (the SAPB region
 * is optional: missing it merely disables HDCP), enables runtime PM, runs
 * the platform init hook, programs the functional clock rate registers,
 * masks all interrupts before requesting the threaded IRQ, fills host
 * capabilities and the default RGB444/8bpc format, initializes the bridge,
 * workers and waitqueues, loads firmware asynchronously and registers the
 * bridge. Error paths (labels elided in this view) unwind PHY, platform
 * ops, runtime PM and the clock.
 */
2425 static int cdns_mhdp_probe(struct platform_device *pdev)
2427 struct device *dev = &pdev->dev;
2428 struct cdns_mhdp_device *mhdp;
2434 mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2438 clk = devm_clk_get(dev, NULL);
2440 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2441 return PTR_ERR(clk);
2446 mutex_init(&mhdp->mbox_mutex);
2447 mutex_init(&mhdp->link_mutex);
2448 spin_lock_init(&mhdp->start_lock);
2450 drm_dp_aux_init(&mhdp->aux);
2451 mhdp->aux.dev = dev;
2452 mhdp->aux.transfer = cdns_mhdp_transfer;
2454 mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2455 if (IS_ERR(mhdp->regs)) {
2456 dev_err(dev, "Failed to get memory resource\n");
2457 return PTR_ERR(mhdp->regs);
2460 mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2461 if (IS_ERR(mhdp->sapb_regs)) {
2462 mhdp->hdcp_supported = false;
2464 "Failed to get SAPB memory resource, HDCP not supported\n");
2466 mhdp->hdcp_supported = true;
2469 mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2470 if (IS_ERR(mhdp->phy)) {
2471 dev_err(dev, "no PHY configured\n");
2472 return PTR_ERR(mhdp->phy);
2475 platform_set_drvdata(pdev, mhdp);
2477 mhdp->info = of_device_get_match_data(dev);
2479 clk_prepare_enable(clk);
2481 pm_runtime_enable(dev);
2482 ret = pm_runtime_resume_and_get(dev);
2484 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2485 pm_runtime_disable(dev);
2489 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2490 ret = mhdp->info->ops->init(mhdp);
2492 dev_err(dev, "MHDP platform initialization failed: %d\n",
/* Tell the controller firmware the functional clock rate. */
2498 rate = clk_get_rate(clk);
2499 writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2500 writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2502 dev_dbg(dev, "func clk rate %lu Hz\n", rate);
/* Mask everything until attach/hpd_enable unmask what is needed. */
2504 writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2506 irq = platform_get_irq(pdev, 0);
2507 ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2508 cdns_mhdp_irq_handler, IRQF_ONESHOT,
2511 dev_err(dev, "cannot install IRQ %d\n", irq);
2516 cdns_mhdp_fill_host_caps(mhdp);
2518 /* Initialize link rate and num of lanes to host values */
2519 mhdp->link.rate = mhdp->host.link_rate;
2520 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2522 /* The only currently supported format */
2523 mhdp->display_fmt.y_only = false;
2524 mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2525 mhdp->display_fmt.bpc = 8;
2527 mhdp->bridge.of_node = pdev->dev.of_node;
2528 mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2529 mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2531 mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2533 mhdp->bridge.timings = mhdp->info->timings;
2535 ret = phy_init(mhdp->phy);
2537 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2541 /* Initialize the work for modeset in case of link train failure */
2542 INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2543 INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2545 init_waitqueue_head(&mhdp->fw_load_wq);
2546 init_waitqueue_head(&mhdp->sw_events_wq);
2548 ret = cdns_mhdp_load_firmware(mhdp);
2552 if (mhdp->hdcp_supported)
2553 cdns_mhdp_hdcp_init(mhdp);
2555 drm_bridge_add(&mhdp->bridge);
2560 phy_exit(mhdp->phy);
2562 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2563 mhdp->info->ops->exit(mhdp);
2565 pm_runtime_put_sync(dev);
2566 pm_runtime_disable(dev);
2568 clk_disable_unprepare(mhdp->clk);
/*
 * cdns_mhdp_remove() - platform driver remove
 *
 * Unregisters the bridge, waits up to 100ms for an in-flight firmware load
 * to finish, moves the HW to STOPPED under start_lock, deactivates the
 * firmware, then unwinds PHY, platform ops, runtime PM, pending work and
 * the clock. The HDCP check/prop works are intentionally not cancelled
 * (see the in-code comment).
 */
2573 static int cdns_mhdp_remove(struct platform_device *pdev)
2575 struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2576 unsigned long timeout = msecs_to_jiffies(100);
2577 bool stop_fw = false;
2580 drm_bridge_remove(&mhdp->bridge);
2582 ret = wait_event_timeout(mhdp->fw_load_wq,
2583 mhdp->hw_state == MHDP_HW_READY,
2586 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2591 spin_lock(&mhdp->start_lock);
2592 mhdp->hw_state = MHDP_HW_STOPPED;
2593 spin_unlock(&mhdp->start_lock);
2596 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2598 phy_exit(mhdp->phy);
2600 if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2601 mhdp->info->ops->exit(mhdp);
2603 pm_runtime_put_sync(&pdev->dev);
2604 pm_runtime_disable(&pdev->dev);
2606 cancel_work_sync(&mhdp->modeset_retry_work);
2607 flush_work(&mhdp->hpd_work);
2608 /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
2610 clk_disable_unprepare(mhdp->clk);
/*
 * OF match table. The TI J721E variant carries platform-specific bridge
 * timings and wrapper ops; the plain cdns,mhdp8546 entry has no data.
 */
2615 static const struct of_device_id mhdp_ids[] = {
2616 { .compatible = "cdns,mhdp8546", },
2617 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2618 { .compatible = "ti,j721e-mhdp8546",
2619 .data = &(const struct cdns_mhdp_platform_info) {
2620 .timings = &mhdp_ti_j721e_bridge_timings,
2621 .ops = &mhdp_ti_j721e_ops,
2627 MODULE_DEVICE_TABLE(of, mhdp_ids);
/* Platform driver registration and module metadata. */
2629 static struct platform_driver mhdp_driver = {
2631 .name = "cdns-mhdp8546",
2632 .of_match_table = of_match_ptr(mhdp_ids),
2634 .probe = cdns_mhdp_probe,
2635 .remove = cdns_mhdp_remove,
2637 module_platform_driver(mhdp_driver);
2639 MODULE_FIRMWARE(FW_NAME);
2646 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2647 MODULE_LICENSE("GPL");
2648 MODULE_ALIAS("platform:cdns-mhdp8546");