1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence MHDP8546 DP bridge driver.
4  *
5  * Copyright (C) 2020 Cadence Design Systems, Inc.
6  *
7  * Authors: Quentin Schulz <[email protected]>
8  *          Swapnil Jakhade <[email protected]>
9  *          Yuti Amonkar <[email protected]>
10  *          Tomi Valkeinen <[email protected]>
11  *          Jyri Sarha <[email protected]>
12  *
13  * TODO:
14  *     - Implement optimized mailbox communication using mailbox interrupts
15  *     - Add support for power management
16  *     - Add support for features like audio, MST and fast link training
17  *     - Implement request_fw_cancel to handle HW_STATE
18  *     - Fix the implementation of asynchronous firmware loading
19  *     - Add DRM helper function for cdns_mhdp_lower_link_rate
20  */
21
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
31 #include <linux/of.h>
32 #include <linux/phy/phy.h>
33 #include <linux/phy/phy-dp.h>
34 #include <linux/platform_device.h>
35 #include <linux/slab.h>
36 #include <linux/wait.h>
37
38 #include <drm/display/drm_dp_helper.h>
39 #include <drm/display/drm_hdcp_helper.h>
40 #include <drm/drm_atomic.h>
41 #include <drm/drm_atomic_helper.h>
42 #include <drm/drm_atomic_state_helper.h>
43 #include <drm/drm_bridge.h>
44 #include <drm/drm_connector.h>
45 #include <drm/drm_edid.h>
46 #include <drm/drm_modeset_helper_vtables.h>
47 #include <drm/drm_print.h>
48 #include <drm/drm_probe_helper.h>
49
50 #include <asm/unaligned.h>
51
52 #include "cdns-mhdp8546-core.h"
53 #include "cdns-mhdp8546-hdcp.h"
54 #include "cdns-mhdp8546-j721e.h"
55
56 static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
57 {
58         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
59
60         /* Enable SW event interrupts */
61         if (mhdp->bridge_attached)
62                 writel(readl(mhdp->regs + CDNS_APB_INT_MASK) &
63                        ~CDNS_APB_INT_MASK_SW_EVENT_INT,
64                        mhdp->regs + CDNS_APB_INT_MASK);
65 }
66
67 static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
68 {
69         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
70
71         writel(readl(mhdp->regs + CDNS_APB_INT_MASK) |
72                CDNS_APB_INT_MASK_SW_EVENT_INT,
73                mhdp->regs + CDNS_APB_INT_MASK);
74 }
75
76 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
77 {
78         int ret, empty;
79
80         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
81
82         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
83                                  empty, !empty, MAILBOX_RETRY_US,
84                                  MAILBOX_TIMEOUT_US);
85         if (ret < 0)
86                 return ret;
87
88         return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
89 }
90
91 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
92 {
93         int ret, full;
94
95         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
96
97         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
98                                  full, !full, MAILBOX_RETRY_US,
99                                  MAILBOX_TIMEOUT_US);
100         if (ret < 0)
101                 return ret;
102
103         writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
104
105         return 0;
106 }
107
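/*
 * Mailbox messages are framed with a 4-byte header: byte 0 carries the
 * opcode, byte 1 the module ID and bytes 2-3 the big-endian payload size
 * (see cdns_mhdp_mailbox_send()). Receive and validate such a header.
 */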
108 static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
109                                          u8 module_id, u8 opcode,
110                                          u16 req_size)
111 {
112         u32 mbox_size, i;
113         u8 header[4];
114         int ret;
115
116         /* read the header of the message */
117         for (i = 0; i < sizeof(header); i++) {
118                 ret = cdns_mhdp_mailbox_read(mhdp);
119                 if (ret < 0)
120                         return ret;
121
122                 header[i] = ret;
123         }
124
125         mbox_size = get_unaligned_be16(header + 2);
126
127         if (opcode != header[0] || module_id != header[1] ||
128             req_size != mbox_size) {
129                 /*
130                  * If the message in the mailbox is not the one we expect,
131                  * drain the mailbox by reading out its contents.
132                  */
133                 for (i = 0; i < mbox_size; i++)
134                         if (cdns_mhdp_mailbox_read(mhdp) < 0)
135                                 break;
136
137                 return -EINVAL;
138         }
139
140         return 0;
141 }
142
143 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
144                                        u8 *buff, u16 buff_size)
145 {
146         u32 i;
147         int ret;
148
149         for (i = 0; i < buff_size; i++) {
150                 ret = cdns_mhdp_mailbox_read(mhdp);
151                 if (ret < 0)
152                         return ret;
153
154                 buff[i] = ret;
155         }
156
157         return 0;
158 }
159
160 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
161                                   u8 opcode, u16 size, u8 *message)
162 {
163         u8 header[4];
164         int ret, i;
165
166         header[0] = opcode;
167         header[1] = module_id;
168         put_unaligned_be16(size, header + 2);
169
170         for (i = 0; i < sizeof(header); i++) {
171                 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
172                 if (ret)
173                         return ret;
174         }
175
176         for (i = 0; i < size; i++) {
177                 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
178                 if (ret)
179                         return ret;
180         }
181
182         return 0;
183 }
184
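/*
 * GENERAL_REGISTER_READ replies with 8 bytes: the echoed 4-byte register
 * address followed by the 4-byte big-endian register value.
 */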
185 static
186 int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
187 {
188         u8 msg[4], resp[8];
189         int ret;
190
191         put_unaligned_be32(addr, msg);
192
193         mutex_lock(&mhdp->mbox_mutex);
194
195         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
196                                      GENERAL_REGISTER_READ,
197                                      sizeof(msg), msg);
198         if (ret)
199                 goto out;
200
201         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
202                                             GENERAL_REGISTER_READ,
203                                             sizeof(resp));
204         if (ret)
205                 goto out;
206
207         ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
208         if (ret)
209                 goto out;
210
211         /* Returned address value should be the same as requested */
212         if (memcmp(msg, resp, sizeof(msg))) {
213                 ret = -EINVAL;
214                 goto out;
215         }
216
217         *value = get_unaligned_be32(resp + 4);
218
219 out:
220         mutex_unlock(&mhdp->mbox_mutex);
221         if (ret) {
222                 dev_err(mhdp->dev, "Failed to read register\n");
223                 *value = 0;
224         }
225
226         return ret;
227 }
228
229 static
230 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
231 {
232         u8 msg[6];
233         int ret;
234
235         put_unaligned_be16(addr, msg);
236         put_unaligned_be32(val, msg + 2);
237
238         mutex_lock(&mhdp->mbox_mutex);
239
240         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
241                                      DPTX_WRITE_REGISTER, sizeof(msg), msg);
242
243         mutex_unlock(&mhdp->mbox_mutex);
244
245         return ret;
246 }
247
248 static
249 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
250                             u8 start_bit, u8 bits_no, u32 val)
251 {
252         u8 field[8];
253         int ret;
254
255         put_unaligned_be16(addr, field);
256         field[2] = start_bit;
257         field[3] = bits_no;
258         put_unaligned_be32(val, field + 4);
259
260         mutex_lock(&mhdp->mbox_mutex);
261
262         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
263                                      DPTX_WRITE_FIELD, sizeof(field), field);
264
265         mutex_unlock(&mhdp->mbox_mutex);
266
267         return ret;
268 }
269
270 static
271 int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
272                         u32 addr, u8 *data, u16 len)
273 {
274         u8 msg[5], reg[5];
275         int ret;
276
277         put_unaligned_be16(len, msg);
278         put_unaligned_be24(addr, msg + 2);
279
280         mutex_lock(&mhdp->mbox_mutex);
281
282         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
283                                      DPTX_READ_DPCD, sizeof(msg), msg);
284         if (ret)
285                 goto out;
286
287         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
288                                             DPTX_READ_DPCD,
289                                             sizeof(reg) + len);
290         if (ret)
291                 goto out;
292
293         ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
294         if (ret)
295                 goto out;
296
297         ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);
298
299 out:
300         mutex_unlock(&mhdp->mbox_mutex);
301
302         return ret;
303 }
304
305 static
306 int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
307 {
308         u8 msg[6], reg[5];
309         int ret;
310
311         put_unaligned_be16(1, msg);
312         put_unaligned_be24(addr, msg + 2);
313         msg[5] = value;
314
315         mutex_lock(&mhdp->mbox_mutex);
316
317         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
318                                      DPTX_WRITE_DPCD, sizeof(msg), msg);
319         if (ret)
320                 goto out;
321
322         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
323                                             DPTX_WRITE_DPCD, sizeof(reg));
324         if (ret)
325                 goto out;
326
327         ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
328         if (ret)
329                 goto out;
330
331         if (addr != get_unaligned_be24(reg + 2))
332                 ret = -EINVAL;
333
334 out:
335         mutex_unlock(&mhdp->mbox_mutex);
336
337         if (ret)
338                 dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
339         return ret;
340 }
341
342 static
343 int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
344 {
345         u8 msg[5];
346         int ret, i;
347
348         msg[0] = GENERAL_MAIN_CONTROL;
349         msg[1] = MB_MODULE_ID_GENERAL;
350         msg[2] = 0;
351         msg[3] = 1;
352         msg[4] = enable ? FW_ACTIVE : FW_STANDBY;
353
354         mutex_lock(&mhdp->mbox_mutex);
355
356         for (i = 0; i < sizeof(msg); i++) {
357                 ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
358                 if (ret)
359                         goto out;
360         }
361
362         /* read the firmware state */
363         ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
364         if (ret)
365                 goto out;
366
367         ret = 0;
368
369 out:
370         mutex_unlock(&mhdp->mbox_mutex);
371
372         if (ret < 0)
373                 dev_err(mhdp->dev, "set firmware active failed\n");
374         return ret;
375 }
376
377 static
378 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
379 {
380         u8 status;
381         int ret;
382
383         mutex_lock(&mhdp->mbox_mutex);
384
385         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
386                                      DPTX_HPD_STATE, 0, NULL);
387         if (ret)
388                 goto err_get_hpd;
389
390         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
391                                             DPTX_HPD_STATE,
392                                             sizeof(status));
393         if (ret)
394                 goto err_get_hpd;
395
396         ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
397         if (ret)
398                 goto err_get_hpd;
399
400         mutex_unlock(&mhdp->mbox_mutex);
401
402         dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
403                 status ? "" : "un");
404
405         return status;
406
407 err_get_hpd:
408         mutex_unlock(&mhdp->mbox_mutex);
409
410         return ret;
411 }
412
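/*
 * Ask the firmware for one EDID block: msg[0] selects the EDID segment
 * (block / 2) and msg[1] the block within the segment (block % 2). Retry
 * up to four times and check that the firmware echoes the expected length
 * and segment.
 */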
413 static
414 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
415                              unsigned int block, size_t length)
416 {
417         struct cdns_mhdp_device *mhdp = data;
418         u8 msg[2], reg[2], i;
419         int ret;
420
421         mutex_lock(&mhdp->mbox_mutex);
422
423         for (i = 0; i < 4; i++) {
424                 msg[0] = block / 2;
425                 msg[1] = block % 2;
426
427                 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
428                                              DPTX_GET_EDID, sizeof(msg), msg);
429                 if (ret)
430                         continue;
431
432                 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
433                                                     DPTX_GET_EDID,
434                                                     sizeof(reg) + length);
435                 if (ret)
436                         continue;
437
438                 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
439                 if (ret)
440                         continue;
441
442                 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
443                 if (ret)
444                         continue;
445
446                 if (reg[0] == length && reg[1] == block / 2)
447                         break;
448         }
449
450         mutex_unlock(&mhdp->mbox_mutex);
451
452         if (ret)
453                 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
454                         block, ret);
455
456         return ret;
457 }
458
459 static
460 int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
461 {
462         u8 event = 0;
463         int ret;
464
465         mutex_lock(&mhdp->mbox_mutex);
466
467         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
468                                      DPTX_READ_EVENT, 0, NULL);
469         if (ret)
470                 goto out;
471
472         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
473                                             DPTX_READ_EVENT, sizeof(event));
474         if (ret < 0)
475                 goto out;
476
477         ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
478 out:
479         mutex_unlock(&mhdp->mbox_mutex);
480
481         if (ret < 0)
482                 return ret;
483
484         dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
485                 (event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
486                 (event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
487                 (event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
488                 (event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");
489
490         return event;
491 }
492
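/*
 * DPTX_ADJUST_LT takes the lane count, a big-endian delay in microseconds
 * and per-lane training data. The firmware applies the settings and then
 * answers with a DPCD read response of the link status starting at
 * DP_LANE0_1_STATUS.
 */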
493 static
494 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
495                         unsigned int udelay, const u8 *lanes_data,
496                         u8 link_status[DP_LINK_STATUS_SIZE])
497 {
498         u8 payload[7];
499         u8 hdr[5]; /* For DPCD read response header */
500         u32 addr;
501         int ret;
502
503         if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
504                 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
505                 ret = -EINVAL;
506                 goto out;
507         }
508
509         payload[0] = nlanes;
510         put_unaligned_be16(udelay, payload + 1);
511         memcpy(payload + 3, lanes_data, nlanes);
512
513         mutex_lock(&mhdp->mbox_mutex);
514
515         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
516                                      DPTX_ADJUST_LT,
517                                      sizeof(payload), payload);
518         if (ret)
519                 goto out;
520
521         /* The FW answers DPTX_ADJUST_LT with a DPCD read response; receive it */
522         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
523                                             DPTX_READ_DPCD,
524                                             sizeof(hdr) + DP_LINK_STATUS_SIZE);
525         if (ret)
526                 goto out;
527
528         ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
529         if (ret)
530                 goto out;
531
532         addr = get_unaligned_be24(hdr + 2);
533         if (addr != DP_LANE0_1_STATUS)
534                 goto out;
535
536         ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
537                                           DP_LINK_STATUS_SIZE);
538
539 out:
540         mutex_unlock(&mhdp->mbox_mutex);
541
542         if (ret)
543                 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
544
545         return ret;
546 }
547
548 /**
549  * cdns_mhdp_link_power_up() - power up a DisplayPort link
550  * @aux: DisplayPort AUX channel
551  * @link: pointer to a structure containing the link configuration
552  *
553  * Returns 0 on success or a negative error code on failure.
554  */
555 static
556 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
557 {
558         u8 value;
559         int err;
560
561         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
562         if (link->revision < 0x11)
563                 return 0;
564
565         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
566         if (err < 0)
567                 return err;
568
569         value &= ~DP_SET_POWER_MASK;
570         value |= DP_SET_POWER_D0;
571
572         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
573         if (err < 0)
574                 return err;
575
576         /*
577          * According to the DP 1.1 specification, a "Sink Device must exit the
578          * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
579          * Control Field", register 0x600).
580          */
581         usleep_range(1000, 2000);
582
583         return 0;
584 }
585
586 /**
587  * cdns_mhdp_link_power_down() - power down a DisplayPort link
588  * @aux: DisplayPort AUX channel
589  * @link: pointer to a structure containing the link configuration
590  *
591  * Returns 0 on success or a negative error code on failure.
592  */
593 static
594 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
595                               struct cdns_mhdp_link *link)
596 {
597         u8 value;
598         int err;
599
600         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
601         if (link->revision < 0x11)
602                 return 0;
603
604         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
605         if (err < 0)
606                 return err;
607
608         value &= ~DP_SET_POWER_MASK;
609         value |= DP_SET_POWER_D3;
610
611         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
612         if (err < 0)
613                 return err;
614
615         return 0;
616 }
617
618 /**
619  * cdns_mhdp_link_configure() - configure a DisplayPort link
620  * @aux: DisplayPort AUX channel
621  * @link: pointer to a structure containing the link configuration
622  *
623  * Returns 0 on success or a negative error code on failure.
624  */
625 static
626 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
627                              struct cdns_mhdp_link *link)
628 {
629         u8 values[2];
630         int err;
631
632         values[0] = drm_dp_link_rate_to_bw_code(link->rate);
633         values[1] = link->num_lanes;
634
635         if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
636                 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
637
638         err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
639         if (err < 0)
640                 return err;
641
642         return 0;
643 }
644
645 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
646 {
647         return min(mhdp->host.link_rate, mhdp->sink.link_rate);
648 }
649
650 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
651 {
652         return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
653 }
654
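/* Highest training pattern set (TPS) supported by both host and sink */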
655 static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
656 {
657         return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
658 }
659
660 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
661 {
662         /* Check if SSC is supported by both sides */
663         return mhdp->host.ssc && mhdp->sink.ssc;
664 }
665
666 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
667 {
668         dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
669
670         if (mhdp->plugged)
671                 return connector_status_connected;
672         else
673                 return connector_status_disconnected;
674 }
675
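/*
 * The firmware and library version numbers are each exposed as a pair of
 * 8-bit registers that are combined into a 16-bit value below.
 */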
676 static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
677 {
678         u32 major_num, minor_num, revision;
679         u32 fw_ver, lib_ver;
680
681         fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
682                | readl(mhdp->regs + CDNS_VER_L);
683
684         lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
685                 | readl(mhdp->regs + CDNS_LIB_L_ADDR);
686
687         if (lib_ver < 33984) {
688                 /*
689                  * Older FW versions with major number 1 stored the FW
690                  * version information as a repository revision number in
691                  * the registers. This check identifies those FW versions.
692                  */
693                 major_num = 1;
694                 minor_num = 2;
695                 if (fw_ver == 26098) {
696                         revision = 15;
697                 } else if (lib_ver == 0 && fw_ver == 0) {
698                         revision = 17;
699                 } else {
700                         dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
701                                 fw_ver, lib_ver);
702                         return -ENODEV;
703                 }
704         } else {
705                 /* To identify newer FW versions with major number 2 onwards. */
706                 major_num = fw_ver / 10000;
707                 minor_num = (fw_ver / 100) % 100;
708                 revision = (fw_ver % 10000) % 100;
709         }
710
711         dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
712                 revision);
713         return 0;
714 }
715
716 static int cdns_mhdp_fw_activate(const struct firmware *fw,
717                                  struct cdns_mhdp_device *mhdp)
718 {
719         unsigned int reg;
720         int ret;
721
722         /* Release uCPU reset and stall it. */
723         writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);
724
725         memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);
726
727         /* Leave debug mode, release stall */
728         writel(0, mhdp->regs + CDNS_APB_CTRL);
729
730         /*
731          * Wait for the KEEP_ALIVE "message" on the first 8 bits.
732          * It is updated on each scheduler "tick" (~2 ms).
733          */
734         ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
735                                  reg & CDNS_KEEP_ALIVE_MASK, 500,
736                                  CDNS_KEEP_ALIVE_TIMEOUT);
737         if (ret) {
738                 dev_err(mhdp->dev,
739                         "device didn't give any life sign: reg %d\n", reg);
740                 return ret;
741         }
742
743         ret = cdns_mhdp_check_fw_version(mhdp);
744         if (ret)
745                 return ret;
746
747         /* Clear the events by reading them: FW clears them on read, not at boot */
748         readl(mhdp->regs + CDNS_SW_EVENT0);
749         readl(mhdp->regs + CDNS_SW_EVENT1);
750         readl(mhdp->regs + CDNS_SW_EVENT2);
751         readl(mhdp->regs + CDNS_SW_EVENT3);
752
753         /* Activate uCPU */
754         ret = cdns_mhdp_set_firmware_active(mhdp, true);
755         if (ret)
756                 return ret;
757
758         spin_lock(&mhdp->start_lock);
759
760         mhdp->hw_state = MHDP_HW_READY;
761
762         /*
763          * The lock must be held here while enabling the interrupts,
764          * since otherwise the interrupt enable code could run after
765          * the bridge has been detached. A similar situation is not
766          * possible in the attach()/detach() callbacks, since the
767          * hw_state change from MHDP_HW_READY to MHDP_HW_STOPPED
768          * happens only on driver removal, when the bridge should
769          * already be detached.
770          */
771         cdns_mhdp_bridge_hpd_enable(&mhdp->bridge);
772
773         spin_unlock(&mhdp->start_lock);
774
775         wake_up(&mhdp->fw_load_wq);
776         dev_dbg(mhdp->dev, "DP FW activated\n");
777
778         return 0;
779 }
780
781 static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
782 {
783         struct cdns_mhdp_device *mhdp = context;
784         bool bridge_attached;
785         int ret;
786
787         dev_dbg(mhdp->dev, "firmware callback\n");
788
789         if (!fw || !fw->data) {
790                 dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
791                 return;
792         }
793
794         ret = cdns_mhdp_fw_activate(fw, mhdp);
795
796         release_firmware(fw);
797
798         if (ret)
799                 return;
800
801         /*
802          *  XXX how to make sure the bridge is still attached when
803          *      calling drm_kms_helper_hotplug_event() after releasing
804          *      the lock? We should not hold the spin lock when
805          *      calling drm_kms_helper_hotplug_event() since it may
806          *      cause a deadlock: the fbdev console calls detect() from the
807          *      same thread, further down the call stack started here.
808          */
809         spin_lock(&mhdp->start_lock);
810         bridge_attached = mhdp->bridge_attached;
811         spin_unlock(&mhdp->start_lock);
812         if (bridge_attached) {
813                 if (mhdp->connector.dev)
814                         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
815                 else
816                         drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
817         }
818 }
819
820 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
821 {
822         int ret;
823
824         ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
825                                       GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
826         if (ret) {
827                 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
828                         FW_NAME, ret);
829                 return ret;
830         }
831
832         return 0;
833 }
834
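/*
 * AUX transfers are tunnelled through the firmware mailbox. Only native
 * AUX reads and writes are supported (no I2C-over-AUX), and writes are
 * issued one byte at a time.
 */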
835 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
836                                   struct drm_dp_aux_msg *msg)
837 {
838         struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
839         int ret;
840
841         if (msg->request != DP_AUX_NATIVE_WRITE &&
842             msg->request != DP_AUX_NATIVE_READ)
843                 return -EOPNOTSUPP;
844
845         if (msg->request == DP_AUX_NATIVE_WRITE) {
846                 const u8 *buf = msg->buffer;
847                 unsigned int i;
848
849                 for (i = 0; i < msg->size; ++i) {
850                         ret = cdns_mhdp_dpcd_write(mhdp,
851                                                    msg->address + i, buf[i]);
852                         if (!ret)
853                                 continue;
854
855                         dev_err(mhdp->dev,
856                                 "Failed to write DPCD addr %u\n",
857                                 msg->address + i);
858
859                         return ret;
860                 }
861         } else {
862                 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
863                                           msg->buffer, msg->size);
864                 if (ret) {
865                         dev_err(mhdp->dev,
866                                 "Failed to read DPCD addr %u\n",
867                                 msg->address);
868
869                         return ret;
870                 }
871         }
872
873         return msg->size;
874 }
875
876 static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
877 {
878         union phy_configure_opts phy_cfg;
879         u32 reg32;
880         int ret;
881
882         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
883                            DP_TRAINING_PATTERN_DISABLE);
884
885         /* Reset PHY configuration */
886         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
887         if (!mhdp->host.scrambler)
888                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
889
890         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
891
892         cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
893                             mhdp->sink.enhanced & mhdp->host.enhanced);
894
895         cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
896                             CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));
897
898         cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
899         phy_cfg.dp.link_rate = mhdp->link.rate / 100;
900         phy_cfg.dp.lanes = mhdp->link.num_lanes;
901
902         memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
903         memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));
904
905         phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
906         phy_cfg.dp.set_lanes = true;
907         phy_cfg.dp.set_rate = true;
908         phy_cfg.dp.set_voltages = true;
909         ret = phy_configure(mhdp->phy,  &phy_cfg);
910         if (ret) {
911                 dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
912                         __func__, ret);
913                 return ret;
914         }
915
916         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
917                             CDNS_PHY_COMMON_CONFIG |
918                             CDNS_PHY_TRAINING_EN |
919                             CDNS_PHY_TRAINING_TYPE(1) |
920                             CDNS_PHY_SCRAMBLER_BYPASS);
921
922         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
923                            DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);
924
925         return 0;
926 }
927
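/*
 * Build the per-lane training values from the sink's adjustment requests,
 * clamped to the host limits and to the "voltage swing + pre-emphasis <= 3"
 * rule, and set the MAX_*_REACHED flags when a limit is hit. The same
 * levels are mirrored into the PHY configuration.
 */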
928 static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
929                                        u8 link_status[DP_LINK_STATUS_SIZE],
930                                        u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
931                                        union phy_configure_opts *phy_cfg)
932 {
933         u8 adjust, max_pre_emph, max_volt_swing;
934         u8 set_volt, set_pre;
935         unsigned int i;
936
937         max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
938                            << DP_TRAIN_PRE_EMPHASIS_SHIFT;
939         max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);
940
941         for (i = 0; i < mhdp->link.num_lanes; i++) {
942                 /* Check if Voltage swing and pre-emphasis are within limits */
943                 adjust = drm_dp_get_adjust_request_voltage(link_status, i);
944                 set_volt = min(adjust, max_volt_swing);
945
946                 adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
947                 set_pre = min(adjust, max_pre_emph)
948                           >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
949
950                 /*
951                  * If the combined voltage swing and pre-emphasis levels
952                  * exceed the allowed maximum, keep pre-emphasis as-is and
953                  * lower the voltage swing.
954                  */
955                 if (set_volt + set_pre > 3)
956                         set_volt = 3 - set_pre;
957
958                 phy_cfg->dp.voltage[i] = set_volt;
959                 lanes_data[i] = set_volt;
960
961                 if (set_volt == max_volt_swing)
962                         lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;
963
964                 phy_cfg->dp.pre[i] = set_pre;
965                 lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);
966
967                 if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
968                         lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
969         }
970 }
971
972 static
973 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
974                                           unsigned int lane, u8 volt)
975 {
976         unsigned int s = ((lane & 1) ?
977                           DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
978                           DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
979         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
980
981         link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
982         link_status[idx] |= volt << s;
983 }
984
985 static
986 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
987                                                unsigned int lane, u8 pre_emphasis)
988 {
989         unsigned int s = ((lane & 1) ?
990                           DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
991                           DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
992         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
993
994         link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
995         link_status[idx] |= pre_emphasis << s;
996 }
997
998 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
999                                           u8 link_status[DP_LINK_STATUS_SIZE])
1000 {
1001         u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1002         u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1003         unsigned int i;
1004         u8 volt, pre;
1005
1006         for (i = 0; i < mhdp->link.num_lanes; i++) {
1007                 volt = drm_dp_get_adjust_request_voltage(link_status, i);
1008                 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
1009                 if (volt + pre > 3)
1010                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
1011                                                              3 - pre);
1012                 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
1013                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
1014                                                              max_volt);
1015                 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
1016                         cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
1017                                                                   i, max_pre);
1018         }
1019 }
1020
1021 static void cdns_mhdp_print_lt_status(const char *prefix,
1022                                       struct cdns_mhdp_device *mhdp,
1023                                       union phy_configure_opts *phy_cfg)
1024 {
1025         char vs[8] = "0/0/0/0";
1026         char pe[8] = "0/0/0/0";
1027         unsigned int i;
1028
1029         for (i = 0; i < mhdp->link.num_lanes; i++) {
1030                 vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1031                 pe[i * 2] = '0' + phy_cfg->dp.pre[i];
1032         }
1033
1034         vs[i * 2 - 1] = '\0';
1035         pe[i * 2 - 1] = '\0';
1036
1037         dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1038                 prefix,
1039                 mhdp->link.num_lanes, mhdp->link.rate / 100,
1040                 vs, pe);
1041 }
1042
1043 static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
1044                                                u8 eq_tps,
1045                                                unsigned int training_interval)
1046 {
1047         u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
1048         u8 link_status[DP_LINK_STATUS_SIZE];
1049         union phy_configure_opts phy_cfg;
1050         u32 reg32;
1051         int ret;
1052         bool r;
1053
1054         dev_dbg(mhdp->dev, "Starting EQ phase\n");
1055
1056         /* Enable link training TPS[eq_tps] in PHY */
1057         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
1058                 CDNS_PHY_TRAINING_TYPE(eq_tps);
1059         if (eq_tps != 4)
1060                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1061         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1062
1063         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1064                            (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
1065                            CDNS_DP_TRAINING_PATTERN_4);
1066
1067         drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1068
1069         do {
1070                 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1071                                            &phy_cfg);
1072                 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1073                 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1074                 phy_cfg.dp.set_lanes = false;
1075                 phy_cfg.dp.set_rate = false;
1076                 phy_cfg.dp.set_voltages = true;
1077                 ret = phy_configure(mhdp->phy,  &phy_cfg);
1078                 if (ret) {
1079                         dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1080                                 __func__, ret);
1081                         goto err;
1082                 }
1083
1084                 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
1085                                     training_interval, lanes_data, link_status);
1086
1087                 r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
1088                 if (!r)
1089                         goto err;
1090
1091                 if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
1092                         cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
1093                                                   &phy_cfg);
1094                         return true;
1095                 }
1096
1097                 fail_counter_short++;
1098
1099                 cdns_mhdp_adjust_requested_eq(mhdp, link_status);
1100         } while (fail_counter_short < 5);
1101
1102 err:
1103         cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);
1104
1105         return false;
1106 }
1107
1108 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1109                                           u8 link_status[DP_LINK_STATUS_SIZE],
1110                                           u8 *req_volt, u8 *req_pre)
1111 {
1112         const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1113         const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1114         unsigned int i;
1115
1116         for (i = 0; i < mhdp->link.num_lanes; i++) {
1117                 u8 val;
1118
1119                 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1120                       max_volt : req_volt[i];
1121                 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1122
1123                 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1124                       max_pre : req_pre[i];
1125                 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1126         }
1127 }
1128
1129 static
1130 void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
1131                            bool *same_before_adjust, bool *max_swing_reached,
1132                            u8 before_cr[CDNS_DP_MAX_NUM_LANES],
1133                            u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
1134                            u8 *req_pre)
1135 {
1136         const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1137         const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1138         bool same_pre, same_volt;
1139         unsigned int i;
1140         u8 adjust;
1141
1142         *same_before_adjust = false;
1143         *max_swing_reached = false;
1144         *cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);
1145
1146         for (i = 0; i < mhdp->link.num_lanes; i++) {
1147                 adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
1148                 req_volt[i] = min(adjust, max_volt);
1149
1150                 adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
1151                       DP_TRAIN_PRE_EMPHASIS_SHIFT;
1152                 req_pre[i] = min(adjust, max_pre);
1153
1154                 same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
1155                            req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
1156                 same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
1157                             req_volt[i];
1158                 if (same_pre && same_volt)
1159                         *same_before_adjust = true;
1160
1161                 /* See Section 3.1.5.2 and Table 3-1 in the DP Standard v1.4 */
1162                 if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
1163                         *max_swing_reached = true;
1164                         return;
1165                 }
1166         }
1167 }
1168
1169 static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
1170 {
1171         u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
1172         fail_counter_short = 0, fail_counter_cr_long = 0;
1173         u8 link_status[DP_LINK_STATUS_SIZE];
1174         bool cr_done;
1175         union phy_configure_opts phy_cfg;
1176         int ret;
1177
1178         dev_dbg(mhdp->dev, "Starting CR phase\n");
1179
1180         ret = cdns_mhdp_link_training_init(mhdp);
1181         if (ret)
1182                 goto err;
1183
1184         drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);
1185
1186         do {
1187                 u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
1188                 u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
1189                 bool same_before_adjust, max_swing_reached;
1190
1191                 cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
1192                                            &phy_cfg);
1193                 phy_cfg.dp.lanes = mhdp->link.num_lanes;
1194                 phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
1195                 phy_cfg.dp.set_lanes = false;
1196                 phy_cfg.dp.set_rate = false;
1197                 phy_cfg.dp.set_voltages = true;
1198                 ret = phy_configure(mhdp->phy,  &phy_cfg);
1199                 if (ret) {
1200                         dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
1201                                 __func__, ret);
1202                         goto err;
1203                 }
1204
1205                 cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
1206                                     lanes_data, link_status);
1207
1208                 cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
1209                                       &max_swing_reached, lanes_data,
1210                                       link_status,
1211                                       requested_adjust_volt_swing,
1212                                       requested_adjust_pre_emphasis);
1213
1214                 if (max_swing_reached) {
1215                         dev_err(mhdp->dev, "CR: max swing reached\n");
1216                         goto err;
1217                 }
1218
1219                 if (cr_done) {
1220                         cdns_mhdp_print_lt_status("CR phase ok", mhdp,
1221                                                   &phy_cfg);
1222                         return true;
1223                 }
1224
1225                 /* Not all CR_DONE bits set */
1226                 fail_counter_cr_long++;
1227
1228                 if (same_before_adjust) {
1229                         fail_counter_short++;
1230                         continue;
1231                 }
1232
1233                 fail_counter_short = 0;
1234                 /*
1235                  * Voltage swing/pre-emphasis adjust requested
1236                  * during CR phase
1237                  */
1238                 cdns_mhdp_adjust_requested_cr(mhdp, link_status,
1239                                               requested_adjust_volt_swing,
1240                                               requested_adjust_pre_emphasis);
1241         } while (fail_counter_short < 5 && fail_counter_cr_long < 10);
1242
1243 err:
1244         cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);
1245
1246         return false;
1247 }
1248
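/* Step the link rate down one notch: 8.1 -> 5.4 -> 2.7 -> 1.62 Gbps */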
1249 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1250 {
1251         switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1252         case DP_LINK_BW_2_7:
1253                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1254                 break;
1255         case DP_LINK_BW_5_4:
1256                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1257                 break;
1258         case DP_LINK_BW_8_1:
1259                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1260                 break;
1261         }
1262 }
1263
1264 static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
1265                                    unsigned int training_interval)
1266 {
1267         u32 reg32;
1268         const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
1269         int ret;
1270
1271         while (1) {
1272                 if (!cdns_mhdp_link_training_cr(mhdp)) {
1273                         if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1274                             DP_LINK_BW_1_62) {
1275                                 dev_dbg(mhdp->dev,
1276                                         "Reducing link rate during CR phase\n");
1277                                 cdns_mhdp_lower_link_rate(&mhdp->link);
1278
1279                                 continue;
1280                         } else if (mhdp->link.num_lanes > 1) {
1281                                 dev_dbg(mhdp->dev,
1282                                         "Reducing lanes number during CR phase\n");
1283                                 mhdp->link.num_lanes >>= 1;
1284                                 mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1285
1286                                 continue;
1287                         }
1288
1289                         dev_err(mhdp->dev,
1290                                 "Link training failed during CR phase\n");
1291                         goto err;
1292                 }
1293
1294                 if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
1295                                                        training_interval))
1296                         break;
1297
1298                 if (mhdp->link.num_lanes > 1) {
1299                         dev_dbg(mhdp->dev,
1300                                 "Reducing lanes number during EQ phase\n");
1301                         mhdp->link.num_lanes >>= 1;
1302
1303                         continue;
1304                 } else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
1305                            DP_LINK_BW_1_62) {
1306                         dev_dbg(mhdp->dev,
1307                                 "Reducing link rate during EQ phase\n");
1308                         cdns_mhdp_lower_link_rate(&mhdp->link);
1309                         mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1310
1311                         continue;
1312                 }
1313
1314                 dev_err(mhdp->dev, "Link training failed during EQ phase\n");
1315                 goto err;
1316         }
1317
1318         dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
1319                 mhdp->link.num_lanes, mhdp->link.rate / 100);
1320
1321         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1322                            mhdp->host.scrambler ? 0 :
1323                            DP_LINK_SCRAMBLING_DISABLE);
1324
1325         ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
1326         if (ret < 0) {
1327                 dev_err(mhdp->dev,
1328                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1329                         ret);
1330                 return ret;
1331         }
1332         reg32 &= ~GENMASK(1, 0);
1333         reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
1334         reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
1335         reg32 |= CDNS_DP_FRAMER_EN;
1336         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);
1337
1338         /* Reset PHY config */
1339         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1340         if (!mhdp->host.scrambler)
1341                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1342         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1343
1344         return 0;
1345 err:
1346         /* Reset PHY config */
1347         reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
1348         if (!mhdp->host.scrambler)
1349                 reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
1350         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);
1351
1352         drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
1353                            DP_TRAINING_PATTERN_DISABLE);
1354
1355         return -EIO;
1356 }
1357
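/*
 * Translate the DP_TRAINING_AUX_RD_INTERVAL field into microseconds:
 * 0 means 400 us, while values 1-4 map to 4 ms, 8 ms, 16 ms and 32 ms.
 * Any other value is invalid and returns 0.
 */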
1358 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1359                                               u32 interval)
1360 {
1361         if (interval == 0)
1362                 return 400;
1363         if (interval < 5)
1364                 return 4000 << (interval - 1);
1365         dev_err(mhdp->dev,
1366                 "wrong training interval returned by DPCD: %d\n", interval);
1367         return 0;
1368 }
1369
1370 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1371 {
1372         unsigned int link_rate;
1373
1374         /* Get source capabilities based on PHY attributes */
1375
1376         mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1377         if (!mhdp->host.lanes_cnt)
1378                 mhdp->host.lanes_cnt = 4;
1379
1380         link_rate = mhdp->phy->attrs.max_link_rate;
1381         if (!link_rate)
1382                 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1383         else
1384                 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1385                 link_rate *= 100;
1386
1387         mhdp->host.link_rate = link_rate;
1388         mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1389         mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1390         mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1391                                   CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1392                                   CDNS_SUPPORT_TPS(4);
1393         mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1394         mhdp->host.fast_link = false;
1395         mhdp->host.enhanced = true;
1396         mhdp->host.scrambler = true;
1397         mhdp->host.ssc = false;
1398 }
1399
1400 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1401                                      u8 dpcd[DP_RECEIVER_CAP_SIZE])
1402 {
1403         mhdp->sink.link_rate = mhdp->link.rate;
1404         mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1405         mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1406                                  DP_LINK_CAP_ENHANCED_FRAMING);
1407
1408         /* Set SSC support */
1409         mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1410                                   DP_MAX_DOWNSPREAD_0_5);
1411
1412         /* Set TPS support */
1413         mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1414         if (drm_dp_tps3_supported(dpcd))
1415                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1416         if (drm_dp_tps4_supported(dpcd))
1417                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1418
1419         /* Set fast link support */
1420         mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1421                                   DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1422 }
1423
1424 static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
1425 {
1426         u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
1427         u32 resp, interval, interval_us;
1428         u8 ext_cap_chk = 0;
1429         unsigned int addr;
1430         int err;
1431
1432         WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1433
1434         drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
1435                           &ext_cap_chk);
1436
1437         if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
1438                 addr = DP_DP13_DPCD_REV;
1439         else
1440                 addr = DP_DPCD_REV;
1441
1442         err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
1443         if (err < 0) {
1444                 dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
1445                 return err;
1446         }
1447
1448         mhdp->link.revision = dpcd[0];
1449         mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
1450         mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
1451
1452         if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
1453                 mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
1454
1455         dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
1456         cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);
1457
1458         cdns_mhdp_fill_sink_caps(mhdp, dpcd);
1459
1460         mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
1461         mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);
1462
1463         /* Disable framer for link training */
1464         err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
1465         if (err < 0) {
1466                 dev_err(mhdp->dev,
1467                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1468                         err);
1469                 return err;
1470         }
1471
1472         resp &= ~CDNS_DP_FRAMER_EN;
1473         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
1474
1475         /* Spread AMP if required, enable 8b/10b coding */
1476         amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
1477         amp[1] = DP_SET_ANSI_8B10B;
1478         drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);
1479
1480         if (mhdp->host.fast_link & mhdp->sink.fast_link) {
1481                 dev_err(mhdp->dev, "fastlink not supported\n");
1482                 return -EOPNOTSUPP;
1483         }
1484
1485         interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
1486         interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
1487         if (!interval_us ||
1488             cdns_mhdp_link_training(mhdp, interval_us)) {
1489                 dev_err(mhdp->dev, "Link training failed. Exiting.\n");
1490                 return -EIO;
1491         }
1492
1493         mhdp->link_up = true;
1494
1495         return 0;
1496 }
1497
1498 static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
1499 {
1500         WARN_ON(!mutex_is_locked(&mhdp->link_mutex));
1501
1502         if (mhdp->plugged)
1503                 cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);
1504
1505         mhdp->link_up = false;
1506 }
1507
1508 static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
1509                                                   struct drm_connector *connector)
1510 {
1511         if (!mhdp->plugged)
1512                 return NULL;
1513
1514         return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
1515 }
1516
1517 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1518 {
1519         struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1520         const struct drm_edid *drm_edid;
1521         int num_modes;
1522
1523         if (!mhdp->plugged)
1524                 return 0;
1525
1526         drm_edid = cdns_mhdp_edid_read(mhdp, connector);
1527
1528         drm_edid_connector_update(connector, drm_edid);
1529
1530         if (!drm_edid) {
1531                 dev_err(mhdp->dev, "Failed to read EDID\n");
1532                 return 0;
1533         }
1534
1535         num_modes = drm_edid_connector_add_modes(connector);
1536         drm_edid_free(drm_edid);
1537
1538         /*
1539          * HACK: Warn about unsupported display formats until we deal
1540          *       with them correctly.
1541          */
1542         if (connector->display_info.color_formats &&
1543             !(connector->display_info.color_formats &
1544               mhdp->display_fmt.color_format))
1545                 dev_warn(mhdp->dev,
1546                          "%s: No supported color_format found (0x%08x)\n",
1547                         __func__, connector->display_info.color_formats);
1548
1549         if (connector->display_info.bpc &&
1550             connector->display_info.bpc < mhdp->display_fmt.bpc)
1551                 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1552                          __func__, connector->display_info.bpc,
1553                          mhdp->display_fmt.bpc);
1554
1555         return num_modes;
1556 }
1557
1558 static int cdns_mhdp_connector_detect(struct drm_connector *conn,
1559                                       struct drm_modeset_acquire_ctx *ctx,
1560                                       bool force)
1561 {
1562         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1563
1564         return cdns_mhdp_detect(mhdp);
1565 }
1566
1567 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1568 {
1569         u32 bpp;
1570
1571         if (fmt->y_only)
1572                 return fmt->bpc;
1573
1574         switch (fmt->color_format) {
1575         case DRM_COLOR_FORMAT_RGB444:
1576         case DRM_COLOR_FORMAT_YCBCR444:
1577                 bpp = fmt->bpc * 3;
1578                 break;
1579         case DRM_COLOR_FORMAT_YCBCR422:
1580                 bpp = fmt->bpc * 2;
1581                 break;
1582         case DRM_COLOR_FORMAT_YCBCR420:
1583                 bpp = fmt->bpc * 3 / 2;
1584                 break;
1585         default:
1586                 bpp = fmt->bpc * 3;
1587                 WARN_ON(1);
1588         }
1589         return bpp;
1590 }
1591
1592 static
1593 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1594                             const struct drm_display_mode *mode,
1595                             unsigned int lanes, unsigned int rate)
1596 {
1597         u32 max_bw, req_bw, bpp;
1598
1599         /*
1600          * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1601          * gives the payload bandwidth in kB/s. Since DisplayPort uses an 8b/10b
1602          * encoding (10 link bits per byte), that value also equals the required
1603          * link bandwidth in units of 10 kb/s, matching the units of rate.
1604          */
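        /*
         * Worked example: 1920x1080@60 (mode->clock = 148500) at 24 bpp needs
         * req_bw = 148500 * 24 / 8 = 445500, while four lanes at HBR
         * (rate = 270000) provide max_bw = 1080000, so the mode fits.
         */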
1605
1606         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1607         req_bw = mode->clock * bpp / 8;
1608         max_bw = lanes * rate;
1609         if (req_bw > max_bw) {
1610                 dev_dbg(mhdp->dev,
1611                         "Unsupported Mode: %s, Req BW: %u, Available Max BW: %u\n",
1612                         mode->name, req_bw, max_bw);
1613
1614                 return false;
1615         }
1616
1617         return true;
1618 }
1619
1620 static
1621 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1622                                           struct drm_display_mode *mode)
1623 {
1624         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1625
1626         mutex_lock(&mhdp->link_mutex);
1627
1628         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1629                                     mhdp->link.rate)) {
1630                 mutex_unlock(&mhdp->link_mutex);
1631                 return MODE_CLOCK_HIGH;
1632         }
1633
1634         mutex_unlock(&mhdp->link_mutex);
1635         return MODE_OK;
1636 }
1637
1638 static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
1639                                             struct drm_atomic_state *state)
1640 {
1641         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1642         struct drm_connector_state *old_state, *new_state;
1643         struct drm_crtc_state *crtc_state;
1644         u64 old_cp, new_cp;
1645
1646         if (!mhdp->hdcp_supported)
1647                 return 0;
1648
1649         old_state = drm_atomic_get_old_connector_state(state, conn);
1650         new_state = drm_atomic_get_new_connector_state(state, conn);
1651         old_cp = old_state->content_protection;
1652         new_cp = new_state->content_protection;
1653
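        /*
         * A change of the requested HDCP content type needs a full modeset so
         * that protection can be re-established with the new type, unless
         * content protection is being turned off entirely.
         */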
1654         if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
1655             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1656                 new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1657                 goto mode_changed;
1658         }
1659
1660         if (!new_state->crtc) {
1661                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
1662                         new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
1663                 return 0;
1664         }
1665
1666         if (old_cp == new_cp ||
1667             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
1668              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
1669                 return 0;
1670
1671 mode_changed:
1672         crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
1673         crtc_state->mode_changed = true;
1674
1675         return 0;
1676 }
1677
1678 static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
1679         .detect_ctx = cdns_mhdp_connector_detect,
1680         .get_modes = cdns_mhdp_get_modes,
1681         .mode_valid = cdns_mhdp_mode_valid,
1682         .atomic_check = cdns_mhdp_connector_atomic_check,
1683 };
1684
1685 static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
1686         .fill_modes = drm_helper_probe_single_connector_modes,
1687         .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1688         .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1689         .reset = drm_atomic_helper_connector_reset,
1690         .destroy = drm_connector_cleanup,
1691 };
1692
1693 static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
1694 {
1695         u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
1696         struct drm_connector *conn = &mhdp->connector;
1697         struct drm_bridge *bridge = &mhdp->bridge;
1698         int ret;
1699
1700         if (!bridge->encoder) {
1701                 dev_err(mhdp->dev, "Parent encoder object not found\n");
1702                 return -ENODEV;
1703         }
1704
1705         conn->polled = DRM_CONNECTOR_POLL_HPD;
1706
1707         ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
1708                                  DRM_MODE_CONNECTOR_DisplayPort);
1709         if (ret) {
1710                 dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
1711                 return ret;
1712         }
1713
1714         drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
1715
1716         ret = drm_display_info_set_bus_formats(&conn->display_info,
1717                                                &bus_format, 1);
1718         if (ret)
1719                 return ret;
1720
1721         ret = drm_connector_attach_encoder(conn, bridge->encoder);
1722         if (ret) {
1723                 dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
1724                 return ret;
1725         }
1726
1727         if (mhdp->hdcp_supported)
1728                 ret = drm_connector_attach_content_protection_property(conn, true);
1729
1730         return ret;
1731 }
1732
1733 static int cdns_mhdp_attach(struct drm_bridge *bridge,
1734                             enum drm_bridge_attach_flags flags)
1735 {
1736         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1737         bool hw_ready;
1738         int ret;
1739
1740         dev_dbg(mhdp->dev, "%s\n", __func__);
1741
1742         mhdp->aux.drm_dev = bridge->dev;
1743         ret = drm_dp_aux_register(&mhdp->aux);
1744         if (ret < 0)
1745                 return ret;
1746
1747         if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
1748                 ret = cdns_mhdp_connector_init(mhdp);
1749                 if (ret)
1750                         goto aux_unregister;
1751         }
1752
1753         spin_lock(&mhdp->start_lock);
1754
1755         mhdp->bridge_attached = true;
1756         hw_ready = mhdp->hw_state == MHDP_HW_READY;
1757
1758         spin_unlock(&mhdp->start_lock);
1759
1760         /* Enable SW event interrupts */
1761         if (hw_ready)
1762                 cdns_mhdp_bridge_hpd_enable(bridge);
1763
1764         return 0;
1765 aux_unregister:
1766         drm_dp_aux_unregister(&mhdp->aux);
1767         return ret;
1768 }
1769
1770 static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
1771                                       const struct drm_display_mode *mode)
1772 {
1773         unsigned int dp_framer_sp = 0, msa_horizontal_1,
1774                 msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
1775                 misc0 = 0, misc1 = 0, pxl_repr,
1776                 front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
1777                 dp_vertical_1;
1778         u8 stream_id = mhdp->stream_id;
1779         u32 bpp, bpc, pxlfmt, framer;
1780         int ret;
1781
1782         pxlfmt = mhdp->display_fmt.color_format;
1783         bpc = mhdp->display_fmt.bpc;
1784
1785         /*
1786          * If the stream uses a YCbCr format and is not SD (>= 720 active lines),
1787          * use ITU-R BT.709 coefficients. ITU selection for YCbCr 4:2:0 is still TODO.
1788          */
1789         if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
1790              pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
1791                 misc0 = DP_YCBCR_COEFFICIENTS_ITU709;
1792
1793         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1794
1795         switch (pxlfmt) {
1796         case DRM_COLOR_FORMAT_RGB444:
1797                 pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
1798                 misc0 |= DP_COLOR_FORMAT_RGB;
1799                 break;
1800         case DRM_COLOR_FORMAT_YCBCR444:
1801                 pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
1802                 misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
1803                 break;
1804         case DRM_COLOR_FORMAT_YCBCR422:
1805                 pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
1806                 misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
1807                 break;
1808         case DRM_COLOR_FORMAT_YCBCR420:
1809                 pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
1810                 break;
1811         default:
1812                 pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
1813         }
1814
1815         switch (bpc) {
1816         case 6:
1817                 misc0 |= DP_TEST_BIT_DEPTH_6;
1818                 pxl_repr |= CDNS_DP_FRAMER_6_BPC;
1819                 break;
1820         case 8:
1821                 misc0 |= DP_TEST_BIT_DEPTH_8;
1822                 pxl_repr |= CDNS_DP_FRAMER_8_BPC;
1823                 break;
1824         case 10:
1825                 misc0 |= DP_TEST_BIT_DEPTH_10;
1826                 pxl_repr |= CDNS_DP_FRAMER_10_BPC;
1827                 break;
1828         case 12:
1829                 misc0 |= DP_TEST_BIT_DEPTH_12;
1830                 pxl_repr |= CDNS_DP_FRAMER_12_BPC;
1831                 break;
1832         case 16:
1833                 misc0 |= DP_TEST_BIT_DEPTH_16;
1834                 pxl_repr |= CDNS_DP_FRAMER_16_BPC;
1835                 break;
1836         }
1837
1838         bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
1839         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1840                 bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;
1841
1842         cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
1843                             bnd_hsync2vsync);
1844
1845         hsync2vsync_pol_ctrl = 0;
1846         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1847                 hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
1848         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1849                 hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
1850         cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
1851                             hsync2vsync_pol_ctrl);
1852
1853         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);
1854
1855         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1856                 dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
1857         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1858                 dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
1859         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1860                 dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
1861         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);
1862
1863         front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
1864         back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
1865         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
1866                             CDNS_DP_FRONT_PORCH(front_porch) |
1867                             CDNS_DP_BACK_PORCH(back_porch));
1868
1869         cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
1870                             mode->crtc_hdisplay * bpp / 8);
1871
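        /*
         * The MSA H/VSTART values are counted from the leading edge of the
         * sync pulse to the start of active video, i.e. sync width plus
         * back porch, which equals crtc_[hv]total - crtc_[hv]sync_start.
         */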
1872         msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
1873         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
1874                             CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
1875                             CDNS_DP_MSAH0_HSYNC_START(msa_h0));
1876
1877         hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
1878         msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
1879                            CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
1880         if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1881                 msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
1882         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
1883                             msa_horizontal_1);
1884
1885         msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
1886         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
1887                             CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
1888                             CDNS_DP_MSAV0_VSYNC_START(msa_v0));
1889
1890         vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
1891         msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
1892                          CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
1893         if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1894                 msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
1895         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
1896                             msa_vertical_1);
1897
1898         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1899             mode->crtc_vtotal % 2 == 0)
1900                 misc1 = DP_TEST_INTERLACED;
1901         if (mhdp->display_fmt.y_only)
1902                 misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
1903         /* Use VSC SDP for Y420 */
1904         if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
1905                 misc1 = CDNS_DP_TEST_VSC_SDP;
1906
1907         cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
1908                             misc0 | (misc1 << 8));
1909
1910         cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
1911                             CDNS_DP_H_HSYNC_WIDTH(hsync) |
1912                             CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));
1913
1914         cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
1915                             CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
1916                             CDNS_DP_V0_VSTART(msa_v0));
1917
1918         dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
1919         if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
1920             mode->crtc_vtotal % 2 == 0)
1921                 dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;
1922
1923         cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);
1924
1925         cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
1926                                 (mode->flags & DRM_MODE_FLAG_INTERLACE) ?
1927                                 CDNS_DP_VB_ID_INTERLACED : 0);
1928
1929         ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
1930         if (ret < 0) {
1931                 dev_err(mhdp->dev,
1932                         "Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
1933                         ret);
1934                 return;
1935         }
1936         framer |= CDNS_DP_FRAMER_EN;
1937         framer &= ~CDNS_DP_NO_VIDEO_MODE;
1938         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
1939 }
1940
1941 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1942                                  const struct drm_display_mode *mode)
1943 {
1944         u32 rate, vs, required_bandwidth, available_bandwidth;
1945         s32 line_thresh1, line_thresh2, line_thresh = 0;
1946         int pxlclock = mode->crtc_clock;
1947         u32 tu_size = 64;
1948         u32 bpp;
1949
1950         /* Get rate in MSymbols per second per lane */
1951         rate = mhdp->link.rate / 1000;
1952
1953         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1954
1955         required_bandwidth = pxlclock * bpp / 8;
1956         available_bandwidth = mhdp->link.num_lanes * rate;
1957
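        /*
         * The framer packs pixel data into fixed 64-symbol transfer units
         * (TUs). vs is the number of valid data symbols per TU on each lane,
         * i.e. the required-to-available bandwidth ratio scaled by the TU
         * size; the extra /1000 compensates for required_bandwidth being in
         * kB/s while rate is in MSymbols/s.
         */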
1958         vs = tu_size * required_bandwidth / available_bandwidth;
1959         vs /= 1000;
1960
1961         if (vs == tu_size)
1962                 vs = tu_size - 1;
1963
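        /*
         * Derive the framer line threshold from the TU fill level and the
         * pixel clock to link rate ratio. CDNS_DP_LINE_THRESH is masked to
         * six bits, hence the GENMASK(5, 0) below.
         */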
1964         line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1965         line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1966         line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1967         line_thresh = (line_thresh >> 5) + 2;
1968
1969         mhdp->stream_id = 0;
1970
1971         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1972                             CDNS_DP_FRAMER_TU_VS(vs) |
1973                             CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1974                             CDNS_DP_FRAMER_TU_CNT_RST_EN);
1975
1976         cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1977                             line_thresh & GENMASK(5, 0));
1978
1979         cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1980                             CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1981                                                    0 : tu_size - vs));
1982
1983         cdns_mhdp_configure_video(mhdp, mode);
1984 }
1985
1986 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1987                                     struct drm_bridge_state *bridge_state)
1988 {
1989         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1990         struct drm_atomic_state *state = bridge_state->base.state;
1991         struct cdns_mhdp_bridge_state *mhdp_state;
1992         struct drm_crtc_state *crtc_state;
1993         struct drm_connector *connector;
1994         struct drm_connector_state *conn_state;
1995         struct drm_bridge_state *new_state;
1996         const struct drm_display_mode *mode;
1997         u32 resp;
1998         int ret = 0;
1999
2000         dev_dbg(mhdp->dev, "bridge enable\n");
2001
2002         mutex_lock(&mhdp->link_mutex);
2003
2004         if (mhdp->plugged && !mhdp->link_up) {
2005                 ret = cdns_mhdp_link_up(mhdp);
2006                 if (ret < 0)
2007                         goto out;
2008         }
2009
2010         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
2011                 mhdp->info->ops->enable(mhdp);
2012
2013         /* Enable VIF clock for stream 0 */
2014         ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2015         if (ret < 0) {
2016                 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
2017                 goto out;
2018         }
2019
2020         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2021                             resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
2022
2023         connector = drm_atomic_get_new_connector_for_encoder(state,
2024                                                              bridge->encoder);
2025         if (WARN_ON(!connector))
2026                 goto out;
2027
2028         conn_state = drm_atomic_get_new_connector_state(state, connector);
2029         if (WARN_ON(!conn_state))
2030                 goto out;
2031
2032         if (mhdp->hdcp_supported &&
2033             mhdp->hw_state == MHDP_HW_READY &&
2034             conn_state->content_protection ==
2035             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2036                 mutex_unlock(&mhdp->link_mutex);
2037                 cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
2038                 mutex_lock(&mhdp->link_mutex);
2039         }
2040
2041         crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2042         if (WARN_ON(!crtc_state))
2043                 goto out;
2044
2045         mode = &crtc_state->adjusted_mode;
2046
2047         new_state = drm_atomic_get_new_bridge_state(state, bridge);
2048         if (WARN_ON(!new_state))
2049                 goto out;
2050
2051         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2052                                     mhdp->link.rate)) {
2053                 ret = -EINVAL;
2054                 goto out;
2055         }
2056
2057         cdns_mhdp_sst_enable(mhdp, mode);
2058
2059         mhdp_state = to_cdns_mhdp_bridge_state(new_state);
2060
2061         mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
2062         drm_mode_set_name(mhdp_state->current_mode);
2063
2064         dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
2065
2066         mhdp->bridge_enabled = true;
2067
2068 out:
2069         mutex_unlock(&mhdp->link_mutex);
2070         if (ret < 0)
2071                 schedule_work(&mhdp->modeset_retry_work);
2072 }
2073
2074 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
2075                                      struct drm_bridge_state *bridge_state)
2076 {
2077         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2078         u32 resp;
2079
2080         dev_dbg(mhdp->dev, "%s\n", __func__);
2081
2082         mutex_lock(&mhdp->link_mutex);
2083
2084         if (mhdp->hdcp_supported)
2085                 cdns_mhdp_hdcp_disable(mhdp);
2086
2087         mhdp->bridge_enabled = false;
2088         cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2089         resp &= ~CDNS_DP_FRAMER_EN;
2090         resp |= CDNS_DP_NO_VIDEO_MODE;
2091         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2092
2093         cdns_mhdp_link_down(mhdp);
2094
2095         /* Disable VIF clock for stream 0 */
2096         cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2097         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2098                             resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
2099
2100         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2101                 mhdp->info->ops->disable(mhdp);
2102
2103         mutex_unlock(&mhdp->link_mutex);
2104 }
2105
2106 static void cdns_mhdp_detach(struct drm_bridge *bridge)
2107 {
2108         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2109
2110         dev_dbg(mhdp->dev, "%s\n", __func__);
2111
2112         drm_dp_aux_unregister(&mhdp->aux);
2113
2114         spin_lock(&mhdp->start_lock);
2115
2116         mhdp->bridge_attached = false;
2117
2118         spin_unlock(&mhdp->start_lock);
2119
2120         writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2121 }
2122
2123 static struct drm_bridge_state *
2124 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2125 {
2126         struct cdns_mhdp_bridge_state *state;
2127
2128         state = kzalloc(sizeof(*state), GFP_KERNEL);
2129         if (!state)
2130                 return NULL;
2131
2132         __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2133
2134         return &state->base;
2135 }
2136
2137 static void
2138 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2139                                       struct drm_bridge_state *state)
2140 {
2141         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2142
2143         cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2144
2145         if (cdns_mhdp_state->current_mode) {
2146                 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2147                 cdns_mhdp_state->current_mode = NULL;
2148         }
2149
2150         kfree(cdns_mhdp_state);
2151 }
2152
2153 static struct drm_bridge_state *
2154 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2155 {
2156         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2157
2158         cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2159         if (!cdns_mhdp_state)
2160                 return NULL;
2161
2162         __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2163
2164         return &cdns_mhdp_state->base;
2165 }
2166
2167 static u32 *cdns_mhdp_get_input_bus_fmts(struct drm_bridge *bridge,
2168                                          struct drm_bridge_state *bridge_state,
2169                                          struct drm_crtc_state *crtc_state,
2170                                          struct drm_connector_state *conn_state,
2171                                          u32 output_fmt,
2172                                          unsigned int *num_input_fmts)
2173 {
2174         u32 *input_fmts;
2175
2176         *num_input_fmts = 0;
2177
2178         input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
2179         if (!input_fmts)
2180                 return NULL;
2181
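        /* Only the 36-bit RGB bus format is advertised on the input side. */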
2182         *num_input_fmts = 1;
2183         input_fmts[0] = MEDIA_BUS_FMT_RGB121212_1X36;
2184
2185         return input_fmts;
2186 }
2187
2188 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2189                                   struct drm_bridge_state *bridge_state,
2190                                   struct drm_crtc_state *crtc_state,
2191                                   struct drm_connector_state *conn_state)
2192 {
2193         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2194         const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2195
2196         mutex_lock(&mhdp->link_mutex);
2197
2198         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2199                                     mhdp->link.rate)) {
2200                 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2201                         __func__, mode->name, mhdp->link.num_lanes,
2202                         mhdp->link.rate / 100);
2203                 mutex_unlock(&mhdp->link_mutex);
2204                 return -EINVAL;
2205         }
2206
2207         /*
2208          * Bus-flag negotiation might be supported in the future.
2209          * For now, set the bus flags statically in atomic_check.
2210          */
2211         if (mhdp->info)
2212                 bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;
2213
2214         mutex_unlock(&mhdp->link_mutex);
2215         return 0;
2216 }
2217
2218 static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
2219 {
2220         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2221
2222         return cdns_mhdp_detect(mhdp);
2223 }
2224
2225 static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *bridge,
2226                                                          struct drm_connector *connector)
2227 {
2228         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2229
2230         return cdns_mhdp_edid_read(mhdp, connector);
2231 }
2232
2233 static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
2234         .atomic_enable = cdns_mhdp_atomic_enable,
2235         .atomic_disable = cdns_mhdp_atomic_disable,
2236         .atomic_check = cdns_mhdp_atomic_check,
2237         .attach = cdns_mhdp_attach,
2238         .detach = cdns_mhdp_detach,
2239         .atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
2240         .atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
2241         .atomic_reset = cdns_mhdp_bridge_atomic_reset,
2242         .atomic_get_input_bus_fmts = cdns_mhdp_get_input_bus_fmts,
2243         .detect = cdns_mhdp_bridge_detect,
2244         .edid_read = cdns_mhdp_bridge_edid_read,
2245         .hpd_enable = cdns_mhdp_bridge_hpd_enable,
2246         .hpd_disable = cdns_mhdp_bridge_hpd_disable,
2247 };
2248
2249 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2250 {
2251         int hpd_event, hpd_status;
2252
2253         *hpd_pulse = false;
2254
2255         hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2256
2257         /* Getting event bits failed, bail out */
2258         if (hpd_event < 0) {
2259                 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2260                          __func__, hpd_event);
2261                 return false;
2262         }
2263
2264         hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2265         if (hpd_status < 0) {
2266                 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2267                          __func__, hpd_status);
2268                 return false;
2269         }
2270
2271         if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2272                 *hpd_pulse = true;
2273
2274         return !!hpd_status;
2275 }
2276
2277 static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
2278 {
2279         struct cdns_mhdp_bridge_state *cdns_bridge_state;
2280         struct drm_display_mode *current_mode;
2281         bool old_plugged = mhdp->plugged;
2282         struct drm_bridge_state *state;
2283         u8 status[DP_LINK_STATUS_SIZE];
2284         bool hpd_pulse;
2285         int ret = 0;
2286
2287         mutex_lock(&mhdp->link_mutex);
2288
2289         mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);
2290
2291         if (!mhdp->plugged) {
2292                 cdns_mhdp_link_down(mhdp);
2293                 mhdp->link.rate = mhdp->host.link_rate;
2294                 mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2295                 goto out;
2296         }
2297
2298         /*
2299          * If we get an HPD pulse event and we were and still are connected,
2300          * check the link status. If link status is ok, there's nothing to do
2301          * as we don't handle DP interrupts. If link status is bad, continue
2302          * with full link setup.
2303          */
2304         if (hpd_pulse && old_plugged == mhdp->plugged) {
2305                 ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);
2306
2307                 /*
2308                  * If everything looks fine, just return, as we don't handle
2309                  * DP IRQs.
2310                  */
2311                 if (ret > 0 &&
2312                     drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
2313                     drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
2314                         goto out;
2315
2316                 /* If the link is bad, mark it down so that link training is redone */
2317                 mhdp->link_up = false;
2318         }
2319
2320         if (!mhdp->link_up) {
2321                 ret = cdns_mhdp_link_up(mhdp);
2322                 if (ret < 0)
2323                         goto out;
2324         }
2325
2326         if (mhdp->bridge_enabled) {
2327                 state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
2328                 if (!state) {
2329                         ret = -EINVAL;
2330                         goto out;
2331                 }
2332
2333                 cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
2334                 if (!cdns_bridge_state) {
2335                         ret = -EINVAL;
2336                         goto out;
2337                 }
2338
2339                 current_mode = cdns_bridge_state->current_mode;
2340                 if (!current_mode) {
2341                         ret = -EINVAL;
2342                         goto out;
2343                 }
2344
2345                 if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
2346                                             mhdp->link.rate)) {
2347                         ret = -EINVAL;
2348                         goto out;
2349                 }
2350
2351                 dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
2352                         current_mode->name);
2353
2354                 cdns_mhdp_sst_enable(mhdp, current_mode);
2355         }
2356 out:
2357         mutex_unlock(&mhdp->link_mutex);
2358         return ret;
2359 }
2360
2361 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2362 {
2363         struct cdns_mhdp_device *mhdp;
2364         struct drm_connector *conn;
2365
2366         mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2367
2368         conn = &mhdp->connector;
2369
2370         /* Grab the locks before changing connector property */
2371         mutex_lock(&conn->dev->mode_config.mutex);
2372
2373         /*
2374          * Set connector link status to BAD and send a Uevent to notify
2375          * userspace to do a modeset.
2376          */
2377         drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2378         mutex_unlock(&conn->dev->mode_config.mutex);
2379
2380         /* Send Hotplug uevent so userspace can reprobe */
2381         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2382 }
2383
2384 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2385 {
2386         struct cdns_mhdp_device *mhdp = data;
2387         u32 apb_stat, sw_ev0;
2388         bool bridge_attached;
2389
2390         apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2391         if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2392                 return IRQ_NONE;
2393
2394         sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2395
2396         /*
2397          * Calling drm_kms_helper_hotplug_event() when not attached to the
2398          * drm device causes an oops because drm_bridge->dev is NULL. See
2399          * the cdns_mhdp_fw_cb() comments for details about the problems
2400          * related to the drm_kms_helper_hotplug_event() call.
2401          */
2402         spin_lock(&mhdp->start_lock);
2403         bridge_attached = mhdp->bridge_attached;
2404         spin_unlock(&mhdp->start_lock);
2405
2406         if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2407                 schedule_work(&mhdp->hpd_work);
2408         }
2409
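        /*
         * Latch any non-HPD software events and wake up waiters in
         * cdns_mhdp_wait_for_sw_event().
         */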
2410         if (sw_ev0 & ~CDNS_DPTX_HPD) {
2411                 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2412                 wake_up(&mhdp->sw_events_wq);
2413         }
2414
2415         return IRQ_HANDLED;
2416 }
2417
2418 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2419 {
2420         u32 ret;
2421
2422         ret = wait_event_timeout(mhdp->sw_events_wq,
2423                                  mhdp->sw_events & event,
2424                                  msecs_to_jiffies(500));
2425         if (!ret) {
2426                 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2427                 goto sw_event_out;
2428         }
2429
2430         ret = mhdp->sw_events;
2431         mhdp->sw_events &= ~event;
2432
2433 sw_event_out:
2434         return ret;
2435 }
2436
2437 static void cdns_mhdp_hpd_work(struct work_struct *work)
2438 {
2439         struct cdns_mhdp_device *mhdp = container_of(work,
2440                                                      struct cdns_mhdp_device,
2441                                                      hpd_work);
2442         int ret;
2443
2444         ret = cdns_mhdp_update_link_status(mhdp);
2445         if (mhdp->connector.dev) {
2446                 if (ret < 0)
2447                         schedule_work(&mhdp->modeset_retry_work);
2448                 else
2449                         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2450         } else {
2451                 drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
2452         }
2453 }
2454
2455 static int cdns_mhdp_probe(struct platform_device *pdev)
2456 {
2457         struct device *dev = &pdev->dev;
2458         struct cdns_mhdp_device *mhdp;
2459         unsigned long rate;
2460         struct clk *clk;
2461         int ret;
2462         int irq;
2463
2464         mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2465         if (!mhdp)
2466                 return -ENOMEM;
2467
2468         clk = devm_clk_get(dev, NULL);
2469         if (IS_ERR(clk)) {
2470                 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2471                 return PTR_ERR(clk);
2472         }
2473
2474         mhdp->clk = clk;
2475         mhdp->dev = dev;
2476         mutex_init(&mhdp->mbox_mutex);
2477         mutex_init(&mhdp->link_mutex);
2478         spin_lock_init(&mhdp->start_lock);
2479
2480         drm_dp_aux_init(&mhdp->aux);
2481         mhdp->aux.dev = dev;
2482         mhdp->aux.transfer = cdns_mhdp_transfer;
2483
2484         mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2485         if (IS_ERR(mhdp->regs)) {
2486                 dev_err(dev, "Failed to get memory resource\n");
2487                 return PTR_ERR(mhdp->regs);
2488         }
2489
2490         mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2491         if (IS_ERR(mhdp->sapb_regs)) {
2492                 mhdp->hdcp_supported = false;
2493                 dev_warn(dev,
2494                          "Failed to get SAPB memory resource, HDCP not supported\n");
2495         } else {
2496                 mhdp->hdcp_supported = true;
2497         }
2498
2499         mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2500         if (IS_ERR(mhdp->phy)) {
2501                 dev_err(dev, "no PHY configured\n");
2502                 return PTR_ERR(mhdp->phy);
2503         }
2504
2505         platform_set_drvdata(pdev, mhdp);
2506
2507         mhdp->info = of_device_get_match_data(dev);
2508
2509         clk_prepare_enable(clk);
2510
2511         pm_runtime_enable(dev);
2512         ret = pm_runtime_resume_and_get(dev);
2513         if (ret < 0) {
2514                 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2515                 pm_runtime_disable(dev);
2516                 goto clk_disable;
2517         }
2518
2519         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2520                 ret = mhdp->info->ops->init(mhdp);
2521                 if (ret != 0) {
2522                         dev_err(dev, "MHDP platform initialization failed: %d\n",
2523                                 ret);
2524                         goto runtime_put;
2525                 }
2526         }
2527
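        /*
         * Program the functional clock rate into the CDNS_SW_CLK_H/L
         * registers, split into a whole-MHz part (high) and the sub-MHz
         * remainder in Hz (low).
         */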
2528         rate = clk_get_rate(clk);
2529         writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2530         writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2531
2532         dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2533
2534         writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2535
2536         irq = platform_get_irq(pdev, 0);
2537         ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2538                                         cdns_mhdp_irq_handler, IRQF_ONESHOT,
2539                                         "mhdp8546", mhdp);
2540         if (ret) {
2541                 dev_err(dev, "cannot install IRQ %d\n", irq);
2542                 ret = -EIO;
2543                 goto plat_fini;
2544         }
2545
2546         cdns_mhdp_fill_host_caps(mhdp);
2547
2548         /* Initialize link rate and num of lanes to host values */
2549         mhdp->link.rate = mhdp->host.link_rate;
2550         mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2551
2552         /* The only currently supported format */
2553         mhdp->display_fmt.y_only = false;
2554         mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2555         mhdp->display_fmt.bpc = 8;
2556
2557         mhdp->bridge.of_node = pdev->dev.of_node;
2558         mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2559         mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2560                            DRM_BRIDGE_OP_HPD;
2561         mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2562
2563         ret = phy_init(mhdp->phy);
2564         if (ret) {
2565                 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2566                 goto plat_fini;
2567         }
2568
2569         /* Initialize the modeset retry work used on link training failure */
2570         INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2571         INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2572
2573         init_waitqueue_head(&mhdp->fw_load_wq);
2574         init_waitqueue_head(&mhdp->sw_events_wq);
2575
2576         ret = cdns_mhdp_load_firmware(mhdp);
2577         if (ret)
2578                 goto phy_exit;
2579
2580         if (mhdp->hdcp_supported)
2581                 cdns_mhdp_hdcp_init(mhdp);
2582
2583         drm_bridge_add(&mhdp->bridge);
2584
2585         return 0;
2586
2587 phy_exit:
2588         phy_exit(mhdp->phy);
2589 plat_fini:
2590         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2591                 mhdp->info->ops->exit(mhdp);
2592 runtime_put:
2593         pm_runtime_put_sync(dev);
2594         pm_runtime_disable(dev);
2595 clk_disable:
2596         clk_disable_unprepare(mhdp->clk);
2597
2598         return ret;
2599 }
2600
2601 static void cdns_mhdp_remove(struct platform_device *pdev)
2602 {
2603         struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
2604         unsigned long timeout = msecs_to_jiffies(100);
2605         int ret;
2606
2607         drm_bridge_remove(&mhdp->bridge);
2608
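        /*
         * Wait for a pending firmware load to finish before deactivating the
         * firmware, and mark the hardware stopped in either case.
         */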
2609         ret = wait_event_timeout(mhdp->fw_load_wq,
2610                                  mhdp->hw_state == MHDP_HW_READY,
2611                                  timeout);
2612         spin_lock(&mhdp->start_lock);
2613         mhdp->hw_state = MHDP_HW_STOPPED;
2614         spin_unlock(&mhdp->start_lock);
2615
2616         if (ret == 0) {
2617                 dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
2618                         __func__);
2619         } else {
2620                 ret = cdns_mhdp_set_firmware_active(mhdp, false);
2621                 if (ret)
2622                         dev_err(mhdp->dev, "Failed to stop firmware (%pe)\n",
2623                                 ERR_PTR(ret));
2624         }
2625
2626         phy_exit(mhdp->phy);
2627
2628         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2629                 mhdp->info->ops->exit(mhdp);
2630
2631         pm_runtime_put_sync(&pdev->dev);
2632         pm_runtime_disable(&pdev->dev);
2633
2634         cancel_work_sync(&mhdp->modeset_retry_work);
2635         flush_work(&mhdp->hpd_work);
2636         /* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */
2637
2638         clk_disable_unprepare(mhdp->clk);
2639 }
2640
2641 static const struct of_device_id mhdp_ids[] = {
2642         { .compatible = "cdns,mhdp8546", },
2643 #ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
2644         { .compatible = "ti,j721e-mhdp8546",
2645           .data = &(const struct cdns_mhdp_platform_info) {
2646                   .input_bus_flags = &mhdp_ti_j721e_bridge_input_bus_flags,
2647                   .ops = &mhdp_ti_j721e_ops,
2648           },
2649         },
2650 #endif
2651         { /* sentinel */ }
2652 };
2653 MODULE_DEVICE_TABLE(of, mhdp_ids);
2654
2655 static struct platform_driver mhdp_driver = {
2656         .driver = {
2657                 .name           = "cdns-mhdp8546",
2658                 .of_match_table = mhdp_ids,
2659         },
2660         .probe  = cdns_mhdp_probe,
2661         .remove_new = cdns_mhdp_remove,
2662 };
2663 module_platform_driver(mhdp_driver);
2664
2665 MODULE_FIRMWARE(FW_NAME);
2666
2667 MODULE_AUTHOR("Quentin Schulz <[email protected]>");
2668 MODULE_AUTHOR("Swapnil Jakhade <[email protected]>");
2669 MODULE_AUTHOR("Yuti Amonkar <[email protected]>");
2670 MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
2671 MODULE_AUTHOR("Jyri Sarha <[email protected]>");
2672 MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
2673 MODULE_LICENSE("GPL");
2674 MODULE_ALIAS("platform:cdns-mhdp8546");