]> Git Repo - linux.git/blob - drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
Merge tag 'devicetree-fixes-for-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel...
[linux.git] / drivers / gpu / drm / bridge / cadence / cdns-mhdp8546-core.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Cadence MHDP8546 DP bridge driver.
4  *
5  * Copyright (C) 2020 Cadence Design Systems, Inc.
6  *
7  * Authors: Quentin Schulz <[email protected]>
8  *          Swapnil Jakhade <[email protected]>
9  *          Yuti Amonkar <[email protected]>
10  *          Tomi Valkeinen <[email protected]>
11  *          Jyri Sarha <[email protected]>
12  *
13  * TODO:
14  *     - Implement optimized mailbox communication using mailbox interrupts
15  *     - Add support for power management
16  *     - Add support for features like audio, MST and fast link training
17  *     - Implement request_fw_cancel to handle HW_STATE
18  *     - Fix asynchronous loading of firmware implementation
19  *     - Add DRM helper function for cdns_mhdp_lower_link_rate
20  */
21
22 #include <linux/clk.h>
23 #include <linux/delay.h>
24 #include <linux/err.h>
25 #include <linux/firmware.h>
26 #include <linux/io.h>
27 #include <linux/iopoll.h>
28 #include <linux/irq.h>
29 #include <linux/media-bus-format.h>
30 #include <linux/module.h>
31 #include <linux/of.h>
32 #include <linux/of_device.h>
33 #include <linux/phy/phy.h>
34 #include <linux/phy/phy-dp.h>
35 #include <linux/platform_device.h>
36 #include <linux/slab.h>
37 #include <linux/wait.h>
38
39 #include <drm/display/drm_dp_helper.h>
40 #include <drm/display/drm_hdcp_helper.h>
41 #include <drm/drm_atomic.h>
42 #include <drm/drm_atomic_helper.h>
43 #include <drm/drm_atomic_state_helper.h>
44 #include <drm/drm_bridge.h>
45 #include <drm/drm_connector.h>
46 #include <drm/drm_edid.h>
47 #include <drm/drm_modeset_helper_vtables.h>
48 #include <drm/drm_print.h>
49 #include <drm/drm_probe_helper.h>
50
51 #include <asm/unaligned.h>
52
53 #include "cdns-mhdp8546-core.h"
54 #include "cdns-mhdp8546-hdcp.h"
55 #include "cdns-mhdp8546-j721e.h"
56
57 static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
58 {
59         int ret, empty;
60
61         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
62
63         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_EMPTY,
64                                  empty, !empty, MAILBOX_RETRY_US,
65                                  MAILBOX_TIMEOUT_US);
66         if (ret < 0)
67                 return ret;
68
69         return readl(mhdp->regs + CDNS_MAILBOX_RX_DATA) & 0xff;
70 }
71
72 static int cdns_mhdp_mailbox_write(struct cdns_mhdp_device *mhdp, u8 val)
73 {
74         int ret, full;
75
76         WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
77
78         ret = readx_poll_timeout(readl, mhdp->regs + CDNS_MAILBOX_FULL,
79                                  full, !full, MAILBOX_RETRY_US,
80                                  MAILBOX_TIMEOUT_US);
81         if (ret < 0)
82                 return ret;
83
84         writel(val, mhdp->regs + CDNS_MAILBOX_TX_DATA);
85
86         return 0;
87 }
88
/*
 * Read and validate a 4-byte mailbox response header.
 *
 * Header layout: byte 0 = opcode, byte 1 = module id, bytes 2-3 = payload
 * size (big-endian), mirroring the layout built in cdns_mhdp_mailbox_send().
 * On a mismatch the stale payload is drained so the mailbox is left clean
 * for the next transaction.
 *
 * Returns 0 when the header matches the expected opcode/module/size,
 * a negative errno on a mailbox read failure, or -EINVAL on mismatch.
 */
static int cdns_mhdp_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
					 u8 module_id, u8 opcode,
					 u16 req_size)
{
	u32 mbox_size, i;
	u8 header[4];
	int ret;

	/* read the header of the message */
	for (i = 0; i < sizeof(header); i++) {
		ret = cdns_mhdp_mailbox_read(mhdp);
		if (ret < 0)
			return ret;

		header[i] = ret;
	}

	/* Payload size as announced by the firmware. */
	mbox_size = get_unaligned_be16(header + 2);

	if (opcode != header[0] || module_id != header[1] ||
	    req_size != mbox_size) {
		/*
		 * If the message in mailbox is not what we want, we need to
		 * clear the mailbox by reading its contents.
		 */
		for (i = 0; i < mbox_size; i++)
			if (cdns_mhdp_mailbox_read(mhdp) < 0)
				break;

		return -EINVAL;
	}

	return 0;
}
123
124 static int cdns_mhdp_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
125                                        u8 *buff, u16 buff_size)
126 {
127         u32 i;
128         int ret;
129
130         for (i = 0; i < buff_size; i++) {
131                 ret = cdns_mhdp_mailbox_read(mhdp);
132                 if (ret < 0)
133                         return ret;
134
135                 buff[i] = ret;
136         }
137
138         return 0;
139 }
140
141 static int cdns_mhdp_mailbox_send(struct cdns_mhdp_device *mhdp, u8 module_id,
142                                   u8 opcode, u16 size, u8 *message)
143 {
144         u8 header[4];
145         int ret, i;
146
147         header[0] = opcode;
148         header[1] = module_id;
149         put_unaligned_be16(size, header + 2);
150
151         for (i = 0; i < sizeof(header); i++) {
152                 ret = cdns_mhdp_mailbox_write(mhdp, header[i]);
153                 if (ret)
154                         return ret;
155         }
156
157         for (i = 0; i < size; i++) {
158                 ret = cdns_mhdp_mailbox_write(mhdp, message[i]);
159                 if (ret)
160                         return ret;
161         }
162
163         return 0;
164 }
165
/*
 * Read a 32-bit controller register through the GENERAL_REGISTER_READ
 * mailbox command.
 *
 * The 8-byte response echoes the 4-byte big-endian address followed by the
 * 4-byte big-endian register value; the echoed address is compared against
 * the request to catch desynchronized replies.
 *
 * Returns 0 with *value filled in, or a negative errno with *value = 0.
 */
static
int cdns_mhdp_reg_read(struct cdns_mhdp_device *mhdp, u32 addr, u32 *value)
{
	u8 msg[4], resp[8];
	int ret;

	put_unaligned_be32(addr, msg);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_GENERAL,
				     GENERAL_REGISTER_READ,
				     sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_GENERAL,
					    GENERAL_REGISTER_READ,
					    sizeof(resp));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, resp, sizeof(resp));
	if (ret)
		goto out;

	/* Returned address value should be the same as requested */
	if (memcmp(msg, resp, sizeof(msg))) {
		ret = -EINVAL;
		goto out;
	}

	/* Register value follows the echoed address. */
	*value = get_unaligned_be32(resp + 4);

out:
	mutex_unlock(&mhdp->mbox_mutex);
	if (ret) {
		dev_err(mhdp->dev, "Failed to read register\n");
		*value = 0;
	}

	return ret;
}
209
210 static
211 int cdns_mhdp_reg_write(struct cdns_mhdp_device *mhdp, u16 addr, u32 val)
212 {
213         u8 msg[6];
214         int ret;
215
216         put_unaligned_be16(addr, msg);
217         put_unaligned_be32(val, msg + 2);
218
219         mutex_lock(&mhdp->mbox_mutex);
220
221         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
222                                      DPTX_WRITE_REGISTER, sizeof(msg), msg);
223
224         mutex_unlock(&mhdp->mbox_mutex);
225
226         return ret;
227 }
228
229 static
230 int cdns_mhdp_reg_write_bit(struct cdns_mhdp_device *mhdp, u16 addr,
231                             u8 start_bit, u8 bits_no, u32 val)
232 {
233         u8 field[8];
234         int ret;
235
236         put_unaligned_be16(addr, field);
237         field[2] = start_bit;
238         field[3] = bits_no;
239         put_unaligned_be32(val, field + 4);
240
241         mutex_lock(&mhdp->mbox_mutex);
242
243         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
244                                      DPTX_WRITE_FIELD, sizeof(field), field);
245
246         mutex_unlock(&mhdp->mbox_mutex);
247
248         return ret;
249 }
250
/*
 * Read @len bytes of sink DPCD starting at @addr through the DPTX_READ_DPCD
 * mailbox command.
 *
 * The request carries a BE16 length and BE24 address. The reply payload
 * starts with a 5-byte preamble (read into @reg to drain the mailbox but
 * not validated here) followed by the DPCD data itself.
 *
 * Returns 0 on success or a negative errno.
 */
static
int cdns_mhdp_dpcd_read(struct cdns_mhdp_device *mhdp,
			u32 addr, u8 *data, u16 len)
{
	u8 msg[5], reg[5];
	int ret;

	put_unaligned_be16(len, msg);
	put_unaligned_be24(addr, msg + 2);

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_DPCD,
					    sizeof(reg) + len);
	if (ret)
		goto out;

	/* Consume the reply preamble before the actual DPCD bytes. */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, data, len);

out:
	mutex_unlock(&mhdp->mbox_mutex);

	return ret;
}
285
/*
 * Write a single DPCD byte at @addr through the DPTX_WRITE_DPCD mailbox
 * command.
 *
 * The request carries a BE16 length (always 1), BE24 address and the value.
 * The 5-byte reply echoes the address, which is checked; a mismatch is
 * reported as -EINVAL.
 *
 * Returns 0 on success or a negative errno.
 */
static
int cdns_mhdp_dpcd_write(struct cdns_mhdp_device *mhdp, u32 addr, u8 value)
{
	u8 msg[6], reg[5];
	int ret;

	put_unaligned_be16(1, msg);
	put_unaligned_be24(addr, msg + 2);
	msg[5] = value;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_WRITE_DPCD, sizeof(msg), msg);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_WRITE_DPCD, sizeof(reg));
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
	if (ret)
		goto out;

	/* The reply echoes the BE24 address at offset 2. */
	if (addr != get_unaligned_be24(reg + 2))
		ret = -EINVAL;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret)
		dev_err(mhdp->dev, "dpcd write failed: %d\n", ret);
	return ret;
}
322
/*
 * Switch the firmware between FW_ACTIVE and FW_STANDBY with the
 * GENERAL_MAIN_CONTROL command.
 *
 * The mailbox frame is built by hand (rather than via
 * cdns_mhdp_mailbox_send()): opcode, module id, BE16 size = 1, then the
 * one-byte requested state. The 5-byte reply is read raw into @msg just to
 * drain the mailbox; its contents are not inspected.
 *
 * Returns 0 on success or a negative errno.
 */
static
int cdns_mhdp_set_firmware_active(struct cdns_mhdp_device *mhdp, bool enable)
{
	u8 msg[5];
	int ret, i;

	/* Hand-built header: opcode, module id, BE16 payload size (= 1). */
	msg[0] = GENERAL_MAIN_CONTROL;
	msg[1] = MB_MODULE_ID_GENERAL;
	msg[2] = 0;
	msg[3] = 1;
	msg[4] = enable ? FW_ACTIVE : FW_STANDBY;

	mutex_lock(&mhdp->mbox_mutex);

	for (i = 0; i < sizeof(msg); i++) {
		ret = cdns_mhdp_mailbox_write(mhdp, msg[i]);
		if (ret)
			goto out;
	}

	/* read the firmware state */
	ret = cdns_mhdp_mailbox_recv_data(mhdp, msg, sizeof(msg));
	if (ret)
		goto out;

	ret = 0;

out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		dev_err(mhdp->dev, "set firmware active failed\n");
	return ret;
}
357
358 static
359 int cdns_mhdp_get_hpd_status(struct cdns_mhdp_device *mhdp)
360 {
361         u8 status;
362         int ret;
363
364         mutex_lock(&mhdp->mbox_mutex);
365
366         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
367                                      DPTX_HPD_STATE, 0, NULL);
368         if (ret)
369                 goto err_get_hpd;
370
371         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
372                                             DPTX_HPD_STATE,
373                                             sizeof(status));
374         if (ret)
375                 goto err_get_hpd;
376
377         ret = cdns_mhdp_mailbox_recv_data(mhdp, &status, sizeof(status));
378         if (ret)
379                 goto err_get_hpd;
380
381         mutex_unlock(&mhdp->mbox_mutex);
382
383         dev_dbg(mhdp->dev, "%s: HPD %splugged\n", __func__,
384                 status ? "" : "un");
385
386         return status;
387
388 err_get_hpd:
389         mutex_unlock(&mhdp->mbox_mutex);
390
391         return ret;
392 }
393
394 static
395 int cdns_mhdp_get_edid_block(void *data, u8 *edid,
396                              unsigned int block, size_t length)
397 {
398         struct cdns_mhdp_device *mhdp = data;
399         u8 msg[2], reg[2], i;
400         int ret;
401
402         mutex_lock(&mhdp->mbox_mutex);
403
404         for (i = 0; i < 4; i++) {
405                 msg[0] = block / 2;
406                 msg[1] = block % 2;
407
408                 ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
409                                              DPTX_GET_EDID, sizeof(msg), msg);
410                 if (ret)
411                         continue;
412
413                 ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
414                                                     DPTX_GET_EDID,
415                                                     sizeof(reg) + length);
416                 if (ret)
417                         continue;
418
419                 ret = cdns_mhdp_mailbox_recv_data(mhdp, reg, sizeof(reg));
420                 if (ret)
421                         continue;
422
423                 ret = cdns_mhdp_mailbox_recv_data(mhdp, edid, length);
424                 if (ret)
425                         continue;
426
427                 if (reg[0] == length && reg[1] == block / 2)
428                         break;
429         }
430
431         mutex_unlock(&mhdp->mbox_mutex);
432
433         if (ret)
434                 dev_err(mhdp->dev, "get block[%d] edid failed: %d\n",
435                         block, ret);
436
437         return ret;
438 }
439
/*
 * Poll the firmware for pending HPD events via DPTX_READ_EVENT.
 *
 * Returns the event bitmask (DPTX_READ_EVENT_HPD_* flags, >= 0) or a
 * negative errno on a mailbox failure.
 */
static
int cdns_mhdp_read_hpd_event(struct cdns_mhdp_device *mhdp)
{
	u8 event = 0;
	int ret;

	mutex_lock(&mhdp->mbox_mutex);

	ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
				     DPTX_READ_EVENT, 0, NULL);
	if (ret)
		goto out;

	ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
					    DPTX_READ_EVENT, sizeof(event));
	if (ret < 0)
		goto out;

	ret = cdns_mhdp_mailbox_recv_data(mhdp, &event, sizeof(event));
out:
	mutex_unlock(&mhdp->mbox_mutex);

	if (ret < 0)
		return ret;

	dev_dbg(mhdp->dev, "%s: %s%s%s%s\n", __func__,
		(event & DPTX_READ_EVENT_HPD_TO_HIGH) ? "TO_HIGH " : "",
		(event & DPTX_READ_EVENT_HPD_TO_LOW) ? "TO_LOW " : "",
		(event & DPTX_READ_EVENT_HPD_PULSE) ? "PULSE " : "",
		(event & DPTX_READ_EVENT_HPD_STATE) ? "HPD_STATE " : "");

	return event;
}
473
474 static
475 int cdns_mhdp_adjust_lt(struct cdns_mhdp_device *mhdp, unsigned int nlanes,
476                         unsigned int udelay, const u8 *lanes_data,
477                         u8 link_status[DP_LINK_STATUS_SIZE])
478 {
479         u8 payload[7];
480         u8 hdr[5]; /* For DPCD read response header */
481         u32 addr;
482         int ret;
483
484         if (nlanes != 4 && nlanes != 2 && nlanes != 1) {
485                 dev_err(mhdp->dev, "invalid number of lanes: %u\n", nlanes);
486                 ret = -EINVAL;
487                 goto out;
488         }
489
490         payload[0] = nlanes;
491         put_unaligned_be16(udelay, payload + 1);
492         memcpy(payload + 3, lanes_data, nlanes);
493
494         mutex_lock(&mhdp->mbox_mutex);
495
496         ret = cdns_mhdp_mailbox_send(mhdp, MB_MODULE_ID_DP_TX,
497                                      DPTX_ADJUST_LT,
498                                      sizeof(payload), payload);
499         if (ret)
500                 goto out;
501
502         /* Yes, read the DPCD read command response */
503         ret = cdns_mhdp_mailbox_recv_header(mhdp, MB_MODULE_ID_DP_TX,
504                                             DPTX_READ_DPCD,
505                                             sizeof(hdr) + DP_LINK_STATUS_SIZE);
506         if (ret)
507                 goto out;
508
509         ret = cdns_mhdp_mailbox_recv_data(mhdp, hdr, sizeof(hdr));
510         if (ret)
511                 goto out;
512
513         addr = get_unaligned_be24(hdr + 2);
514         if (addr != DP_LANE0_1_STATUS)
515                 goto out;
516
517         ret = cdns_mhdp_mailbox_recv_data(mhdp, link_status,
518                                           DP_LINK_STATUS_SIZE);
519
520 out:
521         mutex_unlock(&mhdp->mbox_mutex);
522
523         if (ret)
524                 dev_err(mhdp->dev, "Failed to adjust Link Training.\n");
525
526         return ret;
527 }
528
529 /**
530  * cdns_mhdp_link_power_up() - power up a DisplayPort link
531  * @aux: DisplayPort AUX channel
532  * @link: pointer to a structure containing the link configuration
533  *
534  * Returns 0 on success or a negative error code on failure.
535  */
536 static
537 int cdns_mhdp_link_power_up(struct drm_dp_aux *aux, struct cdns_mhdp_link *link)
538 {
539         u8 value;
540         int err;
541
542         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
543         if (link->revision < 0x11)
544                 return 0;
545
546         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
547         if (err < 0)
548                 return err;
549
550         value &= ~DP_SET_POWER_MASK;
551         value |= DP_SET_POWER_D0;
552
553         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
554         if (err < 0)
555                 return err;
556
557         /*
558          * According to the DP 1.1 specification, a "Sink Device must exit the
559          * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink
560          * Control Field" (register 0x600).
561          */
562         usleep_range(1000, 2000);
563
564         return 0;
565 }
566
567 /**
568  * cdns_mhdp_link_power_down() - power down a DisplayPort link
569  * @aux: DisplayPort AUX channel
570  * @link: pointer to a structure containing the link configuration
571  *
572  * Returns 0 on success or a negative error code on failure.
573  */
574 static
575 int cdns_mhdp_link_power_down(struct drm_dp_aux *aux,
576                               struct cdns_mhdp_link *link)
577 {
578         u8 value;
579         int err;
580
581         /* DP_SET_POWER register is only available on DPCD v1.1 and later */
582         if (link->revision < 0x11)
583                 return 0;
584
585         err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
586         if (err < 0)
587                 return err;
588
589         value &= ~DP_SET_POWER_MASK;
590         value |= DP_SET_POWER_D3;
591
592         err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
593         if (err < 0)
594                 return err;
595
596         return 0;
597 }
598
599 /**
600  * cdns_mhdp_link_configure() - configure a DisplayPort link
601  * @aux: DisplayPort AUX channel
602  * @link: pointer to a structure containing the link configuration
603  *
604  * Returns 0 on success or a negative error code on failure.
605  */
606 static
607 int cdns_mhdp_link_configure(struct drm_dp_aux *aux,
608                              struct cdns_mhdp_link *link)
609 {
610         u8 values[2];
611         int err;
612
613         values[0] = drm_dp_link_rate_to_bw_code(link->rate);
614         values[1] = link->num_lanes;
615
616         if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
617                 values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
618
619         err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
620         if (err < 0)
621                 return err;
622
623         return 0;
624 }
625
626 static unsigned int cdns_mhdp_max_link_rate(struct cdns_mhdp_device *mhdp)
627 {
628         return min(mhdp->host.link_rate, mhdp->sink.link_rate);
629 }
630
631 static u8 cdns_mhdp_max_num_lanes(struct cdns_mhdp_device *mhdp)
632 {
633         return min(mhdp->sink.lanes_cnt, mhdp->host.lanes_cnt);
634 }
635
/*
 * Best common EQ training pattern: pattern_supp is a bitmask on both sides,
 * so fls() of the intersection selects the highest pattern both support.
 */
static u8 cdns_mhdp_eq_training_pattern_supported(struct cdns_mhdp_device *mhdp)
{
	return fls(mhdp->host.pattern_supp & mhdp->sink.pattern_supp);
}
640
641 static bool cdns_mhdp_get_ssc_supported(struct cdns_mhdp_device *mhdp)
642 {
643         /* Check if SSC is supported by both sides */
644         return mhdp->host.ssc && mhdp->sink.ssc;
645 }
646
647 static enum drm_connector_status cdns_mhdp_detect(struct cdns_mhdp_device *mhdp)
648 {
649         dev_dbg(mhdp->dev, "%s: %d\n", __func__, mhdp->plugged);
650
651         if (mhdp->plugged)
652                 return connector_status_connected;
653         else
654                 return connector_status_disconnected;
655 }
656
/*
 * Decode and log the firmware version from the version registers.
 *
 * Newer firmware (major 2+) encodes the version as a decimal number in the
 * FW version registers. Older major-1 firmware instead stored a repository
 * revision number, so two known revision values are special-cased.
 *
 * Returns 0 on a recognized version, -ENODEV otherwise.
 */
static int cdns_mhdp_check_fw_version(struct cdns_mhdp_device *mhdp)
{
	u32 major_num, minor_num, revision;
	u32 fw_ver, lib_ver;

	fw_ver = (readl(mhdp->regs + CDNS_VER_H) << 8)
	       | readl(mhdp->regs + CDNS_VER_L);

	lib_ver = (readl(mhdp->regs + CDNS_LIB_H_ADDR) << 8)
		| readl(mhdp->regs + CDNS_LIB_L_ADDR);

	if (lib_ver < 33984) {
		/*
		 * Older FW versions with major number 1, used to store FW
		 * version information by storing repository revision number
		 * in registers. This is for identifying these FW versions.
		 */
		major_num = 1;
		minor_num = 2;
		if (fw_ver == 26098) {
			revision = 15;
		} else if (lib_ver == 0 && fw_ver == 0) {
			revision = 17;
		} else {
			dev_err(mhdp->dev, "Unsupported FW version: fw_ver = %u, lib_ver = %u\n",
				fw_ver, lib_ver);
			return -ENODEV;
		}
	} else {
		/* To identify newer FW versions with major number 2 onwards. */
		major_num = fw_ver / 10000;
		minor_num = (fw_ver / 100) % 100;
		revision = (fw_ver % 10000) % 100;
	}

	dev_dbg(mhdp->dev, "FW version: v%u.%u.%u\n", major_num, minor_num,
		revision);
	return 0;
}
696
/*
 * Load the firmware image and bring the controller's embedded uCPU up:
 * copy @fw into IMEM while the CPU is stalled, release the stall, wait for
 * the KEEP_ALIVE heartbeat, verify the FW version, activate the firmware
 * and - if the bridge is already attached - unmask the SW event interrupt.
 *
 * Returns 0 on success or a negative errno.
 */
static int cdns_mhdp_fw_activate(const struct firmware *fw,
				 struct cdns_mhdp_device *mhdp)
{
	unsigned int reg;
	int ret;

	/* Release uCPU reset and stall it. */
	writel(CDNS_CPU_STALL, mhdp->regs + CDNS_APB_CTRL);

	/* Copy the firmware image into the controller's instruction memory. */
	memcpy_toio(mhdp->regs + CDNS_MHDP_IMEM, fw->data, fw->size);

	/* Leave debug mode, release stall */
	writel(0, mhdp->regs + CDNS_APB_CTRL);

	/*
	 * Wait for the KEEP_ALIVE "message" on the first 8 bits.
	 * Updated each sched "tick" (~2ms)
	 */
	ret = readl_poll_timeout(mhdp->regs + CDNS_KEEP_ALIVE, reg,
				 reg & CDNS_KEEP_ALIVE_MASK, 500,
				 CDNS_KEEP_ALIVE_TIMEOUT);
	if (ret) {
		dev_err(mhdp->dev,
			"device didn't give any life sign: reg %d\n", reg);
		return ret;
	}

	ret = cdns_mhdp_check_fw_version(mhdp);
	if (ret)
		return ret;

	/* Init events to 0 as it's not cleared by FW at boot but on read */
	readl(mhdp->regs + CDNS_SW_EVENT0);
	readl(mhdp->regs + CDNS_SW_EVENT1);
	readl(mhdp->regs + CDNS_SW_EVENT2);
	readl(mhdp->regs + CDNS_SW_EVENT3);

	/* Activate uCPU */
	ret = cdns_mhdp_set_firmware_active(mhdp, true);
	if (ret)
		return ret;

	spin_lock(&mhdp->start_lock);

	mhdp->hw_state = MHDP_HW_READY;

	/*
	 * Here we must keep the lock while enabling the interrupts
	 * since it would otherwise be possible that interrupt enable
	 * code is executed after the bridge is detached. The similar
	 * situation is not possible in attach()/detach() callbacks
	 * since the hw_state changes from MHDP_HW_READY to
	 * MHDP_HW_STOPPED happens only due to driver removal when
	 * bridge should already be detached.
	 */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	spin_unlock(&mhdp->start_lock);

	/* Wake anyone blocked in the bridge waiting for the FW to load. */
	wake_up(&mhdp->fw_load_wq);
	dev_dbg(mhdp->dev, "DP FW activated\n");

	return 0;
}
763
/*
 * request_firmware_nowait() completion callback: activate the DP firmware
 * and, if the bridge is already attached, trigger hotplug processing.
 */
static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
{
	struct cdns_mhdp_device *mhdp = context;
	bool bridge_attached;
	int ret;

	dev_dbg(mhdp->dev, "firmware callback\n");

	if (!fw || !fw->data) {
		dev_err(mhdp->dev, "%s: No firmware.\n", __func__);
		return;
	}

	ret = cdns_mhdp_fw_activate(fw, mhdp);

	/* The image was copied to IMEM in fw_activate; it can be freed now. */
	release_firmware(fw);

	if (ret)
		return;

	/*
	 *  XXX how to make sure the bridge is still attached when
	 *      calling drm_kms_helper_hotplug_event() after releasing
	 *      the lock? We should not hold the spin lock when
	 *      calling drm_kms_helper_hotplug_event() since it may
	 *      cause a dead lock. FB-dev console calls detect from the
	 *      same thread just down the call stack started here.
	 */
	spin_lock(&mhdp->start_lock);
	bridge_attached = mhdp->bridge_attached;
	spin_unlock(&mhdp->start_lock);
	if (bridge_attached) {
		/* Full KMS hotplug when we own a connector, HPD notify otherwise. */
		if (mhdp->connector.dev)
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
		else
			drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
802
803 static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
804 {
805         int ret;
806
807         ret = request_firmware_nowait(THIS_MODULE, true, FW_NAME, mhdp->dev,
808                                       GFP_KERNEL, mhdp, cdns_mhdp_fw_cb);
809         if (ret) {
810                 dev_err(mhdp->dev, "failed to load firmware (%s), ret: %d\n",
811                         FW_NAME, ret);
812                 return ret;
813         }
814
815         return 0;
816 }
817
818 static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
819                                   struct drm_dp_aux_msg *msg)
820 {
821         struct cdns_mhdp_device *mhdp = dev_get_drvdata(aux->dev);
822         int ret;
823
824         if (msg->request != DP_AUX_NATIVE_WRITE &&
825             msg->request != DP_AUX_NATIVE_READ)
826                 return -EOPNOTSUPP;
827
828         if (msg->request == DP_AUX_NATIVE_WRITE) {
829                 const u8 *buf = msg->buffer;
830                 unsigned int i;
831
832                 for (i = 0; i < msg->size; ++i) {
833                         ret = cdns_mhdp_dpcd_write(mhdp,
834                                                    msg->address + i, buf[i]);
835                         if (!ret)
836                                 continue;
837
838                         dev_err(mhdp->dev,
839                                 "Failed to write DPCD addr %u\n",
840                                 msg->address + i);
841
842                         return ret;
843                 }
844         } else {
845                 ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
846                                           msg->buffer, msg->size);
847                 if (ret) {
848                         dev_err(mhdp->dev,
849                                 "Failed to read DPCD addr %u\n",
850                                 msg->address);
851
852                         return ret;
853                 }
854         }
855
856         return msg->size;
857 }
858
/*
 * Prepare the sink and the PHY for link training.
 *
 * Sequence: stop any active training pattern at the sink, reset the PHY
 * training configuration (honouring the host scrambler capability), program
 * enhanced framing and lane enables, write link rate/lane count to the sink,
 * reconfigure the PHY via phy_configure(), and finally start training
 * pattern 1 with scrambling disabled on both sides.
 *
 * Returns 0 on success, or the phy_configure() error.
 */
static int cdns_mhdp_link_training_init(struct cdns_mhdp_device *mhdp)
{
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;

	/* Make sure the sink is not running a training pattern. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	/* Reset PHY configuration */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;

	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Enhanced framing only when both host and sink support it. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_ENHNCD,
			    mhdp->sink.enhanced & mhdp->host.enhanced);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_LANE_EN,
			    CDNS_DP_LANE_EN_LANES(mhdp->link.num_lanes));

	/* Program rate and lane count into the sink's DPCD. */
	cdns_mhdp_link_configure(&mhdp->aux, &mhdp->link);
	phy_cfg.dp.link_rate = mhdp->link.rate / 100;
	phy_cfg.dp.lanes = mhdp->link.num_lanes;

	/* Start training with zero voltage swing and pre-emphasis. */
	memset(phy_cfg.dp.voltage, 0, sizeof(phy_cfg.dp.voltage));
	memset(phy_cfg.dp.pre, 0, sizeof(phy_cfg.dp.pre));

	phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
	phy_cfg.dp.set_lanes = true;
	phy_cfg.dp.set_rate = true;
	phy_cfg.dp.set_voltages = true;
	ret = phy_configure(mhdp->phy,  &phy_cfg);
	if (ret) {
		dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
			__func__, ret);
		return ret;
	}

	/* Enable PHY training with pattern 1 and scrambler bypassed. */
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG,
			    CDNS_PHY_COMMON_CONFIG |
			    CDNS_PHY_TRAINING_EN |
			    CDNS_PHY_TRAINING_TYPE(1) |
			    CDNS_PHY_SCRAMBLER_BYPASS);

	/* Tell the sink to expect training pattern 1, unscrambled. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE);

	return 0;
}
910
/*
 * Translate the sink's adjustment requests from @link_status into per-lane
 * DPCD TRAINING_LANEx_SET values (@lanes_data) and PHY settings (@phy_cfg),
 * clamping each to the host's voltage-swing and pre-emphasis limits.
 */
static void cdns_mhdp_get_adjust_train(struct cdns_mhdp_device *mhdp,
				       u8 link_status[DP_LINK_STATUS_SIZE],
				       u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
				       union phy_configure_opts *phy_cfg)
{
	u8 adjust, max_pre_emph, max_volt_swing;
	u8 set_volt, set_pre;
	unsigned int i;

	/* Host limits, shifted to line up with the DPCD field positions. */
	max_pre_emph = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis)
			   << DP_TRAIN_PRE_EMPHASIS_SHIFT;
	max_volt_swing = CDNS_VOLT_SWING(mhdp->host.volt_swing);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Check if Voltage swing and pre-emphasis are within limits */
		adjust = drm_dp_get_adjust_request_voltage(link_status, i);
		set_volt = min(adjust, max_volt_swing);

		adjust = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
		set_pre = min(adjust, max_pre_emph)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT;

		/*
		 * Voltage swing level and pre-emphasis level combination is
		 * not allowed: leaving pre-emphasis as-is, and adjusting
		 * voltage swing.
		 */
		if (set_volt + set_pre > 3)
			set_volt = 3 - set_pre;

		phy_cfg->dp.voltage[i] = set_volt;
		lanes_data[i] = set_volt;

		if (set_volt == max_volt_swing)
			lanes_data[i] |= DP_TRAIN_MAX_SWING_REACHED;

		phy_cfg->dp.pre[i] = set_pre;
		lanes_data[i] |= (set_pre << DP_TRAIN_PRE_EMPHASIS_SHIFT);

		if (set_pre == (max_pre_emph >> DP_TRAIN_PRE_EMPHASIS_SHIFT))
			lanes_data[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
	}
}
954
955 static
956 void cdns_mhdp_set_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
957                                           unsigned int lane, u8 volt)
958 {
959         unsigned int s = ((lane & 1) ?
960                           DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
961                           DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
962         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
963
964         link_status[idx] &= ~(DP_ADJUST_VOLTAGE_SWING_LANE0_MASK << s);
965         link_status[idx] |= volt << s;
966 }
967
968 static
969 void cdns_mhdp_set_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
970                                                unsigned int lane, u8 pre_emphasis)
971 {
972         unsigned int s = ((lane & 1) ?
973                           DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
974                           DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
975         unsigned int idx = DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS + (lane >> 1);
976
977         link_status[idx] &= ~(DP_ADJUST_PRE_EMPHASIS_LANE0_MASK << s);
978         link_status[idx] |= pre_emphasis << s;
979 }
980
981 static void cdns_mhdp_adjust_requested_eq(struct cdns_mhdp_device *mhdp,
982                                           u8 link_status[DP_LINK_STATUS_SIZE])
983 {
984         u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
985         u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
986         unsigned int i;
987         u8 volt, pre;
988
989         for (i = 0; i < mhdp->link.num_lanes; i++) {
990                 volt = drm_dp_get_adjust_request_voltage(link_status, i);
991                 pre = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
992                 if (volt + pre > 3)
993                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
994                                                              3 - pre);
995                 if (mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING)
996                         cdns_mhdp_set_adjust_request_voltage(link_status, i,
997                                                              max_volt);
998                 if (mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS)
999                         cdns_mhdp_set_adjust_request_pre_emphasis(link_status,
1000                                                                   i, max_pre);
1001         }
1002 }
1003
1004 static void cdns_mhdp_print_lt_status(const char *prefix,
1005                                       struct cdns_mhdp_device *mhdp,
1006                                       union phy_configure_opts *phy_cfg)
1007 {
1008         char vs[8] = "0/0/0/0";
1009         char pe[8] = "0/0/0/0";
1010         unsigned int i;
1011
1012         for (i = 0; i < mhdp->link.num_lanes; i++) {
1013                 vs[i * 2] = '0' + phy_cfg->dp.voltage[i];
1014                 pe[i * 2] = '0' + phy_cfg->dp.pre[i];
1015         }
1016
1017         vs[i * 2 - 1] = '\0';
1018         pe[i * 2 - 1] = '\0';
1019
1020         dev_dbg(mhdp->dev, "%s, %u lanes, %u Mbps, vs %s, pe %s\n",
1021                 prefix,
1022                 mhdp->link.num_lanes, mhdp->link.rate / 100,
1023                 vs, pe);
1024 }
1025
/*
 * Run the channel equalization (EQ) phase of DP link training.
 *
 * @eq_tps: training pattern sequence to use (selected earlier from the
 *          patterns both host and sink support; 4 means TPS4)
 * @training_interval: delay passed to cdns_mhdp_adjust_lt() between
 *                     driving the pattern and re-reading the status
 *
 * Returns true when equalization succeeded on all active lanes, false
 * on EQ failure, lost clock recovery, or PHY configuration error.
 */
static bool cdns_mhdp_link_training_channel_eq(struct cdns_mhdp_device *mhdp,
					       u8 eq_tps,
					       unsigned int training_interval)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES], fail_counter_short = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	union phy_configure_opts phy_cfg;
	u32 reg32;
	int ret;
	bool r;

	dev_dbg(mhdp->dev, "Starting EQ phase\n");

	/* Enable link training TPS[eq_tps] in PHY */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_EN |
		CDNS_PHY_TRAINING_TYPE(eq_tps);
	/* TPS4 is scrambled; TPS1-3 require scrambling to be bypassed. */
	if (eq_tps != 4)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	/* Tell the sink which pattern to expect. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   (eq_tps != 4) ? eq_tps | DP_LINK_SCRAMBLING_DISABLE :
			   CDNS_DP_TRAINING_PATTERN_4);

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	/* Up to 5 adjust iterations, per the DP training sequence. */
	do {
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy,  &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/* Push lane settings to the sink and refresh link_status. */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes,
				    training_interval, lanes_data, link_status);

		/* Losing clock recovery during EQ is a hard failure. */
		r = drm_dp_clock_recovery_ok(link_status, mhdp->link.num_lanes);
		if (!r)
			goto err;

		if (drm_dp_channel_eq_ok(link_status, mhdp->link.num_lanes)) {
			cdns_mhdp_print_lt_status("EQ phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		fail_counter_short++;

		cdns_mhdp_adjust_requested_eq(mhdp, link_status);
	} while (fail_counter_short < 5);

err:
	cdns_mhdp_print_lt_status("EQ phase failed", mhdp, &phy_cfg);

	return false;
}
1090
1091 static void cdns_mhdp_adjust_requested_cr(struct cdns_mhdp_device *mhdp,
1092                                           u8 link_status[DP_LINK_STATUS_SIZE],
1093                                           u8 *req_volt, u8 *req_pre)
1094 {
1095         const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
1096         const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
1097         unsigned int i;
1098
1099         for (i = 0; i < mhdp->link.num_lanes; i++) {
1100                 u8 val;
1101
1102                 val = mhdp->host.volt_swing & CDNS_FORCE_VOLT_SWING ?
1103                       max_volt : req_volt[i];
1104                 cdns_mhdp_set_adjust_request_voltage(link_status, i, val);
1105
1106                 val = mhdp->host.pre_emphasis & CDNS_FORCE_PRE_EMPHASIS ?
1107                       max_pre : req_pre[i];
1108                 cdns_mhdp_set_adjust_request_pre_emphasis(link_status, i, val);
1109         }
1110 }
1111
/*
 * Evaluate the link status after one clock recovery (CR) iteration.
 *
 * @cr_done: set when all active lanes achieved clock recovery
 * @same_before_adjust: set when any lane requested exactly the swing /
 *     pre-emphasis it was already driven with (no-progress detection)
 * @max_swing_reached: set when CR failed while a lane already sits at
 *     the maximum swing + pre-emphasis budget (training must stop)
 * @before_cr: per-lane settings used in the last iteration
 *     (TRAINING_LANEx_SET format)
 * @after_cr: DPCD link status read after the iteration
 * @req_volt, @req_pre: filled with the host-clamped per-lane requests
 */
static
void cdns_mhdp_validate_cr(struct cdns_mhdp_device *mhdp, bool *cr_done,
			   bool *same_before_adjust, bool *max_swing_reached,
			   u8 before_cr[CDNS_DP_MAX_NUM_LANES],
			   u8 after_cr[DP_LINK_STATUS_SIZE], u8 *req_volt,
			   u8 *req_pre)
{
	const u8 max_volt = CDNS_VOLT_SWING(mhdp->host.volt_swing);
	const u8 max_pre = CDNS_PRE_EMPHASIS(mhdp->host.pre_emphasis);
	bool same_pre, same_volt;
	unsigned int i;
	u8 adjust;

	*same_before_adjust = false;
	*max_swing_reached = false;
	*cr_done = drm_dp_clock_recovery_ok(after_cr, mhdp->link.num_lanes);

	for (i = 0; i < mhdp->link.num_lanes; i++) {
		/* Clamp the sink's requests to the host limits. */
		adjust = drm_dp_get_adjust_request_voltage(after_cr, i);
		req_volt[i] = min(adjust, max_volt);

		adjust = drm_dp_get_adjust_request_pre_emphasis(after_cr, i) >>
		      DP_TRAIN_PRE_EMPHASIS_SHIFT;
		req_pre[i] = min(adjust, max_pre);

		/* Compare against what was driven in the last iteration. */
		same_pre = (before_cr[i] & DP_TRAIN_PRE_EMPHASIS_MASK) ==
			   req_pre[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT;
		same_volt = (before_cr[i] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
			    req_volt[i];
		if (same_pre && same_volt)
			*same_before_adjust = true;

		/* 3.1.5.2 in DP Standard v1.4. Table 3-1 */
		if (!*cr_done && req_volt[i] + req_pre[i] >= 3) {
			*max_swing_reached = true;
			return;
		}
	}
}
1151
/*
 * Run the clock recovery (CR) phase of DP link training.
 *
 * Loops until all lanes report CR_DONE, giving up after 5 consecutive
 * iterations without a new adjust request or 10 iterations overall
 * (matching the DP v1.4 CR retry rules).
 *
 * Returns true on success, false on failure or PHY error.
 */
static bool cdns_mhdp_link_training_cr(struct cdns_mhdp_device *mhdp)
{
	u8 lanes_data[CDNS_DP_MAX_NUM_LANES],
	fail_counter_short = 0, fail_counter_cr_long = 0;
	u8 link_status[DP_LINK_STATUS_SIZE];
	bool cr_done;
	union phy_configure_opts phy_cfg;
	int ret;

	dev_dbg(mhdp->dev, "Starting CR phase\n");

	/* Program TPS1 into the PHY and the sink. */
	ret = cdns_mhdp_link_training_init(mhdp);
	if (ret)
		goto err;

	drm_dp_dpcd_read_link_status(&mhdp->aux, link_status);

	do {
		u8 requested_adjust_volt_swing[CDNS_DP_MAX_NUM_LANES] = {};
		u8 requested_adjust_pre_emphasis[CDNS_DP_MAX_NUM_LANES] = {};
		bool same_before_adjust, max_swing_reached;

		/* Apply the clamped swing/pre-emphasis requests to the PHY. */
		cdns_mhdp_get_adjust_train(mhdp, link_status, lanes_data,
					   &phy_cfg);
		phy_cfg.dp.lanes = mhdp->link.num_lanes;
		phy_cfg.dp.ssc = cdns_mhdp_get_ssc_supported(mhdp);
		phy_cfg.dp.set_lanes = false;
		phy_cfg.dp.set_rate = false;
		phy_cfg.dp.set_voltages = true;
		ret = phy_configure(mhdp->phy,  &phy_cfg);
		if (ret) {
			dev_err(mhdp->dev, "%s: phy_configure() failed: %d\n",
				__func__, ret);
			goto err;
		}

		/*
		 * Push the lane settings to the sink and refresh the status;
		 * 100 is presumably the CR-phase interval in us (adjust_lt is
		 * defined elsewhere in this file).
		 */
		cdns_mhdp_adjust_lt(mhdp, mhdp->link.num_lanes, 100,
				    lanes_data, link_status);

		cdns_mhdp_validate_cr(mhdp, &cr_done, &same_before_adjust,
				      &max_swing_reached, lanes_data,
				      link_status,
				      requested_adjust_volt_swing,
				      requested_adjust_pre_emphasis);

		if (max_swing_reached) {
			dev_err(mhdp->dev, "CR: max swing reached\n");
			goto err;
		}

		if (cr_done) {
			cdns_mhdp_print_lt_status("CR phase ok", mhdp,
						  &phy_cfg);
			return true;
		}

		/* Not all CR_DONE bits set */
		fail_counter_cr_long++;

		if (same_before_adjust) {
			/* No new request: count towards the short limit. */
			fail_counter_short++;
			continue;
		}

		fail_counter_short = 0;
		/*
		 * Voltage swing/pre-emphasis adjust requested
		 * during CR phase
		 */
		cdns_mhdp_adjust_requested_cr(mhdp, link_status,
					      requested_adjust_volt_swing,
					      requested_adjust_pre_emphasis);
	} while (fail_counter_short < 5 && fail_counter_cr_long < 10);

err:
	cdns_mhdp_print_lt_status("CR phase failed", mhdp, &phy_cfg);

	return false;
}
1231
1232 static void cdns_mhdp_lower_link_rate(struct cdns_mhdp_link *link)
1233 {
1234         switch (drm_dp_link_rate_to_bw_code(link->rate)) {
1235         case DP_LINK_BW_2_7:
1236                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_1_62);
1237                 break;
1238         case DP_LINK_BW_5_4:
1239                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_2_7);
1240                 break;
1241         case DP_LINK_BW_8_1:
1242                 link->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
1243                 break;
1244         }
1245 }
1246
/*
 * Run full link training (clock recovery then channel equalization),
 * progressively falling back to lower rates and lane counts until
 * training succeeds or the minimum configuration has failed too.
 *
 * @training_interval: EQ-phase AUX read interval in us (from DPCD)
 *
 * Returns 0 on success; a negative error code (from a register read)
 * or -EIO on training failure.
 */
static int cdns_mhdp_link_training(struct cdns_mhdp_device *mhdp,
				   unsigned int training_interval)
{
	u32 reg32;
	const u8 eq_tps = cdns_mhdp_eq_training_pattern_supported(mhdp);
	int ret;

	while (1) {
		if (!cdns_mhdp_link_training_cr(mhdp)) {
			/*
			 * CR failed: first walk the rate down to 1.62 Gb/s,
			 * then start halving the lane count (restoring the
			 * max rate for the reduced width).
			 */
			if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			    DP_LINK_BW_1_62) {
				dev_dbg(mhdp->dev,
					"Reducing link rate during CR phase\n");
				cdns_mhdp_lower_link_rate(&mhdp->link);

				continue;
			} else if (mhdp->link.num_lanes > 1) {
				dev_dbg(mhdp->dev,
					"Reducing lanes number during CR phase\n");
				mhdp->link.num_lanes >>= 1;
				mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);

				continue;
			}

			dev_err(mhdp->dev,
				"Link training failed during CR phase\n");
			goto err;
		}

		if (cdns_mhdp_link_training_channel_eq(mhdp, eq_tps,
						       training_interval))
			break;

		/*
		 * EQ failed: fall back in the opposite order -- halve the
		 * lane count first, then lower the rate (restoring the
		 * maximum lane count for the new rate).
		 */
		if (mhdp->link.num_lanes > 1) {
			dev_dbg(mhdp->dev,
				"Reducing lanes number during EQ phase\n");
			mhdp->link.num_lanes >>= 1;

			continue;
		} else if (drm_dp_link_rate_to_bw_code(mhdp->link.rate) !=
			   DP_LINK_BW_1_62) {
			dev_dbg(mhdp->dev,
				"Reducing link rate during EQ phase\n");
			cdns_mhdp_lower_link_rate(&mhdp->link);
			mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

			continue;
		}

		dev_err(mhdp->dev, "Link training failed during EQ phase\n");
		goto err;
	}

	dev_dbg(mhdp->dev, "Link training ok. Lanes: %u, Rate %u Mbps\n",
		mhdp->link.num_lanes, mhdp->link.rate / 100);

	/* Training done: stop sending patterns, restore sink scrambling. */
	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   mhdp->host.scrambler ? 0 :
			   DP_LINK_SCRAMBLING_DISABLE);

	/* Program the framer with the trained lane count and enable it. */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &reg32);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return ret;
	}
	reg32 &= ~GENMASK(1, 0);
	reg32 |= CDNS_DP_NUM_LANES(mhdp->link.num_lanes);
	reg32 |= CDNS_DP_WR_FAILING_EDGE_VSYNC;
	reg32 |= CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, reg32);

	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	return 0;
err:
	/* Reset PHY config */
	reg32 = CDNS_PHY_COMMON_CONFIG | CDNS_PHY_TRAINING_TYPE(1);
	if (!mhdp->host.scrambler)
		reg32 |= CDNS_PHY_SCRAMBLER_BYPASS;
	cdns_mhdp_reg_write(mhdp, CDNS_DPTX_PHY_CONFIG, reg32);

	drm_dp_dpcd_writeb(&mhdp->aux, DP_TRAINING_PATTERN_SET,
			   DP_TRAINING_PATTERN_DISABLE);

	return -EIO;
}
1340
1341 static u32 cdns_mhdp_get_training_interval_us(struct cdns_mhdp_device *mhdp,
1342                                               u32 interval)
1343 {
1344         if (interval == 0)
1345                 return 400;
1346         if (interval < 5)
1347                 return 4000 << (interval - 1);
1348         dev_err(mhdp->dev,
1349                 "wrong training interval returned by DPCD: %d\n", interval);
1350         return 0;
1351 }
1352
1353 static void cdns_mhdp_fill_host_caps(struct cdns_mhdp_device *mhdp)
1354 {
1355         unsigned int link_rate;
1356
1357         /* Get source capabilities based on PHY attributes */
1358
1359         mhdp->host.lanes_cnt = mhdp->phy->attrs.bus_width;
1360         if (!mhdp->host.lanes_cnt)
1361                 mhdp->host.lanes_cnt = 4;
1362
1363         link_rate = mhdp->phy->attrs.max_link_rate;
1364         if (!link_rate)
1365                 link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_8_1);
1366         else
1367                 /* PHY uses Mb/s, DRM uses tens of kb/s. */
1368                 link_rate *= 100;
1369
1370         mhdp->host.link_rate = link_rate;
1371         mhdp->host.volt_swing = CDNS_VOLT_SWING(3);
1372         mhdp->host.pre_emphasis = CDNS_PRE_EMPHASIS(3);
1373         mhdp->host.pattern_supp = CDNS_SUPPORT_TPS(1) |
1374                                   CDNS_SUPPORT_TPS(2) | CDNS_SUPPORT_TPS(3) |
1375                                   CDNS_SUPPORT_TPS(4);
1376         mhdp->host.lane_mapping = CDNS_LANE_MAPPING_NORMAL;
1377         mhdp->host.fast_link = false;
1378         mhdp->host.enhanced = true;
1379         mhdp->host.scrambler = true;
1380         mhdp->host.ssc = false;
1381 }
1382
1383 static void cdns_mhdp_fill_sink_caps(struct cdns_mhdp_device *mhdp,
1384                                      u8 dpcd[DP_RECEIVER_CAP_SIZE])
1385 {
1386         mhdp->sink.link_rate = mhdp->link.rate;
1387         mhdp->sink.lanes_cnt = mhdp->link.num_lanes;
1388         mhdp->sink.enhanced = !!(mhdp->link.capabilities &
1389                                  DP_LINK_CAP_ENHANCED_FRAMING);
1390
1391         /* Set SSC support */
1392         mhdp->sink.ssc = !!(dpcd[DP_MAX_DOWNSPREAD] &
1393                                   DP_MAX_DOWNSPREAD_0_5);
1394
1395         /* Set TPS support */
1396         mhdp->sink.pattern_supp = CDNS_SUPPORT_TPS(1) | CDNS_SUPPORT_TPS(2);
1397         if (drm_dp_tps3_supported(dpcd))
1398                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(3);
1399         if (drm_dp_tps4_supported(dpcd))
1400                 mhdp->sink.pattern_supp |= CDNS_SUPPORT_TPS(4);
1401
1402         /* Set fast link support */
1403         mhdp->sink.fast_link = !!(dpcd[DP_MAX_DOWNSPREAD] &
1404                                   DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
1405 }
1406
/*
 * Bring the DP link up: read the sink's receiver capabilities, power
 * the sink up, pick the highest common link configuration and run link
 * training. Caller must hold mhdp->link_mutex.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int cdns_mhdp_link_up(struct cdns_mhdp_device *mhdp)
{
	u8 dpcd[DP_RECEIVER_CAP_SIZE], amp[2];
	u32 resp, interval, interval_us;
	u8 ext_cap_chk = 0;
	unsigned int addr;
	int err;

	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	/*
	 * If the sink exposes the extended receiver capability field,
	 * read the caps from there instead of the base DPCD address.
	 */
	drm_dp_dpcd_readb(&mhdp->aux, DP_TRAINING_AUX_RD_INTERVAL,
			  &ext_cap_chk);

	if (ext_cap_chk & DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)
		addr = DP_DP13_DPCD_REV;
	else
		addr = DP_DPCD_REV;

	err = drm_dp_dpcd_read(&mhdp->aux, addr, dpcd, DP_RECEIVER_CAP_SIZE);
	if (err < 0) {
		dev_err(mhdp->dev, "Failed to read receiver capabilities\n");
		return err;
	}

	/* dpcd[0..2]: DPCD revision, max link rate code, max lane count. */
	mhdp->link.revision = dpcd[0];
	mhdp->link.rate = drm_dp_bw_code_to_link_rate(dpcd[1]);
	mhdp->link.num_lanes = dpcd[2] & DP_MAX_LANE_COUNT_MASK;

	if (dpcd[2] & DP_ENHANCED_FRAME_CAP)
		mhdp->link.capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;

	dev_dbg(mhdp->dev, "Set sink device power state via DPCD\n");
	cdns_mhdp_link_power_up(&mhdp->aux, &mhdp->link);

	cdns_mhdp_fill_sink_caps(mhdp, dpcd);

	/* Train at the highest rate/width both ends support. */
	mhdp->link.rate = cdns_mhdp_max_link_rate(mhdp);
	mhdp->link.num_lanes = cdns_mhdp_max_num_lanes(mhdp);

	/* Disable framer for link training */
	err = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
	if (err < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			err);
		return err;
	}

	resp &= ~CDNS_DP_FRAMER_EN;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);

	/* Spread AMP if required, enable 8b/10b coding */
	amp[0] = cdns_mhdp_get_ssc_supported(mhdp) ? DP_SPREAD_AMP_0_5 : 0;
	amp[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&mhdp->aux, DP_DOWNSPREAD_CTRL, amp, 2);

	/* NOTE(review): bitwise '&' of two bools; equivalent here, but '&&'
	 * would state the intent more clearly.
	 */
	if (mhdp->host.fast_link & mhdp->sink.fast_link) {
		dev_err(mhdp->dev, "fastlink not supported\n");
		return -EOPNOTSUPP;
	}

	interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & DP_TRAINING_AUX_RD_MASK;
	interval_us = cdns_mhdp_get_training_interval_us(mhdp, interval);
	if (!interval_us ||
	    cdns_mhdp_link_training(mhdp, interval_us)) {
		dev_err(mhdp->dev, "Link training failed. Exiting.\n");
		return -EIO;
	}

	mhdp->link_up = true;

	return 0;
}
1480
/*
 * Tear the DP link down, powering the sink down only while it is still
 * plugged. Caller must hold mhdp->link_mutex.
 */
static void cdns_mhdp_link_down(struct cdns_mhdp_device *mhdp)
{
	WARN_ON(!mutex_is_locked(&mhdp->link_mutex));

	if (mhdp->plugged)
		cdns_mhdp_link_power_down(&mhdp->aux, &mhdp->link);

	mhdp->link_up = false;
}
1490
/*
 * Read the sink's EDID via cdns_mhdp_get_edid_block; returns NULL when
 * no sink is connected. The returned EDID must be kfree()d by the caller.
 */
static struct edid *cdns_mhdp_get_edid(struct cdns_mhdp_device *mhdp,
				       struct drm_connector *connector)
{
	if (!mhdp->plugged)
		return NULL;

	return drm_do_get_edid(connector, cdns_mhdp_get_edid_block, mhdp);
}
1499
1500 static int cdns_mhdp_get_modes(struct drm_connector *connector)
1501 {
1502         struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
1503         struct edid *edid;
1504         int num_modes;
1505
1506         if (!mhdp->plugged)
1507                 return 0;
1508
1509         edid = cdns_mhdp_get_edid(mhdp, connector);
1510         if (!edid) {
1511                 dev_err(mhdp->dev, "Failed to read EDID\n");
1512                 return 0;
1513         }
1514
1515         drm_connector_update_edid_property(connector, edid);
1516         num_modes = drm_add_edid_modes(connector, edid);
1517         kfree(edid);
1518
1519         /*
1520          * HACK: Warn about unsupported display formats until we deal
1521          *       with them correctly.
1522          */
1523         if (connector->display_info.color_formats &&
1524             !(connector->display_info.color_formats &
1525               mhdp->display_fmt.color_format))
1526                 dev_warn(mhdp->dev,
1527                          "%s: No supported color_format found (0x%08x)\n",
1528                         __func__, connector->display_info.color_formats);
1529
1530         if (connector->display_info.bpc &&
1531             connector->display_info.bpc < mhdp->display_fmt.bpc)
1532                 dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
1533                          __func__, connector->display_info.bpc,
1534                          mhdp->display_fmt.bpc);
1535
1536         return num_modes;
1537 }
1538
/* Connector .detect_ctx callback: delegate to the bridge-level detect. */
static int cdns_mhdp_connector_detect(struct drm_connector *conn,
				      struct drm_modeset_acquire_ctx *ctx,
				      bool force)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);

	return cdns_mhdp_detect(mhdp);
}
1547
1548 static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
1549 {
1550         u32 bpp;
1551
1552         if (fmt->y_only)
1553                 return fmt->bpc;
1554
1555         switch (fmt->color_format) {
1556         case DRM_COLOR_FORMAT_RGB444:
1557         case DRM_COLOR_FORMAT_YCBCR444:
1558                 bpp = fmt->bpc * 3;
1559                 break;
1560         case DRM_COLOR_FORMAT_YCBCR422:
1561                 bpp = fmt->bpc * 2;
1562                 break;
1563         case DRM_COLOR_FORMAT_YCBCR420:
1564                 bpp = fmt->bpc * 3 / 2;
1565                 break;
1566         default:
1567                 bpp = fmt->bpc * 3;
1568                 WARN_ON(1);
1569         }
1570         return bpp;
1571 }
1572
1573 static
1574 bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
1575                             const struct drm_display_mode *mode,
1576                             unsigned int lanes, unsigned int rate)
1577 {
1578         u32 max_bw, req_bw, bpp;
1579
1580         /*
1581          * mode->clock is expressed in kHz. Multiplying by bpp and dividing by 8
1582          * we get the number of kB/s. DisplayPort applies a 8b-10b encoding, the
1583          * value thus equals the bandwidth in 10kb/s units, which matches the
1584          * units of the rate parameter.
1585          */
1586
1587         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1588         req_bw = mode->clock * bpp / 8;
1589         max_bw = lanes * rate;
1590         if (req_bw > max_bw) {
1591                 dev_dbg(mhdp->dev,
1592                         "Unsupported Mode: %s, Req BW: %u, Available Max BW:%u\n",
1593                         mode->name, req_bw, max_bw);
1594
1595                 return false;
1596         }
1597
1598         return true;
1599 }
1600
1601 static
1602 enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
1603                                           struct drm_display_mode *mode)
1604 {
1605         struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
1606
1607         mutex_lock(&mhdp->link_mutex);
1608
1609         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
1610                                     mhdp->link.rate)) {
1611                 mutex_unlock(&mhdp->link_mutex);
1612                 return MODE_CLOCK_HIGH;
1613         }
1614
1615         mutex_unlock(&mhdp->link_mutex);
1616         return MODE_OK;
1617 }
1618
/*
 * Connector .atomic_check: force a full modeset whenever the requested
 * HDCP content-protection state changes, so (de)activation runs in the
 * bridge enable/disable path. No-op when HDCP is unsupported.
 */
static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
					    struct drm_atomic_state *state)
{
	struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
	struct drm_connector_state *old_state, *new_state;
	struct drm_crtc_state *crtc_state;
	u64 old_cp, new_cp;

	if (!mhdp->hdcp_supported)
		return 0;

	old_state = drm_atomic_get_old_connector_state(state, conn);
	new_state = drm_atomic_get_new_connector_state(state, conn);
	old_cp = old_state->content_protection;
	new_cp = new_state->content_protection;

	/* Content type changed while protection is wanted: re-enable. */
	if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
		new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		goto mode_changed;
	}

	/* Connector being disabled: downgrade ENABLED back to DESIRED. */
	if (!new_state->crtc) {
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return 0;
	}

	/* No change, or a DESIRED -> ENABLED transition: no modeset needed. */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
		return 0;

mode_changed:
	crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
	crtc_state->mode_changed = true;

	return 0;
}
1658
/* Connector helper hooks: probing, mode validation and HDCP checks. */
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
	.detect_ctx = cdns_mhdp_connector_detect,
	.get_modes = cdns_mhdp_get_modes,
	.mode_valid = cdns_mhdp_mode_valid,
	.atomic_check = cdns_mhdp_connector_atomic_check,
};
1665
/* Connector ops: stock atomic helpers, no driver-specific state. */
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.reset = drm_atomic_helper_connector_reset,
	.destroy = drm_connector_cleanup,
};
1673
/*
 * Create and register the driver-managed DRM connector (used when the
 * bridge is attached without DRM_BRIDGE_ATTACH_NO_CONNECTOR).
 * Returns 0 on success, a negative error code otherwise.
 */
static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
{
	u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
	struct drm_connector *conn = &mhdp->connector;
	struct drm_bridge *bridge = &mhdp->bridge;
	int ret;

	if (!bridge->encoder) {
		dev_err(mhdp->dev, "Parent encoder object not found");
		return -ENODEV;
	}

	/* Hot-plug detection is interrupt driven, no polling needed. */
	conn->polled = DRM_CONNECTOR_POLL_HPD;

	ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
		return ret;
	}

	drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);

	ret = drm_display_info_set_bus_formats(&conn->display_info,
					       &bus_format, 1);
	if (ret)
		return ret;

	ret = drm_connector_attach_encoder(conn, bridge->encoder);
	if (ret) {
		dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
		return ret;
	}

	/* Expose the content-protection property when HDCP is available. */
	if (mhdp->hdcp_supported)
		ret = drm_connector_attach_content_protection_property(conn, true);

	return ret;
}
1713
/*
 * drm_bridge_funcs.attach: register the DP AUX channel, optionally create
 * the connector, and enable SW event interrupts once the firmware is ready.
 *
 * bridge_attached is updated under start_lock so the IRQ handler and the
 * firmware-load path see a consistent view of the attach state.
 */
static int cdns_mhdp_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
	bool hw_ready;
	int ret;

	dev_dbg(mhdp->dev, "%s\n", __func__);

	mhdp->aux.drm_dev = bridge->dev;
	ret = drm_dp_aux_register(&mhdp->aux);
	if (ret < 0)
		return ret;

	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
		ret = cdns_mhdp_connector_init(mhdp);
		if (ret)
			goto aux_unregister;
	}

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = true;
	/* Sample hw_state under the lock; interrupts are unmasked outside. */
	hw_ready = mhdp->hw_state == MHDP_HW_READY;

	spin_unlock(&mhdp->start_lock);

	/* Enable SW event interrupts */
	if (hw_ready)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);

	return 0;
aux_unregister:
	drm_dp_aux_unregister(&mhdp->aux);
	return ret;
}
1751
/*
 * Program the DP framer and MSA (Main Stream Attribute) registers for
 * @mode on stream mhdp->stream_id, then enable the framer.
 *
 * Translates the adjusted display mode and the configured display format
 * (mhdp->display_fmt: color format, bpc, y_only) into the controller's
 * pixel representation, sync polarities, porch/MSA timings and the
 * MISC0/MISC1 attribute bytes sent to the sink.
 */
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
				      const struct drm_display_mode *mode)
{
	unsigned int dp_framer_sp = 0, msa_horizontal_1,
		msa_vertical_1, bnd_hsync2vsync, hsync2vsync_pol_ctrl,
		misc0 = 0, misc1 = 0, pxl_repr,
		front_porch, back_porch, msa_h0, msa_v0, hsync, vsync,
		dp_vertical_1;
	u8 stream_id = mhdp->stream_id;
	u32 bpp, bpc, pxlfmt, framer;
	int ret;

	pxlfmt = mhdp->display_fmt.color_format;
	bpc = mhdp->display_fmt.bpc;

	/*
	 * If YCBCR supported and stream not SD, use ITU709
	 * Need to handle ITU version with YCBCR420 when supported
	 */
	if ((pxlfmt == DRM_COLOR_FORMAT_YCBCR444 ||
	     pxlfmt == DRM_COLOR_FORMAT_YCBCR422) && mode->crtc_vdisplay >= 720)
		misc0 = DP_YCBCR_COEFFICIENTS_ITU709;

	bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);

	/* Map the color format to the framer pixel representation and MISC0. */
	switch (pxlfmt) {
	case DRM_COLOR_FORMAT_RGB444:
		pxl_repr = CDNS_DP_FRAMER_RGB << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_RGB;
		break;
	case DRM_COLOR_FORMAT_YCBCR444:
		pxl_repr = CDNS_DP_FRAMER_YCBCR444 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr444 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR422:
		pxl_repr = CDNS_DP_FRAMER_YCBCR422 << CDNS_DP_FRAMER_PXL_FORMAT;
		misc0 |= DP_COLOR_FORMAT_YCbCr422 | DP_TEST_DYNAMIC_RANGE_CEA;
		break;
	case DRM_COLOR_FORMAT_YCBCR420:
		/* Y420 is signalled via the VSC SDP, not MISC0 (see below). */
		pxl_repr = CDNS_DP_FRAMER_YCBCR420 << CDNS_DP_FRAMER_PXL_FORMAT;
		break;
	default:
		pxl_repr = CDNS_DP_FRAMER_Y_ONLY << CDNS_DP_FRAMER_PXL_FORMAT;
	}

	/* Encode the bit depth into MISC0 and the framer pixel format. */
	switch (bpc) {
	case 6:
		misc0 |= DP_TEST_BIT_DEPTH_6;
		pxl_repr |= CDNS_DP_FRAMER_6_BPC;
		break;
	case 8:
		misc0 |= DP_TEST_BIT_DEPTH_8;
		pxl_repr |= CDNS_DP_FRAMER_8_BPC;
		break;
	case 10:
		misc0 |= DP_TEST_BIT_DEPTH_10;
		pxl_repr |= CDNS_DP_FRAMER_10_BPC;
		break;
	case 12:
		misc0 |= DP_TEST_BIT_DEPTH_12;
		pxl_repr |= CDNS_DP_FRAMER_12_BPC;
		break;
	case 16:
		misc0 |= DP_TEST_BIT_DEPTH_16;
		pxl_repr |= CDNS_DP_FRAMER_16_BPC;
		break;
	}

	bnd_hsync2vsync = CDNS_IP_BYPASS_V_INTERFACE;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		bnd_hsync2vsync |= CDNS_IP_DET_INTERLACE_FORMAT;

	cdns_mhdp_reg_write(mhdp, CDNS_BND_HSYNC2VSYNC(stream_id),
			    bnd_hsync2vsync);

	/* Program sync polarities for the hsync/vsync generation block. */
	hsync2vsync_pol_ctrl = 0;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_HSYNC_POL_ACTIVE_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		hsync2vsync_pol_ctrl |= CDNS_H2V_VSYNC_POL_ACTIVE_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_HSYNC2VSYNC_POL_CTRL(stream_id),
			    hsync2vsync_pol_ctrl);

	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_PXL_REPR(stream_id), pxl_repr);

	/* Mirror interlace and polarity flags into the framer itself. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		dp_framer_sp |= CDNS_DP_FRAMER_INTERLACE;
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_HSYNC_POL_LOW;
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		dp_framer_sp |= CDNS_DP_FRAMER_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_SP(stream_id), dp_framer_sp);

	front_porch = mode->crtc_hsync_start - mode->crtc_hdisplay;
	back_porch = mode->crtc_htotal - mode->crtc_hsync_end;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRONT_BACK_PORCH(stream_id),
			    CDNS_DP_FRONT_PORCH(front_porch) |
			    CDNS_DP_BACK_PORCH(back_porch));

	/* Bytes of video data per line for this stream. */
	cdns_mhdp_reg_write(mhdp, CDNS_DP_BYTE_COUNT(stream_id),
			    mode->crtc_hdisplay * bpp / 8);

	/* MSA horizontal timing (total and hsync start). */
	msa_h0 = mode->crtc_htotal - mode->crtc_hsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_0(stream_id),
			    CDNS_DP_MSAH0_H_TOTAL(mode->crtc_htotal) |
			    CDNS_DP_MSAH0_HSYNC_START(msa_h0));

	hsync = mode->crtc_hsync_end - mode->crtc_hsync_start;
	msa_horizontal_1 = CDNS_DP_MSAH1_HSYNC_WIDTH(hsync) |
			   CDNS_DP_MSAH1_HDISP_WIDTH(mode->crtc_hdisplay);
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		msa_horizontal_1 |= CDNS_DP_MSAH1_HSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_HORIZONTAL_1(stream_id),
			    msa_horizontal_1);

	/* MSA vertical timing (total and vsync start). */
	msa_v0 = mode->crtc_vtotal - mode->crtc_vsync_start;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_0(stream_id),
			    CDNS_DP_MSAV0_V_TOTAL(mode->crtc_vtotal) |
			    CDNS_DP_MSAV0_VSYNC_START(msa_v0));

	vsync = mode->crtc_vsync_end - mode->crtc_vsync_start;
	msa_vertical_1 = CDNS_DP_MSAV1_VSYNC_WIDTH(vsync) |
			 CDNS_DP_MSAV1_VDISP_WIDTH(mode->crtc_vdisplay);
	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		msa_vertical_1 |= CDNS_DP_MSAV1_VSYNC_POL_LOW;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_VERTICAL_1(stream_id),
			    msa_vertical_1);

	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		misc1 = DP_TEST_INTERLACED;
	if (mhdp->display_fmt.y_only)
		misc1 |= CDNS_DP_TEST_COLOR_FORMAT_RAW_Y_ONLY;
	/* Use VSC SDP for Y420 */
	if (pxlfmt == DRM_COLOR_FORMAT_YCBCR420)
		misc1 = CDNS_DP_TEST_VSC_SDP;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_MSA_MISC(stream_id),
			    misc0 | (misc1 << 8));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_HORIZONTAL(stream_id),
			    CDNS_DP_H_HSYNC_WIDTH(hsync) |
			    CDNS_DP_H_H_TOTAL(mode->crtc_hdisplay));

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_0(stream_id),
			    CDNS_DP_V0_VHEIGHT(mode->crtc_vdisplay) |
			    CDNS_DP_V0_VSTART(msa_v0));

	dp_vertical_1 = CDNS_DP_V1_VTOTAL(mode->crtc_vtotal);
	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) &&
	    mode->crtc_vtotal % 2 == 0)
		dp_vertical_1 |= CDNS_DP_V1_VTOTAL_EVEN;

	cdns_mhdp_reg_write(mhdp, CDNS_DP_VERTICAL_1(stream_id), dp_vertical_1);

	cdns_mhdp_reg_write_bit(mhdp, CDNS_DP_VB_ID(stream_id), 2, 1,
				(mode->flags & DRM_MODE_FLAG_INTERLACE) ?
				CDNS_DP_VB_ID_INTERLACED : 0);

	/* Finally enable the framer and clear "no video" mode. */
	ret = cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &framer);
	if (ret < 0) {
		dev_err(mhdp->dev,
			"Failed to read CDNS_DP_FRAMER_GLOBAL_CONFIG %d\n",
			ret);
		return;
	}
	framer |= CDNS_DP_FRAMER_EN;
	framer &= ~CDNS_DP_NO_VIDEO_MODE;
	cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, framer);
}
1922
1923 static void cdns_mhdp_sst_enable(struct cdns_mhdp_device *mhdp,
1924                                  const struct drm_display_mode *mode)
1925 {
1926         u32 rate, vs, required_bandwidth, available_bandwidth;
1927         s32 line_thresh1, line_thresh2, line_thresh = 0;
1928         int pxlclock = mode->crtc_clock;
1929         u32 tu_size = 64;
1930         u32 bpp;
1931
1932         /* Get rate in MSymbols per second per lane */
1933         rate = mhdp->link.rate / 1000;
1934
1935         bpp = cdns_mhdp_get_bpp(&mhdp->display_fmt);
1936
1937         required_bandwidth = pxlclock * bpp / 8;
1938         available_bandwidth = mhdp->link.num_lanes * rate;
1939
1940         vs = tu_size * required_bandwidth / available_bandwidth;
1941         vs /= 1000;
1942
1943         if (vs == tu_size)
1944                 vs = tu_size - 1;
1945
1946         line_thresh1 = ((vs + 1) << 5) * 8 / bpp;
1947         line_thresh2 = (pxlclock << 5) / 1000 / rate * (vs + 1) - (1 << 5);
1948         line_thresh = line_thresh1 - line_thresh2 / (s32)mhdp->link.num_lanes;
1949         line_thresh = (line_thresh >> 5) + 2;
1950
1951         mhdp->stream_id = 0;
1952
1953         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_TU,
1954                             CDNS_DP_FRAMER_TU_VS(vs) |
1955                             CDNS_DP_FRAMER_TU_SIZE(tu_size) |
1956                             CDNS_DP_FRAMER_TU_CNT_RST_EN);
1957
1958         cdns_mhdp_reg_write(mhdp, CDNS_DP_LINE_THRESH(0),
1959                             line_thresh & GENMASK(5, 0));
1960
1961         cdns_mhdp_reg_write(mhdp, CDNS_DP_STREAM_CONFIG_2(0),
1962                             CDNS_DP_SC2_TU_VS_DIFF((tu_size - vs > 3) ?
1963                                                    0 : tu_size - vs));
1964
1965         cdns_mhdp_configure_video(mhdp, mode);
1966 }
1967
1968 static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
1969                                     struct drm_bridge_state *bridge_state)
1970 {
1971         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
1972         struct drm_atomic_state *state = bridge_state->base.state;
1973         struct cdns_mhdp_bridge_state *mhdp_state;
1974         struct drm_crtc_state *crtc_state;
1975         struct drm_connector *connector;
1976         struct drm_connector_state *conn_state;
1977         struct drm_bridge_state *new_state;
1978         const struct drm_display_mode *mode;
1979         u32 resp;
1980         int ret;
1981
1982         dev_dbg(mhdp->dev, "bridge enable\n");
1983
1984         mutex_lock(&mhdp->link_mutex);
1985
1986         if (mhdp->plugged && !mhdp->link_up) {
1987                 ret = cdns_mhdp_link_up(mhdp);
1988                 if (ret < 0)
1989                         goto out;
1990         }
1991
1992         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->enable)
1993                 mhdp->info->ops->enable(mhdp);
1994
1995         /* Enable VIF clock for stream 0 */
1996         ret = cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
1997         if (ret < 0) {
1998                 dev_err(mhdp->dev, "Failed to read CDNS_DPTX_CAR %d\n", ret);
1999                 goto out;
2000         }
2001
2002         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2003                             resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
2004
2005         connector = drm_atomic_get_new_connector_for_encoder(state,
2006                                                              bridge->encoder);
2007         if (WARN_ON(!connector))
2008                 goto out;
2009
2010         conn_state = drm_atomic_get_new_connector_state(state, connector);
2011         if (WARN_ON(!conn_state))
2012                 goto out;
2013
2014         if (mhdp->hdcp_supported &&
2015             mhdp->hw_state == MHDP_HW_READY &&
2016             conn_state->content_protection ==
2017             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2018                 mutex_unlock(&mhdp->link_mutex);
2019                 cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
2020                 mutex_lock(&mhdp->link_mutex);
2021         }
2022
2023         crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
2024         if (WARN_ON(!crtc_state))
2025                 goto out;
2026
2027         mode = &crtc_state->adjusted_mode;
2028
2029         new_state = drm_atomic_get_new_bridge_state(state, bridge);
2030         if (WARN_ON(!new_state))
2031                 goto out;
2032
2033         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2034                                     mhdp->link.rate)) {
2035                 ret = -EINVAL;
2036                 goto out;
2037         }
2038
2039         cdns_mhdp_sst_enable(mhdp, mode);
2040
2041         mhdp_state = to_cdns_mhdp_bridge_state(new_state);
2042
2043         mhdp_state->current_mode = drm_mode_duplicate(bridge->dev, mode);
2044         drm_mode_set_name(mhdp_state->current_mode);
2045
2046         dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__, mode->name);
2047
2048         mhdp->bridge_enabled = true;
2049
2050 out:
2051         mutex_unlock(&mhdp->link_mutex);
2052         if (ret < 0)
2053                 schedule_work(&mhdp->modeset_retry_work);
2054 }
2055
2056 static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
2057                                      struct drm_bridge_state *bridge_state)
2058 {
2059         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2060         u32 resp;
2061
2062         dev_dbg(mhdp->dev, "%s\n", __func__);
2063
2064         mutex_lock(&mhdp->link_mutex);
2065
2066         if (mhdp->hdcp_supported)
2067                 cdns_mhdp_hdcp_disable(mhdp);
2068
2069         mhdp->bridge_enabled = false;
2070         cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
2071         resp &= ~CDNS_DP_FRAMER_EN;
2072         resp |= CDNS_DP_NO_VIDEO_MODE;
2073         cdns_mhdp_reg_write(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, resp);
2074
2075         cdns_mhdp_link_down(mhdp);
2076
2077         /* Disable VIF clock for stream 0 */
2078         cdns_mhdp_reg_read(mhdp, CDNS_DPTX_CAR, &resp);
2079         cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
2080                             resp & ~(CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN));
2081
2082         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
2083                 mhdp->info->ops->disable(mhdp);
2084
2085         mutex_unlock(&mhdp->link_mutex);
2086 }
2087
/*
 * drm_bridge_funcs.detach: unregister the AUX channel, mark the bridge
 * detached under start_lock (so the IRQ handler stops scheduling HPD
 * work), and mask all APB interrupts.
 */
static void cdns_mhdp_detach(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	dev_dbg(mhdp->dev, "%s\n", __func__);

	drm_dp_aux_unregister(&mhdp->aux);

	spin_lock(&mhdp->start_lock);

	mhdp->bridge_attached = false;

	spin_unlock(&mhdp->start_lock);

	/* Mask every APB interrupt source. */
	writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
}
2104
2105 static struct drm_bridge_state *
2106 cdns_mhdp_bridge_atomic_duplicate_state(struct drm_bridge *bridge)
2107 {
2108         struct cdns_mhdp_bridge_state *state;
2109
2110         state = kzalloc(sizeof(*state), GFP_KERNEL);
2111         if (!state)
2112                 return NULL;
2113
2114         __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base);
2115
2116         return &state->base;
2117 }
2118
2119 static void
2120 cdns_mhdp_bridge_atomic_destroy_state(struct drm_bridge *bridge,
2121                                       struct drm_bridge_state *state)
2122 {
2123         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2124
2125         cdns_mhdp_state = to_cdns_mhdp_bridge_state(state);
2126
2127         if (cdns_mhdp_state->current_mode) {
2128                 drm_mode_destroy(bridge->dev, cdns_mhdp_state->current_mode);
2129                 cdns_mhdp_state->current_mode = NULL;
2130         }
2131
2132         kfree(cdns_mhdp_state);
2133 }
2134
2135 static struct drm_bridge_state *
2136 cdns_mhdp_bridge_atomic_reset(struct drm_bridge *bridge)
2137 {
2138         struct cdns_mhdp_bridge_state *cdns_mhdp_state;
2139
2140         cdns_mhdp_state = kzalloc(sizeof(*cdns_mhdp_state), GFP_KERNEL);
2141         if (!cdns_mhdp_state)
2142                 return NULL;
2143
2144         __drm_atomic_helper_bridge_reset(bridge, &cdns_mhdp_state->base);
2145
2146         return &cdns_mhdp_state->base;
2147 }
2148
2149 static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
2150                                   struct drm_bridge_state *bridge_state,
2151                                   struct drm_crtc_state *crtc_state,
2152                                   struct drm_connector_state *conn_state)
2153 {
2154         struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
2155         const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
2156
2157         mutex_lock(&mhdp->link_mutex);
2158
2159         if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
2160                                     mhdp->link.rate)) {
2161                 dev_err(mhdp->dev, "%s: Not enough BW for %s (%u lanes at %u Mbps)\n",
2162                         __func__, mode->name, mhdp->link.num_lanes,
2163                         mhdp->link.rate / 100);
2164                 mutex_unlock(&mhdp->link_mutex);
2165                 return -EINVAL;
2166         }
2167
2168         mutex_unlock(&mhdp->link_mutex);
2169         return 0;
2170 }
2171
/* drm_bridge_funcs.detect: report current sink connection status. */
static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_detect(mhdp);
}
2178
/* drm_bridge_funcs.get_edid: read the sink's EDID over the AUX channel. */
static struct edid *cdns_mhdp_bridge_get_edid(struct drm_bridge *bridge,
					      struct drm_connector *connector)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	return cdns_mhdp_get_edid(mhdp, connector);
}
2186
/*
 * drm_bridge_funcs.hpd_enable: unmask the SW event interrupt (which
 * carries HPD), but only once the bridge is attached — see the IRQ
 * handler for why events must not fire earlier.
 */
static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	/* Enable SW event interrupts */
	if (mhdp->bridge_attached)
		writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
		       mhdp->regs + CDNS_APB_INT_MASK);
}
2196
/* drm_bridge_funcs.hpd_disable: mask the SW event (HPD) interrupt. */
static void cdns_mhdp_bridge_hpd_disable(struct drm_bridge *bridge)
{
	struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);

	writel(CDNS_APB_INT_MASK_SW_EVENT_INT, mhdp->regs + CDNS_APB_INT_MASK);
}
2203
/* Bridge ops; the advertised DRM_BRIDGE_OP_* set is declared at probe. */
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
	.atomic_enable = cdns_mhdp_atomic_enable,
	.atomic_disable = cdns_mhdp_atomic_disable,
	.atomic_check = cdns_mhdp_atomic_check,
	.attach = cdns_mhdp_attach,
	.detach = cdns_mhdp_detach,
	.atomic_duplicate_state = cdns_mhdp_bridge_atomic_duplicate_state,
	.atomic_destroy_state = cdns_mhdp_bridge_atomic_destroy_state,
	.atomic_reset = cdns_mhdp_bridge_atomic_reset,
	.detect = cdns_mhdp_bridge_detect,
	.get_edid = cdns_mhdp_bridge_get_edid,
	.hpd_enable = cdns_mhdp_bridge_hpd_enable,
	.hpd_disable = cdns_mhdp_bridge_hpd_disable,
};
2218
2219 static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
2220 {
2221         int hpd_event, hpd_status;
2222
2223         *hpd_pulse = false;
2224
2225         hpd_event = cdns_mhdp_read_hpd_event(mhdp);
2226
2227         /* Getting event bits failed, bail out */
2228         if (hpd_event < 0) {
2229                 dev_warn(mhdp->dev, "%s: read event failed: %d\n",
2230                          __func__, hpd_event);
2231                 return false;
2232         }
2233
2234         hpd_status = cdns_mhdp_get_hpd_status(mhdp);
2235         if (hpd_status < 0) {
2236                 dev_warn(mhdp->dev, "%s: get hpd status failed: %d\n",
2237                          __func__, hpd_status);
2238                 return false;
2239         }
2240
2241         if (hpd_event & DPTX_READ_EVENT_HPD_PULSE)
2242                 *hpd_pulse = true;
2243
2244         return !!hpd_status;
2245 }
2246
/*
 * Re-evaluate the link after an HPD event.
 *
 * Handles three cases under link_mutex:
 *  - unplug: bring the link down and reset rate/lanes to host defaults;
 *  - HPD pulse while still connected: re-check DPCD link status and force
 *    a new link training only if the link degraded;
 *  - (re)plug or degraded link: retrain, and if the bridge was already
 *    enabled, re-validate bandwidth and restart the stream with the mode
 *    saved in the bridge state.
 *
 * Returns 0 on success or a negative error code (the caller schedules the
 * modeset retry worker on failure).
 */
static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp)
{
	struct cdns_mhdp_bridge_state *cdns_bridge_state;
	struct drm_display_mode *current_mode;
	bool old_plugged = mhdp->plugged;
	struct drm_bridge_state *state;
	u8 status[DP_LINK_STATUS_SIZE];
	bool hpd_pulse;
	int ret = 0;

	mutex_lock(&mhdp->link_mutex);

	mhdp->plugged = cdns_mhdp_detect_hpd(mhdp, &hpd_pulse);

	if (!mhdp->plugged) {
		cdns_mhdp_link_down(mhdp);
		/* Reset to host capabilities for the next link training. */
		mhdp->link.rate = mhdp->host.link_rate;
		mhdp->link.num_lanes = mhdp->host.lanes_cnt;
		goto out;
	}

	/*
	 * If we get a HPD pulse event and we were and still are connected,
	 * check the link status. If link status is ok, there's nothing to do
	 * as we don't handle DP interrupts. If link status is bad, continue
	 * with full link setup.
	 */
	if (hpd_pulse && old_plugged == mhdp->plugged) {
		ret = drm_dp_dpcd_read_link_status(&mhdp->aux, status);

		/*
		 * If everything looks fine, just return, as we don't handle
		 * DP IRQs.
		 */
		if (ret > 0 &&
		    drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) &&
		    drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes))
			goto out;

		/* If link is bad, mark link as down so that we do a new LT */
		mhdp->link_up = false;
	}

	if (!mhdp->link_up) {
		ret = cdns_mhdp_link_up(mhdp);
		if (ret < 0)
			goto out;
	}

	/* Restart the active stream on the mode saved at atomic_enable. */
	if (mhdp->bridge_enabled) {
		state = drm_priv_to_bridge_state(mhdp->bridge.base.state);
		if (!state) {
			ret = -EINVAL;
			goto out;
		}

		cdns_bridge_state = to_cdns_mhdp_bridge_state(state);
		if (!cdns_bridge_state) {
			ret = -EINVAL;
			goto out;
		}

		current_mode = cdns_bridge_state->current_mode;
		if (!current_mode) {
			ret = -EINVAL;
			goto out;
		}

		/* The retrained link may no longer carry the old mode. */
		if (!cdns_mhdp_bandwidth_ok(mhdp, current_mode, mhdp->link.num_lanes,
					    mhdp->link.rate)) {
			ret = -EINVAL;
			goto out;
		}

		dev_dbg(mhdp->dev, "%s: Enabling mode %s\n", __func__,
			current_mode->name);

		cdns_mhdp_sst_enable(mhdp, current_mode);
	}
out:
	mutex_unlock(&mhdp->link_mutex);
	return ret;
}
2330
2331 static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
2332 {
2333         struct cdns_mhdp_device *mhdp;
2334         struct drm_connector *conn;
2335
2336         mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
2337
2338         conn = &mhdp->connector;
2339
2340         /* Grab the locks before changing connector property */
2341         mutex_lock(&conn->dev->mode_config.mutex);
2342
2343         /*
2344          * Set connector link status to BAD and send a Uevent to notify
2345          * userspace to do a modeset.
2346          */
2347         drm_connector_set_link_status_property(conn, DRM_MODE_LINK_STATUS_BAD);
2348         mutex_unlock(&conn->dev->mode_config.mutex);
2349
2350         /* Send Hotplug uevent so userspace can reprobe */
2351         drm_kms_helper_hotplug_event(mhdp->bridge.dev);
2352 }
2353
2354 static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
2355 {
2356         struct cdns_mhdp_device *mhdp = data;
2357         u32 apb_stat, sw_ev0;
2358         bool bridge_attached;
2359
2360         apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
2361         if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
2362                 return IRQ_NONE;
2363
2364         sw_ev0 = readl(mhdp->regs + CDNS_SW_EVENT0);
2365
2366         /*
2367          *  Calling drm_kms_helper_hotplug_event() when not attached
2368          *  to drm device causes an oops because the drm_bridge->dev
2369          *  is NULL. See cdns_mhdp_fw_cb() comments for details about the
2370          *  problems related drm_kms_helper_hotplug_event() call.
2371          */
2372         spin_lock(&mhdp->start_lock);
2373         bridge_attached = mhdp->bridge_attached;
2374         spin_unlock(&mhdp->start_lock);
2375
2376         if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
2377                 schedule_work(&mhdp->hpd_work);
2378         }
2379
2380         if (sw_ev0 & ~CDNS_DPTX_HPD) {
2381                 mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
2382                 wake_up(&mhdp->sw_events_wq);
2383         }
2384
2385         return IRQ_HANDLED;
2386 }
2387
2388 u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
2389 {
2390         u32 ret;
2391
2392         ret = wait_event_timeout(mhdp->sw_events_wq,
2393                                  mhdp->sw_events & event,
2394                                  msecs_to_jiffies(500));
2395         if (!ret) {
2396                 dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
2397                 goto sw_event_out;
2398         }
2399
2400         ret = mhdp->sw_events;
2401         mhdp->sw_events &= ~event;
2402
2403 sw_event_out:
2404         return ret;
2405 }
2406
/*
 * HPD worker: refresh the link state and notify DRM.
 *
 * When the bridge owns a connector, a failed link update schedules the
 * modeset retry worker; otherwise a plain hotplug event is sent. When an
 * external connector is used (DRM_BRIDGE_ATTACH_NO_CONNECTOR), only the
 * bridge HPD notification is issued.
 */
static void cdns_mhdp_hpd_work(struct work_struct *work)
{
	struct cdns_mhdp_device *mhdp = container_of(work,
						     struct cdns_mhdp_device,
						     hpd_work);
	int ret;

	ret = cdns_mhdp_update_link_status(mhdp);
	if (mhdp->connector.dev) {
		if (ret < 0)
			schedule_work(&mhdp->modeset_retry_work);
		else
			drm_kms_helper_hotplug_event(mhdp->bridge.dev);
	} else {
		drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
	}
}
2424
2425 static int cdns_mhdp_probe(struct platform_device *pdev)
2426 {
2427         struct device *dev = &pdev->dev;
2428         struct cdns_mhdp_device *mhdp;
2429         unsigned long rate;
2430         struct clk *clk;
2431         int ret;
2432         int irq;
2433
2434         mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
2435         if (!mhdp)
2436                 return -ENOMEM;
2437
2438         clk = devm_clk_get(dev, NULL);
2439         if (IS_ERR(clk)) {
2440                 dev_err(dev, "couldn't get clk: %ld\n", PTR_ERR(clk));
2441                 return PTR_ERR(clk);
2442         }
2443
2444         mhdp->clk = clk;
2445         mhdp->dev = dev;
2446         mutex_init(&mhdp->mbox_mutex);
2447         mutex_init(&mhdp->link_mutex);
2448         spin_lock_init(&mhdp->start_lock);
2449
2450         drm_dp_aux_init(&mhdp->aux);
2451         mhdp->aux.dev = dev;
2452         mhdp->aux.transfer = cdns_mhdp_transfer;
2453
2454         mhdp->regs = devm_platform_ioremap_resource(pdev, 0);
2455         if (IS_ERR(mhdp->regs)) {
2456                 dev_err(dev, "Failed to get memory resource\n");
2457                 return PTR_ERR(mhdp->regs);
2458         }
2459
2460         mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
2461         if (IS_ERR(mhdp->sapb_regs)) {
2462                 mhdp->hdcp_supported = false;
2463                 dev_warn(dev,
2464                          "Failed to get SAPB memory resource, HDCP not supported\n");
2465         } else {
2466                 mhdp->hdcp_supported = true;
2467         }
2468
2469         mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
2470         if (IS_ERR(mhdp->phy)) {
2471                 dev_err(dev, "no PHY configured\n");
2472                 return PTR_ERR(mhdp->phy);
2473         }
2474
2475         platform_set_drvdata(pdev, mhdp);
2476
2477         mhdp->info = of_device_get_match_data(dev);
2478
2479         clk_prepare_enable(clk);
2480
2481         pm_runtime_enable(dev);
2482         ret = pm_runtime_resume_and_get(dev);
2483         if (ret < 0) {
2484                 dev_err(dev, "pm_runtime_resume_and_get failed\n");
2485                 pm_runtime_disable(dev);
2486                 goto clk_disable;
2487         }
2488
2489         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->init) {
2490                 ret = mhdp->info->ops->init(mhdp);
2491                 if (ret != 0) {
2492                         dev_err(dev, "MHDP platform initialization failed: %d\n",
2493                                 ret);
2494                         goto runtime_put;
2495                 }
2496         }
2497
2498         rate = clk_get_rate(clk);
2499         writel(rate % 1000000, mhdp->regs + CDNS_SW_CLK_L);
2500         writel(rate / 1000000, mhdp->regs + CDNS_SW_CLK_H);
2501
2502         dev_dbg(dev, "func clk rate %lu Hz\n", rate);
2503
2504         writel(~0, mhdp->regs + CDNS_APB_INT_MASK);
2505
2506         irq = platform_get_irq(pdev, 0);
2507         ret = devm_request_threaded_irq(mhdp->dev, irq, NULL,
2508                                         cdns_mhdp_irq_handler, IRQF_ONESHOT,
2509                                         "mhdp8546", mhdp);
2510         if (ret) {
2511                 dev_err(dev, "cannot install IRQ %d\n", irq);
2512                 ret = -EIO;
2513                 goto plat_fini;
2514         }
2515
2516         cdns_mhdp_fill_host_caps(mhdp);
2517
2518         /* Initialize link rate and num of lanes to host values */
2519         mhdp->link.rate = mhdp->host.link_rate;
2520         mhdp->link.num_lanes = mhdp->host.lanes_cnt;
2521
2522         /* The only currently supported format */
2523         mhdp->display_fmt.y_only = false;
2524         mhdp->display_fmt.color_format = DRM_COLOR_FORMAT_RGB444;
2525         mhdp->display_fmt.bpc = 8;
2526
2527         mhdp->bridge.of_node = pdev->dev.of_node;
2528         mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
2529         mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
2530                            DRM_BRIDGE_OP_HPD;
2531         mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
2532         if (mhdp->info)
2533                 mhdp->bridge.timings = mhdp->info->timings;
2534
2535         ret = phy_init(mhdp->phy);
2536         if (ret) {
2537                 dev_err(mhdp->dev, "Failed to initialize PHY: %d\n", ret);
2538                 goto plat_fini;
2539         }
2540
2541         /* Initialize the work for modeset in case of link train failure */
2542         INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
2543         INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
2544
2545         init_waitqueue_head(&mhdp->fw_load_wq);
2546         init_waitqueue_head(&mhdp->sw_events_wq);
2547
2548         ret = cdns_mhdp_load_firmware(mhdp);
2549         if (ret)
2550                 goto phy_exit;
2551
2552         if (mhdp->hdcp_supported)
2553                 cdns_mhdp_hdcp_init(mhdp);
2554
2555         drm_bridge_add(&mhdp->bridge);
2556
2557         return 0;
2558
2559 phy_exit:
2560         phy_exit(mhdp->phy);
2561 plat_fini:
2562         if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
2563                 mhdp->info->ops->exit(mhdp);
2564 runtime_put:
2565         pm_runtime_put_sync(dev);
2566         pm_runtime_disable(dev);
2567 clk_disable:
2568         clk_disable_unprepare(mhdp->clk);
2569
2570         return ret;
2571 }
2572
/*
 * Driver removal: tear down in the reverse order of probe.
 *
 * Firmware loading is asynchronous (see the TODO list at the top of the
 * file), so removal may race with it: wait briefly for the firmware to
 * reach MHDP_HW_READY before asking it to stop, then unwind the PHY,
 * platform glue, runtime PM, pending work items and the functional clock.
 */
static int cdns_mhdp_remove(struct platform_device *pdev)
{
	struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
	unsigned long timeout = msecs_to_jiffies(100);
	bool stop_fw = false;
	int ret;

	drm_bridge_remove(&mhdp->bridge);

	/*
	 * Give the asynchronous firmware load up to 100 ms to finish; only
	 * a firmware that actually came up (MHDP_HW_READY) is told to stop.
	 */
	ret = wait_event_timeout(mhdp->fw_load_wq,
				 mhdp->hw_state == MHDP_HW_READY,
				 timeout);
	if (ret == 0)
		dev_err(mhdp->dev, "%s: Timeout waiting for fw loading\n",
			__func__);
	else
		stop_fw = true;

	/* start_lock guards hw_state against the fw-load/bringup paths. */
	spin_lock(&mhdp->start_lock);
	mhdp->hw_state = MHDP_HW_STOPPED;
	spin_unlock(&mhdp->start_lock);

	if (stop_fw)
		ret = cdns_mhdp_set_firmware_active(mhdp, false);

	phy_exit(mhdp->phy);

	/* Platform-specific (e.g. TI J721E wrapper) teardown, if any. */
	if (mhdp->info && mhdp->info->ops && mhdp->info->ops->exit)
		mhdp->info->ops->exit(mhdp);

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	cancel_work_sync(&mhdp->modeset_retry_work);
	flush_work(&mhdp->hpd_work);
	/* Ignoring mhdp->hdcp.check_work and mhdp->hdcp.prop_work here. */

	clk_disable_unprepare(mhdp->clk);

	/*
	 * NOTE: on the fw-wait-timeout path ret is 0 here, so the timeout
	 * itself is reported only via dev_err(), not the return value.
	 */
	return ret;
}
2614
/*
 * OF match table. The TI J721E entry supplies platform-specific bridge
 * timings and wrapper ops, and is compiled in only when the J721E glue
 * (CONFIG_DRM_CDNS_MHDP8546_J721E) is enabled.
 */
static const struct of_device_id mhdp_ids[] = {
	{ .compatible = "cdns,mhdp8546", },
#ifdef CONFIG_DRM_CDNS_MHDP8546_J721E
	{ .compatible = "ti,j721e-mhdp8546",
	  .data = &(const struct cdns_mhdp_platform_info) {
		  .timings = &mhdp_ti_j721e_bridge_timings,
		  .ops = &mhdp_ti_j721e_ops,
	  },
	},
#endif
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mhdp_ids);
2628
2629 static struct platform_driver mhdp_driver = {
2630         .driver = {
2631                 .name           = "cdns-mhdp8546",
2632                 .of_match_table = of_match_ptr(mhdp_ids),
2633         },
2634         .probe  = cdns_mhdp_probe,
2635         .remove = cdns_mhdp_remove,
2636 };
2637 module_platform_driver(mhdp_driver);
2638
/* Firmware image requested at probe time via cdns_mhdp_load_firmware(). */
MODULE_FIRMWARE(FW_NAME);

MODULE_AUTHOR("Quentin Schulz <[email protected]>");
MODULE_AUTHOR("Swapnil Jakhade <[email protected]>");
MODULE_AUTHOR("Yuti Amonkar <[email protected]>");
MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
MODULE_AUTHOR("Jyri Sarha <[email protected]>");
MODULE_DESCRIPTION("Cadence MHDP8546 DP bridge driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cdns-mhdp8546");