drivers/gpu/drm/i915/display/intel_hdcp.c
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <[email protected]>
8  * Ramalingam C <[email protected]>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29
30 #define KEY_LOAD_TRIES  5
31 #define HDCP2_LC_RETRY_CNT                      3
32
33 static int intel_conn_to_vcpi(struct drm_atomic_state *state,
34                               struct intel_connector *connector)
35 {
36         struct drm_dp_mst_topology_mgr *mgr;
37         struct drm_dp_mst_atomic_payload *payload;
38         struct drm_dp_mst_topology_state *mst_state;
39         int vcpi = 0;
40
41         /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
42         if (!connector->port)
43                 return 0;
44         mgr = connector->port->mgr;
45
46         drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
47         mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
48         payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
49         if (drm_WARN_ON(mgr->dev, !payload))
50                 goto out;
51
52         vcpi = payload->vcpi;
53         if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
54                 vcpi = 0;
55                 goto out;
56         }
57 out:
58         return vcpi;
59 }
60
61 /*
62  * intel_hdcp_required_content_stream selects the highest common HDCP
63  * content_type for all streams in the DP MST topology: the security f/w
64  * has no provision to mark the content_type of each stream separately, so
65  * it marks all streams with the content_type provided at the time of port
66  * authentication. This may prevent userspace from using type1 content on
67  * an HDCP 2.2 capable sink when other sinks in the DP MST topology are not
68  * HDCP 2.2 capable. Though it is not compulsory, the security f/w should
69  * change its policy to mark different content_types for different streams.
70  */
71 static void
72 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
73 {
74         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
75         bool enforce_type0 = false;
76         int k;
77
78         if (dig_port->hdcp_auth_status)
79                 return;
80
81         if (!dig_port->hdcp_mst_type1_capable)
82                 enforce_type0 = true;
83
84         /*
85          * Apply a common protection level across all streams in the DP MST
86          * topology: the highest content type supported by every sink.
87          */
88         for (k = 0; k < data->k; k++)
89                 data->streams[k].stream_type =
90                         enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
91 }
92
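/*
 * Illustrative example (not part of the driver) of the policy implemented
 * above: if a DP MST topology drives three streams and even one sink in it
 * is not HDCP 2.2 / type 1 capable, hdcp_mst_type1_capable is false, so the
 * loop marks all three streams DRM_MODE_HDCP_CONTENT_TYPE0 even when
 * userspace requested type 1 on the other two.
 */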
93 static void intel_hdcp_prepare_streams(struct intel_connector *connector)
94 {
95         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
96         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
97         struct intel_hdcp *hdcp = &connector->hdcp;
98
99         if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
100                 data->streams[0].stream_type = hdcp->content_type;
101         } else {
102                 intel_hdcp_required_content_stream(dig_port);
103         }
104 }
105
106 static
107 bool intel_hdcp_is_ksv_valid(u8 *ksv)
108 {
109         int i, ones = 0;
110         /* KSV has 20 1's and 20 0's */
111         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
112                 ones += hweight8(ksv[i]);
113         if (ones != 20)
114                 return false;
115
116         return true;
117 }
118
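/*
 * Illustrative sketch, not part of the driver: the 20-ones rule enforced by
 * intel_hdcp_is_ksv_valid() can be demonstrated with trivially constructed
 * values. 0xaa has four bits set, so five 0xaa bytes carry exactly 20 ones
 * out of 40 bits, while an all-0xff KSV has 40 ones and must be rejected.
 * The helper name below is hypothetical.
 */
#if 0   /* example only, never built */
static void intel_hdcp_ksv_parity_example(void)
{
        u8 good_ksv[DRM_HDCP_KSV_LEN] = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa }; /* 20 ones */
        u8 bad_ksv[DRM_HDCP_KSV_LEN]  = { 0xff, 0xff, 0xff, 0xff, 0xff }; /* 40 ones */

        WARN_ON(!intel_hdcp_is_ksv_valid(good_ksv));    /* passes the check */
        WARN_ON(intel_hdcp_is_ksv_valid(bad_ksv));      /* fails the check */
}
#endif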
119 static
120 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
121                                const struct intel_hdcp_shim *shim, u8 *bksv)
122 {
123         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
124         int ret, i, tries = 2;
125
126         /* HDCP spec states that we must retry the bksv if it is invalid */
127         for (i = 0; i < tries; i++) {
128                 ret = shim->read_bksv(dig_port, bksv);
129                 if (ret)
130                         return ret;
131                 if (intel_hdcp_is_ksv_valid(bksv))
132                         break;
133         }
134         if (i == tries) {
135                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
136                 return -ENODEV;
137         }
138
139         return 0;
140 }
141
142 /* Checks whether both the platform and the sink are HDCP1.4 capable */
143 bool intel_hdcp_capable(struct intel_connector *connector)
144 {
145         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
146         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
147         bool capable = false;
148         u8 bksv[5];
149
150         if (!shim)
151                 return capable;
152
153         if (shim->hdcp_capable) {
154                 shim->hdcp_capable(dig_port, &capable);
155         } else {
156                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
157                         capable = true;
158         }
159
160         return capable;
161 }
162
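/*
 * Note on the fallback above: when a shim does not provide an hdcp_capable()
 * hook (as with the HDMI shim), successfully reading back a Bksv with the
 * required 20-ones/20-zeros pattern is treated as sufficient proof that the
 * sink supports HDCP 1.4.
 */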
163 /* Checks whether both the platform and the sink are HDCP2.2 capable */
164 bool intel_hdcp2_capable(struct intel_connector *connector)
165 {
166         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
167         struct drm_i915_private *i915 = to_i915(connector->base.dev);
168         struct intel_hdcp *hdcp = &connector->hdcp;
169         bool capable = false;
170
171         /* I915 support for HDCP2.2 */
172         if (!hdcp->hdcp2_supported)
173                 return false;
174
175         /* On MTL+, make sure the GSC is loaded and the proxy is set up */
176         if (intel_hdcp_gsc_cs_required(i915)) {
177                 struct intel_gt *gt = i915->media_gt;
178                 struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
179
180                 if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
181                         return false;
182         }
183
184         /* The MEI/GSC interface (whichever is used) must be bound and ready */
185         mutex_lock(&i915->display.hdcp.hdcp_mutex);
186         if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) {
187                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
188                 return false;
189         }
190         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
191
192         /* Sink's capability for HDCP2.2 */
193         hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
194
195         return capable;
196 }
197
198 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
199                               enum transcoder cpu_transcoder, enum port port)
200 {
201         return intel_de_read(i915,
202                              HDCP_STATUS(i915, cpu_transcoder, port)) &
203                 HDCP_STATUS_ENC;
204 }
205
206 static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
207                                enum transcoder cpu_transcoder, enum port port)
208 {
209         return intel_de_read(i915,
210                              HDCP2_STATUS(i915, cpu_transcoder, port)) &
211                 LINK_ENCRYPTION_STATUS;
212 }
213
214 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
215                                     const struct intel_hdcp_shim *shim)
216 {
217         int ret, read_ret;
218         bool ksv_ready;
219
220         /* Poll for ksv list ready (spec says max time allowed is 5s) */
221         ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
222                                                          &ksv_ready),
223                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
224                          100 * 1000);
225         if (ret)
226                 return ret;
227         if (read_ret)
228                 return read_ret;
229         if (!ksv_ready)
230                 return -ETIMEDOUT;
231
232         return 0;
233 }
234
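/*
 * A note on the __wait_for() call above: as used here, the arguments are the
 * operation to retry, the exit condition, the total timeout and the
 * minimum/maximum sleep between attempts, all in microseconds. The poll above
 * therefore re-reads the KSV-ready bit for up to 5 seconds, backing off from
 * 1 ms to 100 ms between reads, matching the 5 s limit quoted from the spec.
 */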
235 static bool hdcp_key_loadable(struct drm_i915_private *i915)
236 {
237         enum i915_power_well_id id;
238         intel_wakeref_t wakeref;
239         bool enabled = false;
240
241         /*
242          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
243          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
244          */
245         if (IS_HASWELL(i915) || IS_BROADWELL(i915))
246                 id = HSW_DISP_PW_GLOBAL;
247         else
248                 id = SKL_DISP_PW_1;
249
250         /* PG1 (power well #1) needs to be enabled */
251         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
252                 enabled = intel_display_power_well_is_enabled(i915, id);
253
254         /*
255          * Another requirement for HDCP key loadability is that the PLL for
256          * cdclk is enabled. Without an active crtc we won't land here, so we
257          * assume that cdclk is already on.
258          */
259
260         return enabled;
261 }
262
263 static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
264 {
265         intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
266         intel_de_write(i915, HDCP_KEY_STATUS,
267                        HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
268 }
269
270 static int intel_hdcp_load_keys(struct drm_i915_private *i915)
271 {
272         int ret;
273         u32 val;
274
275         val = intel_de_read(i915, HDCP_KEY_STATUS);
276         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
277                 return 0;
278
279         /*
280          * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
281          * out of reset. So if the key is not already loaded, it's an error state.
282          */
283         if (IS_HASWELL(i915) || IS_BROADWELL(i915))
284                 if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
285                         return -ENXIO;
286
287         /*
288          * Initiate loading the HDCP key from fuses.
289          *
290          * On BXT+ platforms, the HDCP key needs to be loaded by SW. Only display
291          * version 9 platforms (minus BXT) differ in the key load trigger
292          * process from other platforms. These platforms use the GT Driver
293          * Mailbox interface.
294          */
295         if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
296                 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
297                 if (ret) {
298                         drm_err(&i915->drm,
299                                 "Failed to initiate HDCP key load (%d)\n",
300                                 ret);
301                         return ret;
302                 }
303         } else {
304                 intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
305         }
306
307         /* Wait for the keys to load (500us) */
308         ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
309                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
310                                         10, 1, &val);
311         if (ret)
312                 return ret;
313         else if (!(val & HDCP_KEY_LOAD_STATUS))
314                 return -ENXIO;
315
316         /* Send Aksv over to PCH display for use in authentication */
317         intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
318
319         return 0;
320 }
321
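/*
 * Key-load recap for the function above: on HSW/BDW the HW loads the HDCP 1.4
 * key itself and a missing key is treated as an error; on display version 9
 * parts other than BXT the load is triggered through the
 * SKL_PCODE_LOAD_HDCP_KEYS GT driver mailbox command; everywhere else
 * (including BXT and display 10+) SW writes HDCP_KEY_LOAD_TRIGGER to
 * HDCP_KEY_CONF. In all cases the function then waits for
 * HDCP_KEY_LOAD_DONE/HDCP_KEY_LOAD_STATUS and finally sends the Aksv to the
 * PCH with HDCP_AKSV_SEND_TRIGGER.
 */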
322 /* Write 32 bits of SHA-1 text and wait for the HW to accept it */
323 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
324 {
325         intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
326         if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
327                 drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
328                 return -ETIMEDOUT;
329         }
330         return 0;
331 }
332
333 static
334 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
335                                 enum transcoder cpu_transcoder, enum port port)
336 {
337         if (DISPLAY_VER(i915) >= 12) {
338                 switch (cpu_transcoder) {
339                 case TRANSCODER_A:
340                         return HDCP_TRANSA_REP_PRESENT |
341                                HDCP_TRANSA_SHA1_M0;
342                 case TRANSCODER_B:
343                         return HDCP_TRANSB_REP_PRESENT |
344                                HDCP_TRANSB_SHA1_M0;
345                 case TRANSCODER_C:
346                         return HDCP_TRANSC_REP_PRESENT |
347                                HDCP_TRANSC_SHA1_M0;
348                 case TRANSCODER_D:
349                         return HDCP_TRANSD_REP_PRESENT |
350                                HDCP_TRANSD_SHA1_M0;
351                 default:
352                         drm_err(&i915->drm, "Unknown transcoder %d\n",
353                                 cpu_transcoder);
354                         return -EINVAL;
355                 }
356         }
357
358         switch (port) {
359         case PORT_A:
360                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
361         case PORT_B:
362                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
363         case PORT_C:
364                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
365         case PORT_D:
366                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
367         case PORT_E:
368                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
369         default:
370                 drm_err(&i915->drm, "Unknown port %d\n", port);
371                 return -EINVAL;
372         }
373 }
374
375 static
376 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
377                                 const struct intel_hdcp_shim *shim,
378                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
379 {
380         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
381         struct drm_i915_private *i915 = to_i915(connector->base.dev);
382         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
383         enum port port = dig_port->base.port;
384         u32 vprime, sha_text, sha_leftovers, rep_ctl;
385         int ret, i, j, sha_idx;
386
387         /* Process V' values from the receiver */
388         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
389                 ret = shim->read_v_prime_part(dig_port, i, &vprime);
390                 if (ret)
391                         return ret;
392                 intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
393         }
394
395         /*
396          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
397          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
398          * stream is written via the HDCP_SHA_TEXT register in 32-bit
399          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
400          * index will keep track of our progress through the 64 bytes as well as
401          * helping us work the 40-bit KSVs through our 32-bit register.
402          *
403          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
404          */
405         sha_idx = 0;
406         sha_text = 0;
407         sha_leftovers = 0;
408         rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
409         intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
410         for (i = 0; i < num_downstream; i++) {
411                 unsigned int sha_empty;
412                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
413
414                 /* Fill up the empty slots in sha_text and write it out */
415                 sha_empty = sizeof(sha_text) - sha_leftovers;
416                 for (j = 0; j < sha_empty; j++) {
417                         u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
418                         sha_text |= ksv[j] << off;
419                 }
420
421                 ret = intel_write_sha_text(i915, sha_text);
422                 if (ret < 0)
423                         return ret;
424
425                 /* Programming guide writes this every 64 bytes */
426                 sha_idx += sizeof(sha_text);
427                 if (!(sha_idx % 64))
428                         intel_de_write(i915, HDCP_REP_CTL,
429                                        rep_ctl | HDCP_SHA1_TEXT_32);
430
431                 /* Store the leftover bytes from the ksv in sha_text */
432                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
433                 sha_text = 0;
434                 for (j = 0; j < sha_leftovers; j++)
435                         sha_text |= ksv[sha_empty + j] <<
436                                         ((sizeof(sha_text) - j - 1) * 8);
437
438                 /*
439                  * If we still have room in sha_text for more data, continue.
440                  * Otherwise, write it out immediately.
441                  */
442                 if (sizeof(sha_text) > sha_leftovers)
443                         continue;
444
445                 ret = intel_write_sha_text(i915, sha_text);
446                 if (ret < 0)
447                         return ret;
448                 sha_leftovers = 0;
449                 sha_text = 0;
450                 sha_idx += sizeof(sha_text);
451         }
452
453         /*
454          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
455          * bytes are leftover from the last ksv, we might be able to fit them
456          * all in sha_text (first 2 cases), or we might need to split them up
457          * into 2 writes (last 2 cases).
458          */
459         if (sha_leftovers == 0) {
460                 /* Write 16 bits of text, 16 bits of M0 */
461                 intel_de_write(i915, HDCP_REP_CTL,
462                                rep_ctl | HDCP_SHA1_TEXT_16);
463                 ret = intel_write_sha_text(i915,
464                                            bstatus[0] << 8 | bstatus[1]);
465                 if (ret < 0)
466                         return ret;
467                 sha_idx += sizeof(sha_text);
468
469                 /* Write 32 bits of M0 */
470                 intel_de_write(i915, HDCP_REP_CTL,
471                                rep_ctl | HDCP_SHA1_TEXT_0);
472                 ret = intel_write_sha_text(i915, 0);
473                 if (ret < 0)
474                         return ret;
475                 sha_idx += sizeof(sha_text);
476
477                 /* Write 16 bits of M0 */
478                 intel_de_write(i915, HDCP_REP_CTL,
479                                rep_ctl | HDCP_SHA1_TEXT_16);
480                 ret = intel_write_sha_text(i915, 0);
481                 if (ret < 0)
482                         return ret;
483                 sha_idx += sizeof(sha_text);
484
485         } else if (sha_leftovers == 1) {
486                 /* Write 24 bits of text, 8 bits of M0 */
487                 intel_de_write(i915, HDCP_REP_CTL,
488                                rep_ctl | HDCP_SHA1_TEXT_24);
489                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
490                 /* Only 24-bits of data, must be in the LSB */
491                 sha_text = (sha_text & 0xffffff00) >> 8;
492                 ret = intel_write_sha_text(i915, sha_text);
493                 if (ret < 0)
494                         return ret;
495                 sha_idx += sizeof(sha_text);
496
497                 /* Write 32 bits of M0 */
498                 intel_de_write(i915, HDCP_REP_CTL,
499                                rep_ctl | HDCP_SHA1_TEXT_0);
500                 ret = intel_write_sha_text(i915, 0);
501                 if (ret < 0)
502                         return ret;
503                 sha_idx += sizeof(sha_text);
504
505                 /* Write 24 bits of M0 */
506                 intel_de_write(i915, HDCP_REP_CTL,
507                                rep_ctl | HDCP_SHA1_TEXT_8);
508                 ret = intel_write_sha_text(i915, 0);
509                 if (ret < 0)
510                         return ret;
511                 sha_idx += sizeof(sha_text);
512
513         } else if (sha_leftovers == 2) {
514                 /* Write 32 bits of text */
515                 intel_de_write(i915, HDCP_REP_CTL,
516                                rep_ctl | HDCP_SHA1_TEXT_32);
517                 sha_text |= bstatus[0] << 8 | bstatus[1];
518                 ret = intel_write_sha_text(i915, sha_text);
519                 if (ret < 0)
520                         return ret;
521                 sha_idx += sizeof(sha_text);
522
523                 /* Write 64 bits of M0 */
524                 intel_de_write(i915, HDCP_REP_CTL,
525                                rep_ctl | HDCP_SHA1_TEXT_0);
526                 for (i = 0; i < 2; i++) {
527                         ret = intel_write_sha_text(i915, 0);
528                         if (ret < 0)
529                                 return ret;
530                         sha_idx += sizeof(sha_text);
531                 }
532
533                 /*
534                  * Terminate the SHA-1 stream by hand. For the other leftover
535                  * cases this is appended by the hardware.
536                  */
537                 intel_de_write(i915, HDCP_REP_CTL,
538                                rep_ctl | HDCP_SHA1_TEXT_32);
539                 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
540                 ret = intel_write_sha_text(i915, sha_text);
541                 if (ret < 0)
542                         return ret;
543                 sha_idx += sizeof(sha_text);
544         } else if (sha_leftovers == 3) {
545                 /* Write 32 bits of text (filled from LSB) */
546                 intel_de_write(i915, HDCP_REP_CTL,
547                                rep_ctl | HDCP_SHA1_TEXT_32);
548                 sha_text |= bstatus[0];
549                 ret = intel_write_sha_text(i915, sha_text);
550                 if (ret < 0)
551                         return ret;
552                 sha_idx += sizeof(sha_text);
553
554                 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
555                 intel_de_write(i915, HDCP_REP_CTL,
556                                rep_ctl | HDCP_SHA1_TEXT_8);
557                 ret = intel_write_sha_text(i915, bstatus[1]);
558                 if (ret < 0)
559                         return ret;
560                 sha_idx += sizeof(sha_text);
561
562                 /* Write 32 bits of M0 */
563                 intel_de_write(i915, HDCP_REP_CTL,
564                                rep_ctl | HDCP_SHA1_TEXT_0);
565                 ret = intel_write_sha_text(i915, 0);
566                 if (ret < 0)
567                         return ret;
568                 sha_idx += sizeof(sha_text);
569
570                 /* Write 8 bits of M0 */
571                 intel_de_write(i915, HDCP_REP_CTL,
572                                rep_ctl | HDCP_SHA1_TEXT_24);
573                 ret = intel_write_sha_text(i915, 0);
574                 if (ret < 0)
575                         return ret;
576                 sha_idx += sizeof(sha_text);
577         } else {
578                 drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
579                             sha_leftovers);
580                 return -EINVAL;
581         }
582
583         intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
584         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
585         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
586                 ret = intel_write_sha_text(i915, 0);
587                 if (ret < 0)
588                         return ret;
589                 sha_idx += sizeof(sha_text);
590         }
591
592         /*
593          * Last write gets the length of the concatenation in bits. That is:
594          *  - 5 bytes per device
595          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
596          */
597         sha_text = (num_downstream * 5 + 10) * 8;
598         ret = intel_write_sha_text(i915, sha_text);
599         if (ret < 0)
600                 return ret;
601
602         /* Tell the HW we're done with the hash and wait for it to ACK */
603         intel_de_write(i915, HDCP_REP_CTL,
604                        rep_ctl | HDCP_SHA1_COMPLETE_HASH);
605         if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
606                                   HDCP_SHA1_COMPLETE, 1)) {
607                 drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
608                 return -ETIMEDOUT;
609         }
610         if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
611                 drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
612                 return -ENXIO;
613         }
614
615         return 0;
616 }
617
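/*
 * Worked example for the V' computation above, assuming a repeater with
 * three downstream devices: the 15 KSV bytes fill three full 32-bit
 * HDCP_SHA_TEXT writes and leave sha_leftovers == 3, so the
 * sha_leftovers == 3 branch appends BSTATUS and M0, and the final length
 * write is (3 * 5 + 10) * 8 = 200 bits.
 */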
618 /* Implements Part 2 of the HDCP authorization procedure */
619 static
620 int intel_hdcp_auth_downstream(struct intel_connector *connector)
621 {
622         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
623         struct drm_i915_private *i915 = to_i915(connector->base.dev);
624         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
625         u8 bstatus[2], num_downstream, *ksv_fifo;
626         int ret, i, tries = 3;
627
628         ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
629         if (ret) {
630                 drm_dbg_kms(&i915->drm,
631                             "KSV list failed to become ready (%d)\n", ret);
632                 return ret;
633         }
634
635         ret = shim->read_bstatus(dig_port, bstatus);
636         if (ret)
637                 return ret;
638
639         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
640             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
641                 drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
642                 return -EPERM;
643         }
644
645         /*
646          * When a repeater reports a device count of 0, the HDCP1.4 spec allows
647          * disabling HDCP encryption, which implies that the repeater can't have
648          * its own display. As there is no consumption of encrypted content in a
649          * repeater with 0 downstream devices, we fail the authentication
650          * instead.
651          */
652         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
653         if (num_downstream == 0) {
654                 drm_dbg_kms(&i915->drm,
655                             "Repeater with zero downstream devices\n");
656                 return -EINVAL;
657         }
658
659         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
660         if (!ksv_fifo) {
661                 drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
662                 return -ENOMEM;
663         }
664
665         ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
666         if (ret)
667                 goto err;
668
669         if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
670                                         num_downstream) > 0) {
671                 drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
672                 ret = -EPERM;
673                 goto err;
674         }
675
676         /*
677          * When V prime mismatches, the DP spec mandates re-reading
678          * V prime at least twice.
679          */
680         for (i = 0; i < tries; i++) {
681                 ret = intel_hdcp_validate_v_prime(connector, shim,
682                                                   ksv_fifo, num_downstream,
683                                                   bstatus);
684                 if (!ret)
685                         break;
686         }
687
688         if (i == tries) {
689                 drm_dbg_kms(&i915->drm,
690                             "V Prime validation failed.(%d)\n", ret);
691                 goto err;
692         }
693
694         drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
695                     num_downstream);
696         ret = 0;
697 err:
698         kfree(ksv_fifo);
699         return ret;
700 }
701
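/*
 * Illustrative BSTATUS decode for the checks above, per the HDCP 1.4 BStatus
 * layout used by the DRM_HDCP_* helpers: bstatus[0] carries the device count
 * in bits 6:0 and MAX_DEVS_EXCEEDED in bit 7, bstatus[1] carries the depth in
 * bits 2:0 and MAX_CASCADE_EXCEEDED in bit 3. For example,
 * bstatus[] = { 0x03, 0x01 } describes three downstream devices at depth 1
 * with no topology limit exceeded, so 15 bytes of KSV FIFO are read back.
 */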
702 /* Implements Part 1 of the HDCP authorization procedure */
703 static int intel_hdcp_auth(struct intel_connector *connector)
704 {
705         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
706         struct drm_i915_private *i915 = to_i915(connector->base.dev);
707         struct intel_hdcp *hdcp = &connector->hdcp;
708         const struct intel_hdcp_shim *shim = hdcp->shim;
709         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
710         enum port port = dig_port->base.port;
711         unsigned long r0_prime_gen_start;
712         int ret, i, tries = 2;
713         union {
714                 u32 reg[2];
715                 u8 shim[DRM_HDCP_AN_LEN];
716         } an;
717         union {
718                 u32 reg[2];
719                 u8 shim[DRM_HDCP_KSV_LEN];
720         } bksv;
721         union {
722                 u32 reg;
723                 u8 shim[DRM_HDCP_RI_LEN];
724         } ri;
725         bool repeater_present, hdcp_capable;
726
727         /*
728          * Detects whether the display is HDCP capable. Although we check for
729          * valid Bksv below, the HDCP over DP spec requires that we check
730          * whether the display supports HDCP before we write An. For HDMI
731          * displays, this is not necessary.
732          */
733         if (shim->hdcp_capable) {
734                 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
735                 if (ret)
736                         return ret;
737                 if (!hdcp_capable) {
738                         drm_dbg_kms(&i915->drm,
739                                     "Panel is not HDCP capable\n");
740                         return -EINVAL;
741                 }
742         }
743
744         /* Initialize An with 2 random values and acquire it */
745         for (i = 0; i < 2; i++)
746                 intel_de_write(i915,
747                                HDCP_ANINIT(i915, cpu_transcoder, port),
748                                get_random_u32());
749         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
750                        HDCP_CONF_CAPTURE_AN);
751
752         /* Wait for An to be acquired */
753         if (intel_de_wait_for_set(i915,
754                                   HDCP_STATUS(i915, cpu_transcoder, port),
755                                   HDCP_STATUS_AN_READY, 1)) {
756                 drm_err(&i915->drm, "Timed out waiting for An\n");
757                 return -ETIMEDOUT;
758         }
759
760         an.reg[0] = intel_de_read(i915,
761                                   HDCP_ANLO(i915, cpu_transcoder, port));
762         an.reg[1] = intel_de_read(i915,
763                                   HDCP_ANHI(i915, cpu_transcoder, port));
764         ret = shim->write_an_aksv(dig_port, an.shim);
765         if (ret)
766                 return ret;
767
768         r0_prime_gen_start = jiffies;
769
770         memset(&bksv, 0, sizeof(bksv));
771
772         ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
773         if (ret < 0)
774                 return ret;
775
776         if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
777                 drm_err(&i915->drm, "BKSV is revoked\n");
778                 return -EPERM;
779         }
780
781         intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
782                        bksv.reg[0]);
783         intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
784                        bksv.reg[1]);
785
786         ret = shim->repeater_present(dig_port, &repeater_present);
787         if (ret)
788                 return ret;
789         if (repeater_present)
790                 intel_de_write(i915, HDCP_REP_CTL,
791                                intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));
792
793         ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
794         if (ret)
795                 return ret;
796
797         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
798                        HDCP_CONF_AUTH_AND_ENC);
799
800         /* Wait for R0 ready */
801         if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
802                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
803                 drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
804                 return -ETIMEDOUT;
805         }
806
807         /*
808          * Wait for R0' to become available. The spec says 100ms from Aksv, but
809          * some monitors can take longer than this. We'll set the timeout at
810          * 300ms just to be sure.
811          *
812          * On DP, there's an R0_READY bit available but no such bit
813          * exists on HDMI. Since the upper-bound is the same, we'll just do
814          * the stupid thing instead of polling on one and not the other.
815          */
816         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
817
818         tries = 3;
819
820         /*
821          * The DP HDCP spec mandates two more attempts to re-read R0' in
822          * case of an R0 mismatch.
823          */
824         for (i = 0; i < tries; i++) {
825                 ri.reg = 0;
826                 ret = shim->read_ri_prime(dig_port, ri.shim);
827                 if (ret)
828                         return ret;
829                 intel_de_write(i915,
830                                HDCP_RPRIME(i915, cpu_transcoder, port),
831                                ri.reg);
832
833                 /* Wait for Ri prime match */
834                 if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
835                               (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
836                         break;
837         }
838
839         if (i == tries) {
840                 drm_dbg_kms(&i915->drm,
841                             "Timed out waiting for Ri prime match (%x)\n",
842                             intel_de_read(i915,
843                                           HDCP_STATUS(i915, cpu_transcoder, port)));
844                 return -ETIMEDOUT;
845         }
846
847         /* Wait for encryption confirmation */
848         if (intel_de_wait_for_set(i915,
849                                   HDCP_STATUS(i915, cpu_transcoder, port),
850                                   HDCP_STATUS_ENC,
851                                   HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
852                 drm_err(&i915->drm, "Timed out waiting for encryption\n");
853                 return -ETIMEDOUT;
854         }
855
856         /* DP MST Auth Part 1 Step 2.a and Step 2.b */
857         if (shim->stream_encryption) {
858                 ret = shim->stream_encryption(connector, true);
859                 if (ret) {
860                         drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
861                                 connector->base.name, connector->base.base.id);
862                         return ret;
863                 }
864                 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
865                             transcoder_name(hdcp->stream_transcoder));
866         }
867
868         if (repeater_present)
869                 return intel_hdcp_auth_downstream(connector);
870
871         drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
872         return 0;
873 }
874
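/*
 * Recap of the Part 1 flow implemented above: check the sink's HDCP
 * capability (DP only), generate An in HW and hand An/Aksv to the sink, read
 * back a valid Bksv and reject revoked KSVs, program Bksv and the repeater
 * bit, enable signalling and AUTH_AND_ENC, then compare Ri/Ri' (re-reading
 * Ri' up to three times) before waiting for the encryption status bit. On
 * MST, stream encryption is switched on last; if a repeater was detected,
 * Part 2 continues in intel_hdcp_auth_downstream().
 */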
875 static int _intel_hdcp_disable(struct intel_connector *connector)
876 {
877         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
878         struct drm_i915_private *i915 = to_i915(connector->base.dev);
879         struct intel_hdcp *hdcp = &connector->hdcp;
880         enum port port = dig_port->base.port;
881         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
882         u32 repeater_ctl;
883         int ret;
884
885         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n",
886                     connector->base.name, connector->base.base.id);
887
888         if (hdcp->shim->stream_encryption) {
889                 ret = hdcp->shim->stream_encryption(connector, false);
890                 if (ret) {
891                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
892                                 connector->base.name, connector->base.base.id);
893                         return ret;
894                 }
895                 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
896                             transcoder_name(hdcp->stream_transcoder));
897                 /*
898                  * If there are other connectors on this port using HDCP,
899                  * don't disable it here until HDCP encryption has been
900                  * disabled for all connectors in the MST topology.
901                  */
902                 if (dig_port->num_hdcp_streams > 0)
903                         return 0;
904         }
905
906         hdcp->hdcp_encrypted = false;
907         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
908         if (intel_de_wait_for_clear(i915,
909                                     HDCP_STATUS(i915, cpu_transcoder, port),
910                                     ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
911                 drm_err(&i915->drm,
912                         "Failed to disable HDCP, timeout clearing status\n");
913                 return -ETIMEDOUT;
914         }
915
916         repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
917                                                    port);
918         intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);
919
920         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
921         if (ret) {
922                 drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
923                 return ret;
924         }
925
926         drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
927         return 0;
928 }
929
930 static int _intel_hdcp_enable(struct intel_connector *connector)
931 {
932         struct drm_i915_private *i915 = to_i915(connector->base.dev);
933         struct intel_hdcp *hdcp = &connector->hdcp;
934         int i, ret, tries = 3;
935
936         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n",
937                     connector->base.name, connector->base.base.id);
938
939         if (!hdcp_key_loadable(i915)) {
940                 drm_err(&i915->drm, "HDCP key Load is not possible\n");
941                 return -ENXIO;
942         }
943
944         for (i = 0; i < KEY_LOAD_TRIES; i++) {
945                 ret = intel_hdcp_load_keys(i915);
946                 if (!ret)
947                         break;
948                 intel_hdcp_clear_keys(i915);
949         }
950         if (ret) {
951                 drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
952                         ret);
953                 return ret;
954         }
955
956         /* In case of authentication failures, the HDCP spec expects reauth. */
957         for (i = 0; i < tries; i++) {
958                 ret = intel_hdcp_auth(connector);
959                 if (!ret) {
960                         hdcp->hdcp_encrypted = true;
961                         return 0;
962                 }
963
964                 drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);
965
966                 /* Ensure HDCP encryption and signalling are stopped. */
967                 _intel_hdcp_disable(connector);
968         }
969
970         drm_dbg_kms(&i915->drm,
971                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
972         return ret;
973 }
974
975 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
976 {
977         return container_of(hdcp, struct intel_connector, hdcp);
978 }
979
980 static void intel_hdcp_update_value(struct intel_connector *connector,
981                                     u64 value, bool update_property)
982 {
983         struct drm_device *dev = connector->base.dev;
984         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
985         struct intel_hdcp *hdcp = &connector->hdcp;
986
987         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
988
989         if (hdcp->value == value)
990                 return;
991
992         drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
993
994         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
995                 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
996                         dig_port->num_hdcp_streams--;
997         } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
998                 dig_port->num_hdcp_streams++;
999         }
1000
1001         hdcp->value = value;
1002         if (update_property) {
1003                 drm_connector_get(&connector->base);
1004                 schedule_work(&hdcp->prop_work);
1005         }
1006 }
1007
1008 /* Implements Part 3 of the HDCP authorization procedure */
1009 static int intel_hdcp_check_link(struct intel_connector *connector)
1010 {
1011         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1012         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1013         struct intel_hdcp *hdcp = &connector->hdcp;
1014         enum port port = dig_port->base.port;
1015         enum transcoder cpu_transcoder;
1016         int ret = 0;
1017
1018         mutex_lock(&hdcp->mutex);
1019         mutex_lock(&dig_port->hdcp_mutex);
1020
1021         cpu_transcoder = hdcp->cpu_transcoder;
1022
1023         /* check_link is valid only when HDCP1.4 is enabled */
1024         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1025             !hdcp->hdcp_encrypted) {
1026                 ret = -EINVAL;
1027                 goto out;
1028         }
1029
1030         if (drm_WARN_ON(&i915->drm,
1031                         !intel_hdcp_in_use(i915, cpu_transcoder, port))) {
1032                 drm_err(&i915->drm,
1033                         "%s:%d HDCP link stopped encryption,%x\n",
1034                         connector->base.name, connector->base.base.id,
1035                         intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
1036                 ret = -ENXIO;
1037                 intel_hdcp_update_value(connector,
1038                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1039                                         true);
1040                 goto out;
1041         }
1042
1043         if (hdcp->shim->check_link(dig_port, connector)) {
1044                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1045                         intel_hdcp_update_value(connector,
1046                                 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1047                 }
1048                 goto out;
1049         }
1050
1051         drm_dbg_kms(&i915->drm,
1052                     "[%s:%d] HDCP link failed, retrying authentication\n",
1053                     connector->base.name, connector->base.base.id);
1054
1055         ret = _intel_hdcp_disable(connector);
1056         if (ret) {
1057                 drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
1058                 intel_hdcp_update_value(connector,
1059                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1060                                         true);
1061                 goto out;
1062         }
1063
1064         ret = _intel_hdcp_enable(connector);
1065         if (ret) {
1066                 drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
1067                 intel_hdcp_update_value(connector,
1068                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1069                                         true);
1070                 goto out;
1071         }
1072
1073 out:
1074         mutex_unlock(&dig_port->hdcp_mutex);
1075         mutex_unlock(&hdcp->mutex);
1076         return ret;
1077 }
1078
1079 static void intel_hdcp_prop_work(struct work_struct *work)
1080 {
1081         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1082                                                prop_work);
1083         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1084         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1085
1086         drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
1087         mutex_lock(&hdcp->mutex);
1088
1089         /*
1090          * This worker is only used to flip between ENABLED/DESIRED. Either of
1091          * those to UNDESIRED is handled by core. If value == UNDESIRED,
1092          * we're running just after hdcp has been disabled, so just exit
1093          */
1094         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1095                 drm_hdcp_update_content_protection(&connector->base,
1096                                                    hdcp->value);
1097
1098         mutex_unlock(&hdcp->mutex);
1099         drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1100
1101         drm_connector_put(&connector->base);
1102 }
1103
1104 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
1105 {
1106         return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
1107                 (DISPLAY_VER(i915) >= 12 || port < PORT_E);
1108 }
1109
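/*
 * In other words: HDCP requires the platform to advertise has_hdcp, and on
 * display versions before 12 it is additionally limited to ports A through D
 * (PORT_E has no HDCP support there).
 */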
1110 static int
1111 hdcp2_prepare_ake_init(struct intel_connector *connector,
1112                        struct hdcp2_ake_init *ake_data)
1113 {
1114         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1115         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1116         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1117         struct i915_hdcp_arbiter *arbiter;
1118         int ret;
1119
1120         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1121         arbiter = i915->display.hdcp.arbiter;
1122
1123         if (!arbiter || !arbiter->ops) {
1124                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1125                 return -EINVAL;
1126         }
1127
1128         ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1129         if (ret)
1130                 drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
1131                             ret);
1132         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1133
1134         return ret;
1135 }
1136
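/*
 * All of the hdcp2_* arbiter wrappers in this file follow the same shape:
 * take hdcp_mutex, bail out with -EINVAL if no arbiter (or arbiter ops) is
 * registered, forward the message to the arbiter op, log a debug message on
 * failure, and drop the mutex. A hypothetical consolidation, shown only as a
 * sketch (this helper does not exist in the driver), could look like this,
 * with the per-message arbiter op passed in by the caller:
 */
#if 0   /* sketch only, never built */
static int hdcp2_call_arbiter(struct drm_i915_private *i915,
                              int (*op)(struct i915_hdcp_arbiter *arbiter))
{
        struct i915_hdcp_arbiter *arbiter;
        int ret;

        mutex_lock(&i915->display.hdcp.hdcp_mutex);
        arbiter = i915->display.hdcp.arbiter;
        if (!arbiter || !arbiter->ops) {
                mutex_unlock(&i915->display.hdcp.hdcp_mutex);
                return -EINVAL;
        }

        ret = op(arbiter);      /* e.g. would wrap arbiter->ops->verify_hprime(...) */
        if (ret < 0)
                drm_dbg_kms(&i915->drm, "HDCP2 arbiter op failed. %d\n", ret);
        mutex_unlock(&i915->display.hdcp.hdcp_mutex);

        return ret;
}
#endif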
1137 static int
1138 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1139                                 struct hdcp2_ake_send_cert *rx_cert,
1140                                 bool *paired,
1141                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1142                                 size_t *msg_sz)
1143 {
1144         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1145         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1146         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1147         struct i915_hdcp_arbiter *arbiter;
1148         int ret;
1149
1150         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1151         arbiter = i915->display.hdcp.arbiter;
1152
1153         if (!arbiter || !arbiter->ops) {
1154                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1155                 return -EINVAL;
1156         }
1157
1158         ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1159                                                          rx_cert, paired,
1160                                                          ek_pub_km, msg_sz);
1161         if (ret < 0)
1162                 drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
1163                             ret);
1164         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1165
1166         return ret;
1167 }
1168
1169 static int hdcp2_verify_hprime(struct intel_connector *connector,
1170                                struct hdcp2_ake_send_hprime *rx_hprime)
1171 {
1172         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1173         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1174         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1175         struct i915_hdcp_arbiter *arbiter;
1176         int ret;
1177
1178         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1179         arbiter = i915->display.hdcp.arbiter;
1180
1181         if (!arbiter || !arbiter->ops) {
1182                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1183                 return -EINVAL;
1184         }
1185
1186         ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1187         if (ret < 0)
1188                 drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
1189         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1190
1191         return ret;
1192 }
1193
1194 static int
1195 hdcp2_store_pairing_info(struct intel_connector *connector,
1196                          struct hdcp2_ake_send_pairing_info *pairing_info)
1197 {
1198         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1199         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1200         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1201         struct i915_hdcp_arbiter *arbiter;
1202         int ret;
1203
1204         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1205         arbiter = i915->display.hdcp.arbiter;
1206
1207         if (!arbiter || !arbiter->ops) {
1208                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1209                 return -EINVAL;
1210         }
1211
1212         ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1213         if (ret < 0)
1214                 drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
1215                             ret);
1216         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1217
1218         return ret;
1219 }
1220
1221 static int
1222 hdcp2_prepare_lc_init(struct intel_connector *connector,
1223                       struct hdcp2_lc_init *lc_init)
1224 {
1225         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1226         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1227         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1228         struct i915_hdcp_arbiter *arbiter;
1229         int ret;
1230
1231         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1232         arbiter = i915->display.hdcp.arbiter;
1233
1234         if (!arbiter || !arbiter->ops) {
1235                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1236                 return -EINVAL;
1237         }
1238
1239         ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1240         if (ret < 0)
1241                 drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
1242                             ret);
1243         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1244
1245         return ret;
1246 }
1247
1248 static int
1249 hdcp2_verify_lprime(struct intel_connector *connector,
1250                     struct hdcp2_lc_send_lprime *rx_lprime)
1251 {
1252         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1253         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1254         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1255         struct i915_hdcp_arbiter *arbiter;
1256         int ret;
1257
1258         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1259         arbiter = i915->display.hdcp.arbiter;
1260
1261         if (!arbiter || !arbiter->ops) {
1262                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1263                 return -EINVAL;
1264         }
1265
1266         ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1267         if (ret < 0)
1268                 drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
1269                             ret);
1270         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1271
1272         return ret;
1273 }
1274
1275 static int hdcp2_prepare_skey(struct intel_connector *connector,
1276                               struct hdcp2_ske_send_eks *ske_data)
1277 {
1278         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1279         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1280         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1281         struct i915_hdcp_arbiter *arbiter;
1282         int ret;
1283
1284         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1285         arbiter = i915->display.hdcp.arbiter;
1286
1287         if (!arbiter || !arbiter->ops) {
1288                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1289                 return -EINVAL;
1290         }
1291
1292         ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1293         if (ret < 0)
1294                 drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
1295                             ret);
1296         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1297
1298         return ret;
1299 }
1300
1301 static int
1302 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1303                                       struct hdcp2_rep_send_receiverid_list
1304                                                                 *rep_topology,
1305                                       struct hdcp2_rep_send_ack *rep_send_ack)
1306 {
1307         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1308         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1309         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1310         struct i915_hdcp_arbiter *arbiter;
1311         int ret;
1312
1313         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1314         arbiter = i915->display.hdcp.arbiter;
1315
1316         if (!arbiter || !arbiter->ops) {
1317                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1318                 return -EINVAL;
1319         }
1320
1321         ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1322                                                             data,
1323                                                             rep_topology,
1324                                                             rep_send_ack);
1325         if (ret < 0)
1326                 drm_dbg_kms(&i915->drm,
1327                             "Verify rep topology failed. %d\n", ret);
1328         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1329
1330         return ret;
1331 }
1332
1333 static int
1334 hdcp2_verify_mprime(struct intel_connector *connector,
1335                     struct hdcp2_rep_stream_ready *stream_ready)
1336 {
1337         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1338         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1339         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1340         struct i915_hdcp_arbiter *arbiter;
1341         int ret;
1342
1343         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1344         arbiter = i915->display.hdcp.arbiter;
1345
1346         if (!arbiter || !arbiter->ops) {
1347                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1348                 return -EINVAL;
1349         }
1350
1351         ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1352         if (ret < 0)
1353                 drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
1354         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1355
1356         return ret;
1357 }
1358
1359 static int hdcp2_authenticate_port(struct intel_connector *connector)
1360 {
1361         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1362         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1363         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1364         struct i915_hdcp_arbiter *arbiter;
1365         int ret;
1366
1367         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1368         arbiter = i915->display.hdcp.arbiter;
1369
1370         if (!arbiter || !arbiter->ops) {
1371                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1372                 return -EINVAL;
1373         }
1374
1375         ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1376         if (ret < 0)
1377                 drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
1378                             ret);
1379         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1380
1381         return ret;
1382 }
1383
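/* Close the HDCP 2.2 session that the fw arbiter maintains for this port. */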
1384 static int hdcp2_close_session(struct intel_connector *connector)
1385 {
1386         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1387         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1388         struct i915_hdcp_arbiter *arbiter;
1389         int ret;
1390
1391         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1392         arbiter = i915->display.hdcp.arbiter;
1393
1394         if (!arbiter || !arbiter->ops) {
1395                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1396                 return -EINVAL;
1397         }
1398
1399         ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1400                                              &dig_port->hdcp_port_data);
1401         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1402
1403         return ret;
1404 }
1405
1406 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1407 {
1408         return hdcp2_close_session(connector);
1409 }
1410
1411 /* Authentication flow starts from here */
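/*
 * AKE: send AKE_Init, validate the receiver certificate and its revocation
 * status, exchange km (stored or not), verify H' and store the pairing info
 * when the sink is not yet paired.
 */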
1412 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1413 {
1414         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1415         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1416         struct intel_hdcp *hdcp = &connector->hdcp;
1417         union {
1418                 struct hdcp2_ake_init ake_init;
1419                 struct hdcp2_ake_send_cert send_cert;
1420                 struct hdcp2_ake_no_stored_km no_stored_km;
1421                 struct hdcp2_ake_send_hprime send_hprime;
1422                 struct hdcp2_ake_send_pairing_info pairing_info;
1423         } msgs;
1424         const struct intel_hdcp_shim *shim = hdcp->shim;
1425         size_t size;
1426         int ret;
1427
1428         /* Init for seq_num */
1429         hdcp->seq_num_v = 0;
1430         hdcp->seq_num_m = 0;
1431
1432         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1433         if (ret < 0)
1434                 return ret;
1435
1436         ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
1437                                   sizeof(msgs.ake_init));
1438         if (ret < 0)
1439                 return ret;
1440
1441         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
1442                                  &msgs.send_cert, sizeof(msgs.send_cert));
1443         if (ret < 0)
1444                 return ret;
1445
1446         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1447                 drm_dbg_kms(&i915->drm, "cert.rx_caps don't claim HDCP2.2\n");
1448                 return -EINVAL;
1449         }
1450
1451         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1452
1453         if (drm_hdcp_check_ksvs_revoked(&i915->drm,
1454                                         msgs.send_cert.cert_rx.receiver_id,
1455                                         1) > 0) {
1456                 drm_err(&i915->drm, "Receiver ID is revoked\n");
1457                 return -EPERM;
1458         }
1459
1460         /*
1461          * Here msgs.no_stored_km will also hold the message corresponding
1462          * to a previously stored km.
1463          */
1464         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1465                                               &hdcp->is_paired,
1466                                               &msgs.no_stored_km, &size);
1467         if (ret < 0)
1468                 return ret;
1469
1470         ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
1471         if (ret < 0)
1472                 return ret;
1473
1474         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
1475                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1476         if (ret < 0)
1477                 return ret;
1478
1479         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1480         if (ret < 0)
1481                 return ret;
1482
1483         if (!hdcp->is_paired) {
1484                 /* Pairing is required */
1485                 ret = shim->read_2_2_msg(dig_port,
1486                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1487                                          &msgs.pairing_info,
1488                                          sizeof(msgs.pairing_info));
1489                 if (ret < 0)
1490                         return ret;
1491
1492                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1493                 if (ret < 0)
1494                         return ret;
1495                 hdcp->is_paired = true;
1496         }
1497
1498         return 0;
1499 }
1500
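/*
 * Locality check: send LC_Init and verify the sink's L', retrying up to
 * HDCP2_LC_RETRY_CNT times.
 */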
1501 static int hdcp2_locality_check(struct intel_connector *connector)
1502 {
1503         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1504         struct intel_hdcp *hdcp = &connector->hdcp;
1505         union {
1506                 struct hdcp2_lc_init lc_init;
1507                 struct hdcp2_lc_send_lprime send_lprime;
1508         } msgs;
1509         const struct intel_hdcp_shim *shim = hdcp->shim;
1510         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1511
1512         for (i = 0; i < tries; i++) {
1513                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1514                 if (ret < 0)
1515                         continue;
1516
1517                 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1518                                       sizeof(msgs.lc_init));
1519                 if (ret < 0)
1520                         continue;
1521
1522                 ret = shim->read_2_2_msg(dig_port,
1523                                          HDCP_2_2_LC_SEND_LPRIME,
1524                                          &msgs.send_lprime,
1525                                          sizeof(msgs.send_lprime));
1526                 if (ret < 0)
1527                         continue;
1528
1529                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1530                 if (!ret)
1531                         break;
1532         }
1533
1534         return ret;
1535 }
1536
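/*
 * SKE: get the encrypted session key (EKS) from the fw arbiter and send it
 * to the sink.
 */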
1537 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1538 {
1539         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1540         struct intel_hdcp *hdcp = &connector->hdcp;
1541         struct hdcp2_ske_send_eks send_eks;
1542         int ret;
1543
1544         ret = hdcp2_prepare_skey(connector, &send_eks);
1545         if (ret < 0)
1546                 return ret;
1547
1548         ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1549                                         sizeof(send_eks));
1550         if (ret < 0)
1551                 return ret;
1552
1553         return 0;
1554 }
1555
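/*
 * Send RepeaterAuth_Stream_Manage with the per-stream content types, read
 * RepeaterAuth_Stream_Ready back and have the fw arbiter verify M'.
 */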
1556 static
1557 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1558 {
1559         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1560         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1561         struct intel_hdcp *hdcp = &connector->hdcp;
1562         union {
1563                 struct hdcp2_rep_stream_manage stream_manage;
1564                 struct hdcp2_rep_stream_ready stream_ready;
1565         } msgs;
1566         const struct intel_hdcp_shim *shim = hdcp->shim;
1567         int ret, streams_size_delta, i;
1568
1569         if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1570                 return -ERANGE;
1571
1572         /* Prepare RepeaterAuth_Stream_Manage msg */
1573         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1574         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1575
1576         msgs.stream_manage.k = cpu_to_be16(data->k);
1577
1578         for (i = 0; i < data->k; i++) {
1579                 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1580                 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1581         }
1582
1583         streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1584                                 sizeof(struct hdcp2_streamid_type);
1585         /* Send it to Repeater */
1586         ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
1587                                   sizeof(msgs.stream_manage) - streams_size_delta);
1588         if (ret < 0)
1589                 goto out;
1590
1591         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
1592                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1593         if (ret < 0)
1594                 goto out;
1595
1596         data->seq_num_m = hdcp->seq_num_m;
1597
1598         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1599
1600 out:
1601         hdcp->seq_num_m++;
1602
1603         return ret;
1604 }
1605
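/*
 * Read the repeater's ReceiverID_List, validate the topology limits,
 * seq_num_v and revocation status, then send RepeaterAuth_Send_Ack.
 */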
1606 static
1607 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1608 {
1609         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1610         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1611         struct intel_hdcp *hdcp = &connector->hdcp;
1612         union {
1613                 struct hdcp2_rep_send_receiverid_list recvid_list;
1614                 struct hdcp2_rep_send_ack rep_ack;
1615         } msgs;
1616         const struct intel_hdcp_shim *shim = hdcp->shim;
1617         u32 seq_num_v, device_cnt;
1618         u8 *rx_info;
1619         int ret;
1620
1621         ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
1622                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1623         if (ret < 0)
1624                 return ret;
1625
1626         rx_info = msgs.recvid_list.rx_info;
1627
1628         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1629             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1630                 drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
1631                 return -EINVAL;
1632         }
1633
1634         /*
1635          * MST topology is not Type 1 capable if it contains a downstream
1636          * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1637          */
1638         dig_port->hdcp_mst_type1_capable =
1639                 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1640                 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1641
1642         /* Convert and store the seq_num_v in a local variable as a DWORD */
1643         seq_num_v =
1644                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1645
1646         if (!hdcp->hdcp2_encrypted && seq_num_v) {
1647                 drm_dbg_kms(&i915->drm,
1648                             "Non-zero Seq_num_v at first RecvId_List msg\n");
1649                 return -EINVAL;
1650         }
1651
1652         if (seq_num_v < hdcp->seq_num_v) {
1653                 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1654                 drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
1655                 return -EINVAL;
1656         }
1657
1658         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1659                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1660         if (drm_hdcp_check_ksvs_revoked(&i915->drm,
1661                                         msgs.recvid_list.receiver_ids,
1662                                         device_cnt) > 0) {
1663                 drm_err(&i915->drm, "Revoked receiver ID(s) in the list\n");
1664                 return -EPERM;
1665         }
1666
1667         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1668                                                     &msgs.recvid_list,
1669                                                     &msgs.rep_ack);
1670         if (ret < 0)
1671                 return ret;
1672
1673         hdcp->seq_num_v = seq_num_v;
1674         ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
1675                                   sizeof(msgs.rep_ack));
1676         if (ret < 0)
1677                 return ret;
1678
1679         return 0;
1680 }
1681
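/*
 * Complete HDCP 2.2 sink authentication: AKE, locality check, SKE, stream
 * type configuration and, for repeaters, topology authentication.
 */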
1682 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1683 {
1684         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1685         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1686         struct intel_hdcp *hdcp = &connector->hdcp;
1687         const struct intel_hdcp_shim *shim = hdcp->shim;
1688         int ret;
1689
1690         ret = hdcp2_authentication_key_exchange(connector);
1691         if (ret < 0) {
1692                 drm_dbg_kms(&i915->drm, "AKE Failed. Err: %d\n", ret);
1693                 return ret;
1694         }
1695
1696         ret = hdcp2_locality_check(connector);
1697         if (ret < 0) {
1698                 drm_dbg_kms(&i915->drm,
1699                             "Locality Check failed. Err: %d\n", ret);
1700                 return ret;
1701         }
1702
1703         ret = hdcp2_session_key_exchange(connector);
1704         if (ret < 0) {
1705                 drm_dbg_kms(&i915->drm, "SKE Failed. Err: %d\n", ret);
1706                 return ret;
1707         }
1708
1709         if (shim->config_stream_type) {
1710                 ret = shim->config_stream_type(dig_port,
1711                                                hdcp->is_repeater,
1712                                                hdcp->content_type);
1713                 if (ret < 0)
1714                         return ret;
1715         }
1716
1717         if (hdcp->is_repeater) {
1718                 ret = hdcp2_authenticate_repeater_topology(connector);
1719                 if (ret < 0) {
1720                         drm_dbg_kms(&i915->drm,
1721                                     "Repeater Auth Failed. Err: %d\n", ret);
1722                         return ret;
1723                 }
1724         }
1725
1726         return ret;
1727 }
1728
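/*
 * Enable HDCP 2.2 stream encryption, provided link encryption is already up;
 * otherwise deauthenticate the port for link recovery.
 */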
1729 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1730 {
1731         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1732         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1733         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1734         struct intel_hdcp *hdcp = &connector->hdcp;
1735         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1736         enum port port = dig_port->base.port;
1737         int ret = 0;
1738
1739         if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1740                             LINK_ENCRYPTION_STATUS)) {
1741                 drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1742                         connector->base.name, connector->base.base.id);
1743                 ret = -EPERM;
1744                 goto link_recover;
1745         }
1746
1747         if (hdcp->shim->stream_2_2_encryption) {
1748                 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1749                 if (ret) {
1750                         drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1751                                 connector->base.name, connector->base.base.id);
1752                         return ret;
1753                 }
1754                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1755                             transcoder_name(hdcp->stream_transcoder));
1756         }
1757
1758         return 0;
1759
1760 link_recover:
1761         if (hdcp2_deauthenticate_port(connector) < 0)
1762                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1763
1764         dig_port->hdcp_auth_status = false;
1765         data->k = 0;
1766
1767         return ret;
1768 }
1769
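/*
 * Enable HDCP 2.2 link encryption on the port and wait for the hardware to
 * report LINK_ENCRYPTION_STATUS.
 */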
1770 static int hdcp2_enable_encryption(struct intel_connector *connector)
1771 {
1772         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1773         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1774         struct intel_hdcp *hdcp = &connector->hdcp;
1775         enum port port = dig_port->base.port;
1776         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1777         int ret;
1778
1779         drm_WARN_ON(&i915->drm,
1780                     intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1781                     LINK_ENCRYPTION_STATUS);
1782         if (hdcp->shim->toggle_signalling) {
1783                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1784                                                     true);
1785                 if (ret) {
1786                         drm_err(&i915->drm,
1787                                 "Failed to enable HDCP signalling. %d\n",
1788                                 ret);
1789                         return ret;
1790                 }
1791         }
1792
1793         if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1794             LINK_AUTH_STATUS)
1795                 /* Link is Authenticated. Now set for Encryption */
1796                 intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
1797                              0, CTL_LINK_ENCRYPTION_REQ);
1798
1799         ret = intel_de_wait_for_set(i915,
1800                                     HDCP2_STATUS(i915, cpu_transcoder,
1801                                                  port),
1802                                     LINK_ENCRYPTION_STATUS,
1803                                     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1804         dig_port->hdcp_auth_status = true;
1805
1806         return ret;
1807 }
1808
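/*
 * Disable HDCP 2.2 link encryption and HDCP signalling, waiting for the
 * encryption status bit to clear.
 */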
1809 static int hdcp2_disable_encryption(struct intel_connector *connector)
1810 {
1811         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1812         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1813         struct intel_hdcp *hdcp = &connector->hdcp;
1814         enum port port = dig_port->base.port;
1815         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1816         int ret;
1817
1818         drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1819                                       LINK_ENCRYPTION_STATUS));
1820
1821         intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
1822                      CTL_LINK_ENCRYPTION_REQ, 0);
1823
1824         ret = intel_de_wait_for_clear(i915,
1825                                       HDCP2_STATUS(i915, cpu_transcoder,
1826                                                    port),
1827                                       LINK_ENCRYPTION_STATUS,
1828                                       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1829         if (ret == -ETIMEDOUT)
1830                 drm_dbg_kms(&i915->drm, "Disable encryption timed out\n");
1831
1832         if (hdcp->shim->toggle_signalling) {
1833                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1834                                                     false);
1835                 if (ret) {
1836                         drm_err(&i915->drm,
1837                                 "Failed to disable HDCP signalling. %d\n",
1838                                 ret);
1839                         return ret;
1840                 }
1841         }
1842
1843         return ret;
1844 }
1845
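/*
 * Retry wrapper for stream management: up to three attempts, bailing out
 * early on a seq_num_m roll over.
 */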
1846 static int
1847 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1848 {
1849         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1850         int i, tries = 3, ret;
1851
1852         if (!connector->hdcp.is_repeater)
1853                 return 0;
1854
1855         for (i = 0; i < tries; i++) {
1856                 ret = _hdcp2_propagate_stream_management_info(connector);
1857                 if (!ret)
1858                         break;
1859
1860                 /* Let's restart the auth in case of seq_num_m roll over */
1861                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1862                         drm_dbg_kms(&i915->drm,
1863                                     "seq_num_m roll over.(%d)\n", ret);
1864                         break;
1865                 }
1866
1867                 drm_dbg_kms(&i915->drm,
1868                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1869                             i + 1, tries, ret);
1870         }
1871
1872         return ret;
1873 }
1874
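/*
 * Top level HDCP 2.2 flow: authenticate the sink and port (with retries),
 * then enable link and stream encryption.
 */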
1875 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1876 {
1877         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1878         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1879         int ret = 0, i, tries = 3;
1880
1881         for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1882                 ret = hdcp2_authenticate_sink(connector);
1883                 if (!ret) {
1884                         intel_hdcp_prepare_streams(connector);
1885
1886                         ret = hdcp2_propagate_stream_management_info(connector);
1887                         if (ret) {
1888                                 drm_dbg_kms(&i915->drm,
1889                                             "Stream management failed.(%d)\n",
1890                                             ret);
1891                                 break;
1892                         }
1893
1894                         ret = hdcp2_authenticate_port(connector);
1895                         if (!ret)
1896                                 break;
1897                         drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1898                                     ret);
1899                 }
1900
1901                 /* Clearing the mei hdcp session */
1902                 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1903                             i + 1, tries, ret);
1904                 if (hdcp2_deauthenticate_port(connector) < 0)
1905                         drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1906         }
1907
1908         if (!ret && !dig_port->hdcp_auth_status) {
1909                 /*
1910                  * Ensure the required 200 ms minimum interval between
1911                  * Session Key Exchange and enabling encryption.
1912                  */
1913                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1914                 ret = hdcp2_enable_encryption(connector);
1915                 if (ret < 0) {
1916                         drm_dbg_kms(&i915->drm,
1917                                     "Encryption Enable Failed.(%d)\n", ret);
1918                         if (hdcp2_deauthenticate_port(connector) < 0)
1919                                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1920                 }
1921         }
1922
1923         if (!ret)
1924                 ret = hdcp2_enable_stream_encryption(connector);
1925
1926         return ret;
1927 }
1928
1929 static int _intel_hdcp2_enable(struct intel_connector *connector)
1930 {
1931         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1932         struct intel_hdcp *hdcp = &connector->hdcp;
1933         int ret;
1934
1935         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1936                     connector->base.name, connector->base.base.id,
1937                     hdcp->content_type);
1938
1939         ret = hdcp2_authenticate_and_encrypt(connector);
1940         if (ret) {
1941                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1942                             hdcp->content_type, ret);
1943                 return ret;
1944         }
1945
1946         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1947                     connector->base.name, connector->base.base.id,
1948                     hdcp->content_type);
1949
1950         hdcp->hdcp2_encrypted = true;
1951         return 0;
1952 }
1953
1954 static int
1955 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1956 {
1957         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1958         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1959         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1960         struct intel_hdcp *hdcp = &connector->hdcp;
1961         int ret;
1962
1963         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1964                     connector->base.name, connector->base.base.id);
1965
1966         if (hdcp->shim->stream_2_2_encryption) {
1967                 ret = hdcp->shim->stream_2_2_encryption(connector, false);
1968                 if (ret) {
1969                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1970                                 connector->base.name, connector->base.base.id);
1971                         return ret;
1972                 }
1973                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1974                             transcoder_name(hdcp->stream_transcoder));
1975
1976                 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1977                         return 0;
1978         }
1979
1980         ret = hdcp2_disable_encryption(connector);
1981
1982         if (hdcp2_deauthenticate_port(connector) < 0)
1983                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1984
1985         connector->hdcp.hdcp2_encrypted = false;
1986         dig_port->hdcp_auth_status = false;
1987         data->k = 0;
1988
1989         return ret;
1990 }
1991
1992 /* Implements the Link Integrity Check for HDCP2.2 */
1993 static int intel_hdcp2_check_link(struct intel_connector *connector)
1994 {
1995         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1996         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1997         struct intel_hdcp *hdcp = &connector->hdcp;
1998         enum port port = dig_port->base.port;
1999         enum transcoder cpu_transcoder;
2000         int ret = 0;
2001
2002         mutex_lock(&hdcp->mutex);
2003         mutex_lock(&dig_port->hdcp_mutex);
2004         cpu_transcoder = hdcp->cpu_transcoder;
2005
2006         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2007         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2008             !hdcp->hdcp2_encrypted) {
2009                 ret = -EINVAL;
2010                 goto out;
2011         }
2012
2013         if (drm_WARN_ON(&i915->drm,
2014                         !intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
2015                 drm_err(&i915->drm,
2016                         "HDCP2.2 link stopped the encryption, %x\n",
2017                         intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
2018                 ret = -ENXIO;
2019                 _intel_hdcp2_disable(connector, true);
2020                 intel_hdcp_update_value(connector,
2021                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2022                                         true);
2023                 goto out;
2024         }
2025
2026         ret = hdcp->shim->check_2_2_link(dig_port, connector);
2027         if (ret == HDCP_LINK_PROTECTED) {
2028                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2029                         intel_hdcp_update_value(connector,
2030                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2031                                         true);
2032                 }
2033                 goto out;
2034         }
2035
2036         if (ret == HDCP_TOPOLOGY_CHANGE) {
2037                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2038                         goto out;
2039
2040                 drm_dbg_kms(&i915->drm,
2041                             "HDCP2.2 Downstream topology change\n");
2042                 ret = hdcp2_authenticate_repeater_topology(connector);
2043                 if (!ret) {
2044                         intel_hdcp_update_value(connector,
2045                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2046                                         true);
2047                         goto out;
2048                 }
2049                 drm_dbg_kms(&i915->drm,
2050                             "[%s:%d] Repeater topology auth failed.(%d)\n",
2051                             connector->base.name, connector->base.base.id,
2052                             ret);
2053         } else {
2054                 drm_dbg_kms(&i915->drm,
2055                             "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2056                             connector->base.name, connector->base.base.id);
2057         }
2058
2059         ret = _intel_hdcp2_disable(connector, true);
2060         if (ret) {
2061                 drm_err(&i915->drm,
2062                         "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2063                         connector->base.name, connector->base.base.id, ret);
2064                 intel_hdcp_update_value(connector,
2065                                 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2066                 goto out;
2067         }
2068
2069         ret = _intel_hdcp2_enable(connector);
2070         if (ret) {
2071                 drm_dbg_kms(&i915->drm,
2072                             "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2073                             connector->base.name, connector->base.base.id,
2074                             ret);
2075                 intel_hdcp_update_value(connector,
2076                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2077                                         true);
2078                 goto out;
2079         }
2080
2081 out:
2082         mutex_unlock(&dig_port->hdcp_mutex);
2083         mutex_unlock(&hdcp->mutex);
2084         return ret;
2085 }
2086
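/*
 * Periodic link integrity check, re-armed with the HDCP 2.2 or HDCP 1.4
 * check period depending on which version is active.
 */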
2087 static void intel_hdcp_check_work(struct work_struct *work)
2088 {
2089         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2090                                                struct intel_hdcp,
2091                                                check_work);
2092         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2093
2094         if (drm_connector_is_unregistered(&connector->base))
2095                 return;
2096
2097         if (!intel_hdcp2_check_link(connector))
2098                 schedule_delayed_work(&hdcp->check_work,
2099                                       DRM_HDCP2_CHECK_PERIOD_MS);
2100         else if (!intel_hdcp_check_link(connector))
2101                 schedule_delayed_work(&hdcp->check_work,
2102                                       DRM_HDCP_CHECK_PERIOD_MS);
2103 }
2104
2105 static int i915_hdcp_component_bind(struct device *i915_kdev,
2106                                     struct device *mei_kdev, void *data)
2107 {
2108         struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
2109
2110         drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
2111         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2112         i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2113         i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
2114         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2115
2116         return 0;
2117 }
2118
2119 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2120                                        struct device *mei_kdev, void *data)
2121 {
2122         struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
2123
2124         drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
2125         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2126         i915->display.hdcp.arbiter = NULL;
2127         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2128 }
2129
2130 static const struct component_ops i915_hdcp_ops = {
2131         .bind   = i915_hdcp_component_bind,
2132         .unbind = i915_hdcp_component_unbind,
2133 };
2134
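/*
 * Map a display port to the HDCP DDI index expected by the fw: PORT_A maps
 * to HDCP_DDI_A, PORT_B..PORT_F are cast through directly, anything else is
 * invalid.
 */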
2135 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2136 {
2137         switch (port) {
2138         case PORT_A:
2139                 return HDCP_DDI_A;
2140         case PORT_B ... PORT_F:
2141                 return (enum hdcp_ddi)port;
2142         default:
2143                 return HDCP_DDI_INVALID_PORT;
2144         }
2145 }
2146
2147 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2148 {
2149         switch (cpu_transcoder) {
2150         case TRANSCODER_A ... TRANSCODER_D:
2151                 return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2152         default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2153                 return HDCP_INVALID_TRANSCODER;
2154         }
2155 }
2156
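/*
 * Fill in the invariant parts of hdcp_port_data for the port and allocate
 * the per-stream array (one entry per pipe).
 */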
2157 static int initialize_hdcp_port_data(struct intel_connector *connector,
2158                                      struct intel_digital_port *dig_port,
2159                                      const struct intel_hdcp_shim *shim)
2160 {
2161         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2162         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2163         enum port port = dig_port->base.port;
2164
2165         if (DISPLAY_VER(i915) < 12)
2166                 data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2167         else
2168                 /*
2169                  * As per ME FW API expectation, for GEN 12+, hdcp_ddi is filled
2170                  * with zero(INVALID PORT index).
2171                  * with zero (invalid port index).
2172                 data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2173
2174         /*
2175          * The associated transcoder is set and modified at modeset, so here
2176          * hdcp_transcoder is initialized to zero (invalid transcoder index).
2177          * For platforms older than Gen12 it stays invalid forever.
2178          */
2179         data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2180
2181         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2182         data->protocol = (u8)shim->protocol;
2183
2184         if (!data->streams)
2185                 data->streams = kcalloc(INTEL_NUM_PIPES(i915),
2186                                         sizeof(struct hdcp2_streamid_type),
2187                                         GFP_KERNEL);
2188         if (!data->streams) {
2189                 drm_err(&i915->drm, "Out of Memory\n");
2190                 return -ENOMEM;
2191         }
2192
2193         return 0;
2194 }
2195
2196 static bool is_hdcp2_supported(struct drm_i915_private *i915)
2197 {
2198         if (intel_hdcp_gsc_cs_required(i915))
2199                 return true;
2200
2201         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2202                 return false;
2203
2204         return (DISPLAY_VER(i915) >= 10 ||
2205                 IS_KABYLAKE(i915) ||
2206                 IS_COFFEELAKE(i915) ||
2207                 IS_COMETLAKE(i915));
2208 }
2209
2210 void intel_hdcp_component_init(struct drm_i915_private *i915)
2211 {
2212         int ret;
2213
2214         if (!is_hdcp2_supported(i915))
2215                 return;
2216
2217         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2218         drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);
2219
2220         i915->display.hdcp.comp_added = true;
2221         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2222         if (intel_hdcp_gsc_cs_required(i915))
2223                 ret = intel_hdcp_gsc_init(i915);
2224         else
2225                 ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
2226                                           I915_COMPONENT_HDCP);
2227
2228         if (ret < 0) {
2229                 drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
2230                             ret);
2231                 mutex_lock(&i915->display.hdcp.hdcp_mutex);
2232                 i915->display.hdcp.comp_added = false;
2233                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2234                 return;
2235         }
2236 }
2237
2238 static void intel_hdcp2_init(struct intel_connector *connector,
2239                              struct intel_digital_port *dig_port,
2240                              const struct intel_hdcp_shim *shim)
2241 {
2242         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2243         struct intel_hdcp *hdcp = &connector->hdcp;
2244         int ret;
2245
2246         ret = initialize_hdcp_port_data(connector, dig_port, shim);
2247         if (ret) {
2248                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2249                 return;
2250         }
2251
2252         hdcp->hdcp2_supported = true;
2253 }
2254
2255 int intel_hdcp_init(struct intel_connector *connector,
2256                     struct intel_digital_port *dig_port,
2257                     const struct intel_hdcp_shim *shim)
2258 {
2259         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2260         struct intel_hdcp *hdcp = &connector->hdcp;
2261         int ret;
2262
2263         if (!shim)
2264                 return -EINVAL;
2265
2266         if (is_hdcp2_supported(i915))
2267                 intel_hdcp2_init(connector, dig_port, shim);
2268
2269         ret =
2270         drm_connector_attach_content_protection_property(&connector->base,
2271                                                          hdcp->hdcp2_supported);
2272         if (ret) {
2273                 hdcp->hdcp2_supported = false;
2274                 kfree(dig_port->hdcp_port_data.streams);
2275                 return ret;
2276         }
2277
2278         hdcp->shim = shim;
2279         mutex_init(&hdcp->mutex);
2280         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2281         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2282         init_waitqueue_head(&hdcp->cp_irq_queue);
2283
2284         return 0;
2285 }
2286
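/*
 * Build the stream list for the port: a single stream 0 for non-MST, or one
 * entry per connected MST connector on this port with its VCPI as stream id.
 */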
2287 static int
2288 intel_hdcp_set_streams(struct intel_digital_port *dig_port,
2289                        struct intel_atomic_state *state)
2290 {
2291         struct drm_connector_list_iter conn_iter;
2292         struct intel_digital_port *conn_dig_port;
2293         struct intel_connector *connector;
2294         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2295         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2296
2297         if (!intel_encoder_is_mst(&dig_port->base)) {
2298                 data->k = 1;
2299                 data->streams[0].stream_id = 0;
2300                 return 0;
2301         }
2302
2303         data->k = 0;
2304
2305         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
2306         for_each_intel_connector_iter(connector, &conn_iter) {
2307                 if (connector->base.status == connector_status_disconnected)
2308                         continue;
2309
2310                 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
2311                         continue;
2312
2313                 conn_dig_port = intel_attached_dig_port(connector);
2314                 if (conn_dig_port != dig_port)
2315                         continue;
2316
2317                 data->streams[data->k].stream_id =
2318                         intel_conn_to_vcpi(&state->base, connector);
2319                 data->k++;
2320
2321                 /* if there is only one active stream */
2322                 if (dig_port->dp.active_mst_links <= 1)
2323                         break;
2324         }
2325         drm_connector_list_iter_end(&conn_iter);
2326
2327         if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
2328                 return -EINVAL;
2329
2330         return 0;
2331 }
2332
2333 int intel_hdcp_enable(struct intel_atomic_state *state,
2334                       struct intel_encoder *encoder,
2335                       const struct intel_crtc_state *pipe_config,
2336                       const struct drm_connector_state *conn_state)
2337 {
2338         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2339         struct intel_connector *connector =
2340                 to_intel_connector(conn_state->connector);
2341         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2342         struct intel_hdcp *hdcp = &connector->hdcp;
2343         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2344         int ret = -EINVAL;
2345
2346         if (!hdcp->shim)
2347                 return -ENOENT;
2348
2349         if (!connector->encoder) {
2350                 drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n",
2351                         connector->base.name, connector->base.base.id);
2352                 return -ENODEV;
2353         }
2354
2355         mutex_lock(&hdcp->mutex);
2356         mutex_lock(&dig_port->hdcp_mutex);
2357         drm_WARN_ON(&i915->drm,
2358                     hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2359         hdcp->content_type = (u8)conn_state->content_type;
2360
2361         if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2362                 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2363                 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2364         } else {
2365                 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2366                 hdcp->stream_transcoder = INVALID_TRANSCODER;
2367         }
2368
2369         if (DISPLAY_VER(i915) >= 12)
2370                 dig_port->hdcp_port_data.hdcp_transcoder =
2371                         intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
2372
2373         /*
2374          * HDCP2.2 is more secure than HDCP1.4, so if the setup is capable
2375          * of HDCP2.2, it is preferred over HDCP1.4.
2376          */
2377         if (intel_hdcp2_capable(connector)) {
2378                 ret = intel_hdcp_set_streams(dig_port, state);
2379                 if (!ret) {
2380                         ret = _intel_hdcp2_enable(connector);
2381                         if (!ret)
2382                                 check_link_interval =
2383                                         DRM_HDCP2_CHECK_PERIOD_MS;
2384                 } else {
2385                         drm_dbg_kms(&i915->drm,
2386                                     "Set content streams failed: (%d)\n",
2387                                     ret);
2388                 }
2389         }
2390
2391         /*
2392          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2393          * be attempted.
2394          */
2395         if (ret && intel_hdcp_capable(connector) &&
2396             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2397                 ret = _intel_hdcp_enable(connector);
2398         }
2399
2400         if (!ret) {
2401                 schedule_delayed_work(&hdcp->check_work, check_link_interval);
2402                 intel_hdcp_update_value(connector,
2403                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2404                                         true);
2405         }
2406
2407         mutex_unlock(&dig_port->hdcp_mutex);
2408         mutex_unlock(&hdcp->mutex);
2409         return ret;
2410 }
2411
2412 int intel_hdcp_disable(struct intel_connector *connector)
2413 {
2414         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2415         struct intel_hdcp *hdcp = &connector->hdcp;
2416         int ret = 0;
2417
2418         if (!hdcp->shim)
2419                 return -ENOENT;
2420
2421         mutex_lock(&hdcp->mutex);
2422         mutex_lock(&dig_port->hdcp_mutex);
2423
2424         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2425                 goto out;
2426
2427         intel_hdcp_update_value(connector,
2428                                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2429         if (hdcp->hdcp2_encrypted)
2430                 ret = _intel_hdcp2_disable(connector, false);
2431         else if (hdcp->hdcp_encrypted)
2432                 ret = _intel_hdcp_disable(connector);
2433
2434 out:
2435         mutex_unlock(&dig_port->hdcp_mutex);
2436         mutex_unlock(&hdcp->mutex);
2437         cancel_delayed_work_sync(&hdcp->check_work);
2438         return ret;
2439 }
2440
2441 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2442                             struct intel_encoder *encoder,
2443                             const struct intel_crtc_state *crtc_state,
2444                             const struct drm_connector_state *conn_state)
2445 {
2446         struct intel_connector *connector =
2447                                 to_intel_connector(conn_state->connector);
2448         struct intel_hdcp *hdcp = &connector->hdcp;
2449         bool content_protection_type_changed, desired_and_not_enabled = false;
2450
2451         if (!connector->hdcp.shim)
2452                 return;
2453
2454         content_protection_type_changed =
2455                 (conn_state->hdcp_content_type != hdcp->content_type &&
2456                  conn_state->content_protection !=
2457                  DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2458
2459         /*
2460          * If a content type change is requested during an HDCP encryption
2461          * session, disable HDCP and re-enable it with the new type value.
2462          */
2463         if (conn_state->content_protection ==
2464             DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2465             content_protection_type_changed)
2466                 intel_hdcp_disable(connector);
2467
2468         /*
2469          * Mark the hdcp state as DESIRED after the hdcp disable done for
2470          * the type change procedure.
2471          */
2472         if (content_protection_type_changed) {
2473                 mutex_lock(&hdcp->mutex);
2474                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2475                 drm_connector_get(&connector->base);
2476                 schedule_work(&hdcp->prop_work);
2477                 mutex_unlock(&hdcp->mutex);
2478         }
2479
2480         if (conn_state->content_protection ==
2481             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2482                 mutex_lock(&hdcp->mutex);
2483                 /* Avoid enabling hdcp if it is already ENABLED */
2484                 desired_and_not_enabled =
2485                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2486                 mutex_unlock(&hdcp->mutex);
2487                 /*
2488                  * If HDCP is already ENABLED and the CP property is DESIRED,
2489                  * schedule prop_work to report the correct CP property to userspace.
2490                  */
2491                 if (!desired_and_not_enabled && !content_protection_type_changed) {
2492                         drm_connector_get(&connector->base);
2493                         schedule_work(&hdcp->prop_work);
2494                 }
2495         }
2496
2497         if (desired_and_not_enabled || content_protection_type_changed)
2498                 intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2499 }
2500
2501 void intel_hdcp_component_fini(struct drm_i915_private *i915)
2502 {
2503         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2504         if (!i915->display.hdcp.comp_added) {
2505                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2506                 return;
2507         }
2508
2509         i915->display.hdcp.comp_added = false;
2510         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2511
2512         if (intel_hdcp_gsc_cs_required(i915))
2513                 intel_hdcp_gsc_fini(i915);
2514         else
2515                 component_del(i915->drm.dev, &i915_hdcp_ops);
2516 }
2517
2518 void intel_hdcp_cleanup(struct intel_connector *connector)
2519 {
2520         struct intel_hdcp *hdcp = &connector->hdcp;
2521
2522         if (!hdcp->shim)
2523                 return;
2524
2525         /*
2526          * If the connector is registered, it's possible userspace could kick
2527          * off another HDCP enable, which would re-spawn the workers.
2528          */
2529         drm_WARN_ON(connector->base.dev,
2530                 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2531
2532         /*
2533          * Now that the connector is not registered, check_work won't be run,
2534          * but cancel any outstanding instances of it
2535          */
2536         cancel_delayed_work_sync(&hdcp->check_work);
2537
2538         /*
2539          * We don't cancel prop_work in the same way as check_work since it
2540          * requires connection_mutex which could be held while calling this
2541          * function. Instead, we rely on the connector references grabbed before
2542          * scheduling prop_work to ensure the connector is alive when prop_work
2543          * is run. So if we're in the destroy path (which is where this
2544          * function should be called), we're "guaranteed" that prop_work is not
2545          * active (tl;dr This Should Never Happen).
2546          */
2547         drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2548
2549         mutex_lock(&hdcp->mutex);
2550         hdcp->shim = NULL;
2551         mutex_unlock(&hdcp->mutex);
2552 }
2553
2554 void intel_hdcp_atomic_check(struct drm_connector *connector,
2555                              struct drm_connector_state *old_state,
2556                              struct drm_connector_state *new_state)
2557 {
2558         u64 old_cp = old_state->content_protection;
2559         u64 new_cp = new_state->content_protection;
2560         struct drm_crtc_state *crtc_state;
2561
2562         if (!new_state->crtc) {
2563                 /*
2564                  * If the connector is being disabled with CP enabled, mark it
2565                  * desired so it's re-enabled when the connector is brought back
2566                  */
2567                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2568                         new_state->content_protection =
2569                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2570                 return;
2571         }
2572
2573         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2574                                                    new_state->crtc);
2575         /*
2576          * Fix the HDCP uapi content protection state in case of modeset.
2577          * FIXME: As per HDCP content protection property uapi doc, an uevent()
2578          * needs to be sent if there is a transition from ENABLED->DESIRED.
2579          */
2580         if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2581             (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2582             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2583                 new_state->content_protection =
2584                         DRM_MODE_CONTENT_PROTECTION_DESIRED;
2585
2586         /*
2587          * Nothing to do if the state didn't change, or if HDCP was activated
2588          * since the last commit and the hdcp content type didn't change either.
2589          */
2590         if (old_cp == new_cp ||
2591             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2592              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2593                 if (old_state->hdcp_content_type ==
2594                                 new_state->hdcp_content_type)
2595                         return;
2596         }
2597
2598         crtc_state->mode_changed = true;
2599 }
2600
2601 /* Handles the CP_IRQ raised from the DP HDCP sink */
2602 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2603 {
2604         struct intel_hdcp *hdcp = &connector->hdcp;
2605
2606         if (!hdcp->shim)
2607                 return;
2608
2609         atomic_inc(&connector->hdcp.cp_irq_count);
2610         wake_up_all(&connector->hdcp.cp_irq_queue);
2611
2612         schedule_delayed_work(&hdcp->check_work, 0);
2613 }