1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
4  * Copyright © 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <[email protected]>
8  * Ramalingam C <[email protected]>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/display/drm_hdcp_helper.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_drv.h"
19 #include "i915_reg.h"
20 #include "intel_connector.h"
21 #include "intel_de.h"
22 #include "intel_display_power.h"
23 #include "intel_display_power_well.h"
24 #include "intel_display_types.h"
25 #include "intel_hdcp.h"
26 #include "intel_hdcp_gsc.h"
27 #include "intel_hdcp_regs.h"
28 #include "intel_pcode.h"
29
30 #define KEY_LOAD_TRIES  5
31 #define HDCP2_LC_RETRY_CNT                      3
32
33 static int intel_conn_to_vcpi(struct drm_atomic_state *state,
34                               struct intel_connector *connector)
35 {
36         struct drm_dp_mst_topology_mgr *mgr;
37         struct drm_dp_mst_atomic_payload *payload;
38         struct drm_dp_mst_topology_state *mst_state;
39         int vcpi = 0;
40
41         /* For HDMI this is forced to be 0x0. For DP SST this is also 0x0. */
42         if (!connector->port)
43                 return 0;
44         mgr = connector->port->mgr;
45
46         drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
47         mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
48         payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
49         if (drm_WARN_ON(mgr->dev, !payload))
50                 goto out;
51
52         vcpi = payload->vcpi;
53         if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
54                 vcpi = 0;
55                 goto out;
56         }
57 out:
58         return vcpi;
59 }
60
61 /*
62  * intel_hdcp_required_content_stream selects the highest common HDCP
63  * content_type for all streams in a DP MST topology, because the security
64  * f/w has no provision to mark a content_type for each stream separately:
65  * it marks all available streams with the content_type provided at the
66  * time of port authentication. This may prevent userspace from using type1
67  * content on an HDCP 2.2 capable sink when other sinks in the DP MST
68  * topology are not HDCP 2.2 capable. Though not compulsory, the security
69  * f/w should change its policy to mark different content_types per stream.
70  */
71 static void
72 intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
73 {
74         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
75         bool enforce_type0 = false;
76         int k;
77
78         if (dig_port->hdcp_auth_status)
79                 return;
80
81         if (!dig_port->hdcp_mst_type1_capable)
82                 enforce_type0 = true;
83
84         /*
85          * Apply common protection level across all streams in DP MST Topology.
86          * Use highest supported content type for all streams in DP MST Topology.
87          */
88         for (k = 0; k < data->k; k++)
89                 data->streams[k].stream_type =
90                         enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
91 }
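/*
 * Worked example (illustrative): with three streams in the MST topology
 * where one sink is not type-1 capable, dig_port->hdcp_mst_type1_capable is
 * false, so all of data->streams[0..2].stream_type are forced to
 * DRM_MODE_HDCP_CONTENT_TYPE0, even for the sinks that could have carried
 * type-1 content.
 */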
92
93 static void intel_hdcp_prepare_streams(struct intel_connector *connector)
94 {
95         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
96         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
97         struct intel_hdcp *hdcp = &connector->hdcp;
98
99         if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
100                 data->streams[0].stream_type = hdcp->content_type;
101         } else {
102                 intel_hdcp_required_content_stream(dig_port);
103         }
104 }
105
106 static
107 bool intel_hdcp_is_ksv_valid(u8 *ksv)
108 {
109         int i, ones = 0;
110         /* KSV has 20 1's and 20 0's */
111         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
112                 ones += hweight8(ksv[i]);
113         if (ones != 20)
114                 return false;
115
116         return true;
117 }
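/*
 * Example (illustrative values): a KSV of { 0xf0, 0xf0, 0xf0, 0xf0, 0xf0 }
 * has hweight8() contributions of 4 + 4 + 4 + 4 + 4 = 20 set bits and is
 * accepted, while { 0xff, 0xff, 0xff, 0xff, 0xff } (40 ones) is rejected.
 */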
118
119 static
120 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
121                                const struct intel_hdcp_shim *shim, u8 *bksv)
122 {
123         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
124         int ret, i, tries = 2;
125
126         /* HDCP spec states that we must retry the bksv if it is invalid */
127         for (i = 0; i < tries; i++) {
128                 ret = shim->read_bksv(dig_port, bksv);
129                 if (ret)
130                         return ret;
131                 if (intel_hdcp_is_ksv_valid(bksv))
132                         break;
133         }
134         if (i == tries) {
135                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
136                 return -ENODEV;
137         }
138
139         return 0;
140 }
141
142 /* Is HDCP1.4 capable on Platform and Sink */
143 bool intel_hdcp_capable(struct intel_connector *connector)
144 {
145         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
146         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
147         bool capable = false;
148         u8 bksv[5];
149
150         if (!shim)
151                 return capable;
152
153         if (shim->hdcp_capable) {
154                 shim->hdcp_capable(dig_port, &capable);
155         } else {
156                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
157                         capable = true;
158         }
159
160         return capable;
161 }
162
163 /* Is HDCP2.2 capable on Platform and Sink */
164 bool intel_hdcp2_capable(struct intel_connector *connector)
165 {
166         struct drm_i915_private *i915 = to_i915(connector->base.dev);
167         struct intel_hdcp *hdcp = &connector->hdcp;
168         bool capable = false;
169
170         /* I915 support for HDCP2.2 */
171         if (!hdcp->hdcp2_supported)
172                 return false;
173
174         /* If MTL+ make sure gsc is loaded and proxy is setup */
175         if (intel_hdcp_gsc_cs_required(i915)) {
176                 if (!intel_hdcp_gsc_check_status(i915))
177                         return false;
178         }
179
180         /* The MEI/GSC backend, whichever is in use, must be bound and ready */
181         mutex_lock(&i915->display.hdcp.hdcp_mutex);
182         if (!i915->display.hdcp.comp_added || !i915->display.hdcp.arbiter) {
183                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
184                 return false;
185         }
186         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
187
188         /* Sink's capability for HDCP2.2 */
189         hdcp->shim->hdcp_2_2_capable(connector, &capable);
190
191         return capable;
192 }
193
194 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
195                               enum transcoder cpu_transcoder, enum port port)
196 {
197         return intel_de_read(i915,
198                              HDCP_STATUS(i915, cpu_transcoder, port)) &
199                 HDCP_STATUS_ENC;
200 }
201
202 static bool intel_hdcp2_in_use(struct drm_i915_private *i915,
203                                enum transcoder cpu_transcoder, enum port port)
204 {
205         return intel_de_read(i915,
206                              HDCP2_STATUS(i915, cpu_transcoder, port)) &
207                 LINK_ENCRYPTION_STATUS;
208 }
209
210 static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
211                                     const struct intel_hdcp_shim *shim)
212 {
213         int ret, read_ret;
214         bool ksv_ready;
215
216         /* Poll for ksv list ready (spec says max time allowed is 5s) */
217         ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
218                                                          &ksv_ready),
219                          read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
220                          100 * 1000);
221         if (ret)
222                 return ret;
223         if (read_ret)
224                 return read_ret;
225         if (!ksv_ready)
226                 return -ETIMEDOUT;
227
228         return 0;
229 }
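/*
 * Note on the __wait_for() arguments above (assuming the usual i915
 * __wait_for() semantics): the total timeout is 5s (5 * 1000 * 1000 us),
 * and the sleep between read_ksv_ready() polls starts at 1000us and
 * roughly doubles on each iteration, capped at 100 * 1000us.
 */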
230
231 static bool hdcp_key_loadable(struct drm_i915_private *i915)
232 {
233         enum i915_power_well_id id;
234         intel_wakeref_t wakeref;
235         bool enabled = false;
236
237         /*
238          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
239          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
240          */
241         if (IS_HASWELL(i915) || IS_BROADWELL(i915))
242                 id = HSW_DISP_PW_GLOBAL;
243         else
244                 id = SKL_DISP_PW_1;
245
246         /* PG1 (power well #1) needs to be enabled */
247         with_intel_runtime_pm(&i915->runtime_pm, wakeref)
248                 enabled = intel_display_power_well_is_enabled(i915, id);
249
250         /*
251          * Another requirement for HDCP key loadability is that the cdclk PLL is
252          * enabled. Without an active crtc we won't land here, so assume that
253          * cdclk is already on.
254          */
255
256         return enabled;
257 }
258
259 static void intel_hdcp_clear_keys(struct drm_i915_private *i915)
260 {
261         intel_de_write(i915, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
262         intel_de_write(i915, HDCP_KEY_STATUS,
263                        HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
264 }
265
266 static int intel_hdcp_load_keys(struct drm_i915_private *i915)
267 {
268         int ret;
269         u32 val;
270
271         val = intel_de_read(i915, HDCP_KEY_STATUS);
272         if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
273                 return 0;
274
275         /*
276          * On HSW and BDW the HW loads the HDCP1.4 key when the display comes
277          * out of reset, so if the key is not already loaded it's an error state.
278          */
279         if (IS_HASWELL(i915) || IS_BROADWELL(i915))
280                 if (!(intel_de_read(i915, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
281                         return -ENXIO;
282
283         /*
284          * Initiate loading the HDCP key from fuses.
285          *
286          * On BXT+ platforms the HDCP key needs to be loaded by SW. Only display
287          * version 9 platforms (minus BXT) differ in the key load trigger
288          * process from other platforms: they use the GT Driver Mailbox
289          * interface.
290          */
291         if (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915)) {
292                 ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
293                 if (ret) {
294                         drm_err(&i915->drm,
295                                 "Failed to initiate HDCP key load (%d)\n",
296                                 ret);
297                         return ret;
298                 }
299         } else {
300                 intel_de_write(i915, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
301         }
302
303         /* Wait for the keys to load (500us) */
304         ret = __intel_wait_for_register(&i915->uncore, HDCP_KEY_STATUS,
305                                         HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
306                                         10, 1, &val);
307         if (ret)
308                 return ret;
309         else if (!(val & HDCP_KEY_LOAD_STATUS))
310                 return -ENXIO;
311
312         /* Send Aksv over to PCH display for use in authentication */
313         intel_de_write(i915, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
314
315         return 0;
316 }
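/*
 * Usage sketch (mirrors the caller in _intel_hdcp_enable() below): the key
 * load is retried a few times, clearing the key block between attempts:
 *
 *	for (i = 0; i < KEY_LOAD_TRIES; i++) {
 *		ret = intel_hdcp_load_keys(i915);
 *		if (!ret)
 *			break;
 *		intel_hdcp_clear_keys(i915);
 *	}
 */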
317
318 /* Write the next 32 bits of SHA-1 text and wait for the HW to accept it */
319 static int intel_write_sha_text(struct drm_i915_private *i915, u32 sha_text)
320 {
321         intel_de_write(i915, HDCP_SHA_TEXT, sha_text);
322         if (intel_de_wait_for_set(i915, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
323                 drm_err(&i915->drm, "Timed out waiting for SHA1 ready\n");
324                 return -ETIMEDOUT;
325         }
326         return 0;
327 }
328
329 static
330 u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *i915,
331                                 enum transcoder cpu_transcoder, enum port port)
332 {
333         if (DISPLAY_VER(i915) >= 12) {
334                 switch (cpu_transcoder) {
335                 case TRANSCODER_A:
336                         return HDCP_TRANSA_REP_PRESENT |
337                                HDCP_TRANSA_SHA1_M0;
338                 case TRANSCODER_B:
339                         return HDCP_TRANSB_REP_PRESENT |
340                                HDCP_TRANSB_SHA1_M0;
341                 case TRANSCODER_C:
342                         return HDCP_TRANSC_REP_PRESENT |
343                                HDCP_TRANSC_SHA1_M0;
344                 case TRANSCODER_D:
345                         return HDCP_TRANSD_REP_PRESENT |
346                                HDCP_TRANSD_SHA1_M0;
347                 default:
348                         drm_err(&i915->drm, "Unknown transcoder %d\n",
349                                 cpu_transcoder);
350                         return -EINVAL;
351                 }
352         }
353
354         switch (port) {
355         case PORT_A:
356                 return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
357         case PORT_B:
358                 return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
359         case PORT_C:
360                 return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
361         case PORT_D:
362                 return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
363         case PORT_E:
364                 return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
365         default:
366                 drm_err(&i915->drm, "Unknown port %d\n", port);
367                 return -EINVAL;
368         }
369 }
370
371 static
372 int intel_hdcp_validate_v_prime(struct intel_connector *connector,
373                                 const struct intel_hdcp_shim *shim,
374                                 u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
375 {
376         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
377         struct drm_i915_private *i915 = to_i915(connector->base.dev);
378         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
379         enum port port = dig_port->base.port;
380         u32 vprime, sha_text, sha_leftovers, rep_ctl;
381         int ret, i, j, sha_idx;
382
383         /* Process V' values from the receiver */
384         for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
385                 ret = shim->read_v_prime_part(dig_port, i, &vprime);
386                 if (ret)
387                         return ret;
388                 intel_de_write(i915, HDCP_SHA_V_PRIME(i), vprime);
389         }
390
391         /*
392          * We need to write the concatenation of all device KSVs, BINFO (DP) ||
393          * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
394          * stream is written via the HDCP_SHA_TEXT register in 32-bit
395          * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
396          * index will keep track of our progress through the 64 bytes as well as
397          * helping us work the 40-bit KSVs through our 32-bit register.
398          *
399          * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
400          */
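        /*
         * Worked example (illustrative): with a single downstream device the
         * byte stream is 5 KSV bytes + 2 BSTATUS/BINFO bytes + 8 M0 bytes.
         * The first 32-bit write consumes KSV bytes 0-3, leaving
         * sha_leftovers == 1, so the "sha_leftovers == 1" branch below packs
         * the last KSV byte together with BSTATUS and lets the hardware
         * append M0 via HDCP_REP_CTL.
         */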
401         sha_idx = 0;
402         sha_text = 0;
403         sha_leftovers = 0;
404         rep_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port);
405         intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
406         for (i = 0; i < num_downstream; i++) {
407                 unsigned int sha_empty;
408                 u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
409
410                 /* Fill up the empty slots in sha_text and write it out */
411                 sha_empty = sizeof(sha_text) - sha_leftovers;
412                 for (j = 0; j < sha_empty; j++) {
413                         u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
414                         sha_text |= ksv[j] << off;
415                 }
416
417                 ret = intel_write_sha_text(i915, sha_text);
418                 if (ret < 0)
419                         return ret;
420
421                 /* The programming guide says to rewrite HDCP_REP_CTL every 64 bytes */
422                 sha_idx += sizeof(sha_text);
423                 if (!(sha_idx % 64))
424                         intel_de_write(i915, HDCP_REP_CTL,
425                                        rep_ctl | HDCP_SHA1_TEXT_32);
426
427                 /* Store the leftover bytes from the ksv in sha_text */
428                 sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
429                 sha_text = 0;
430                 for (j = 0; j < sha_leftovers; j++)
431                         sha_text |= ksv[sha_empty + j] <<
432                                         ((sizeof(sha_text) - j - 1) * 8);
433
434                 /*
435                  * If we still have room in sha_text for more data, continue.
436                  * Otherwise, write it out immediately.
437                  */
438                 if (sizeof(sha_text) > sha_leftovers)
439                         continue;
440
441                 ret = intel_write_sha_text(i915, sha_text);
442                 if (ret < 0)
443                         return ret;
444                 sha_leftovers = 0;
445                 sha_text = 0;
446                 sha_idx += sizeof(sha_text);
447         }
448
449         /*
450          * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
451          * bytes are leftover from the last ksv, we might be able to fit them
452          * all in sha_text (first 2 cases), or we might need to split them up
453          * into 2 writes (last 2 cases).
454          */
455         if (sha_leftovers == 0) {
456                 /* Write 16 bits of text, 16 bits of M0 */
457                 intel_de_write(i915, HDCP_REP_CTL,
458                                rep_ctl | HDCP_SHA1_TEXT_16);
459                 ret = intel_write_sha_text(i915,
460                                            bstatus[0] << 8 | bstatus[1]);
461                 if (ret < 0)
462                         return ret;
463                 sha_idx += sizeof(sha_text);
464
465                 /* Write 32 bits of M0 */
466                 intel_de_write(i915, HDCP_REP_CTL,
467                                rep_ctl | HDCP_SHA1_TEXT_0);
468                 ret = intel_write_sha_text(i915, 0);
469                 if (ret < 0)
470                         return ret;
471                 sha_idx += sizeof(sha_text);
472
473                 /* Write 16 bits of M0 */
474                 intel_de_write(i915, HDCP_REP_CTL,
475                                rep_ctl | HDCP_SHA1_TEXT_16);
476                 ret = intel_write_sha_text(i915, 0);
477                 if (ret < 0)
478                         return ret;
479                 sha_idx += sizeof(sha_text);
480
481         } else if (sha_leftovers == 1) {
482                 /* Write 24 bits of text, 8 bits of M0 */
483                 intel_de_write(i915, HDCP_REP_CTL,
484                                rep_ctl | HDCP_SHA1_TEXT_24);
485                 sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
486                 /* Only 24-bits of data, must be in the LSB */
487                 sha_text = (sha_text & 0xffffff00) >> 8;
488                 ret = intel_write_sha_text(i915, sha_text);
489                 if (ret < 0)
490                         return ret;
491                 sha_idx += sizeof(sha_text);
492
493                 /* Write 32 bits of M0 */
494                 intel_de_write(i915, HDCP_REP_CTL,
495                                rep_ctl | HDCP_SHA1_TEXT_0);
496                 ret = intel_write_sha_text(i915, 0);
497                 if (ret < 0)
498                         return ret;
499                 sha_idx += sizeof(sha_text);
500
501                 /* Write 24 bits of M0 */
502                 intel_de_write(i915, HDCP_REP_CTL,
503                                rep_ctl | HDCP_SHA1_TEXT_8);
504                 ret = intel_write_sha_text(i915, 0);
505                 if (ret < 0)
506                         return ret;
507                 sha_idx += sizeof(sha_text);
508
509         } else if (sha_leftovers == 2) {
510                 /* Write 32 bits of text */
511                 intel_de_write(i915, HDCP_REP_CTL,
512                                rep_ctl | HDCP_SHA1_TEXT_32);
513                 sha_text |= bstatus[0] << 8 | bstatus[1];
514                 ret = intel_write_sha_text(i915, sha_text);
515                 if (ret < 0)
516                         return ret;
517                 sha_idx += sizeof(sha_text);
518
519                 /* Write 64 bits of M0 */
520                 intel_de_write(i915, HDCP_REP_CTL,
521                                rep_ctl | HDCP_SHA1_TEXT_0);
522                 for (i = 0; i < 2; i++) {
523                         ret = intel_write_sha_text(i915, 0);
524                         if (ret < 0)
525                                 return ret;
526                         sha_idx += sizeof(sha_text);
527                 }
528
529                 /*
530                  * Terminate the SHA-1 stream by hand. For the other leftover
531                  * cases this is appended by the hardware.
532                  */
533                 intel_de_write(i915, HDCP_REP_CTL,
534                                rep_ctl | HDCP_SHA1_TEXT_32);
535                 sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
536                 ret = intel_write_sha_text(i915, sha_text);
537                 if (ret < 0)
538                         return ret;
539                 sha_idx += sizeof(sha_text);
540         } else if (sha_leftovers == 3) {
541                 /* Write 32 bits of text (filled from LSB) */
542                 intel_de_write(i915, HDCP_REP_CTL,
543                                rep_ctl | HDCP_SHA1_TEXT_32);
544                 sha_text |= bstatus[0];
545                 ret = intel_write_sha_text(i915, sha_text);
546                 if (ret < 0)
547                         return ret;
548                 sha_idx += sizeof(sha_text);
549
550                 /* Write 8 bits of text (filled from LSB), 24 bits of M0 */
551                 intel_de_write(i915, HDCP_REP_CTL,
552                                rep_ctl | HDCP_SHA1_TEXT_8);
553                 ret = intel_write_sha_text(i915, bstatus[1]);
554                 if (ret < 0)
555                         return ret;
556                 sha_idx += sizeof(sha_text);
557
558                 /* Write 32 bits of M0 */
559                 intel_de_write(i915, HDCP_REP_CTL,
560                                rep_ctl | HDCP_SHA1_TEXT_0);
561                 ret = intel_write_sha_text(i915, 0);
562                 if (ret < 0)
563                         return ret;
564                 sha_idx += sizeof(sha_text);
565
566                 /* Write 8 bits of M0 */
567                 intel_de_write(i915, HDCP_REP_CTL,
568                                rep_ctl | HDCP_SHA1_TEXT_24);
569                 ret = intel_write_sha_text(i915, 0);
570                 if (ret < 0)
571                         return ret;
572                 sha_idx += sizeof(sha_text);
573         } else {
574                 drm_dbg_kms(&i915->drm, "Invalid number of leftovers %d\n",
575                             sha_leftovers);
576                 return -EINVAL;
577         }
578
579         intel_de_write(i915, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
580         /* Fill up to 64-4 bytes with zeros (leave the last write for length) */
581         while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
582                 ret = intel_write_sha_text(i915, 0);
583                 if (ret < 0)
584                         return ret;
585                 sha_idx += sizeof(sha_text);
586         }
587
588         /*
589          * Last write gets the length of the concatenation in bits. That is:
590          *  - 5 bytes per device
591          *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
592          */
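        /*
         * Worked example (added for clarity): with three downstream devices,
         * (3 * 5 + 10) * 8 = 200 bits.
         */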
593         sha_text = (num_downstream * 5 + 10) * 8;
594         ret = intel_write_sha_text(i915, sha_text);
595         if (ret < 0)
596                 return ret;
597
598         /* Tell the HW we're done with the hash and wait for it to ACK */
599         intel_de_write(i915, HDCP_REP_CTL,
600                        rep_ctl | HDCP_SHA1_COMPLETE_HASH);
601         if (intel_de_wait_for_set(i915, HDCP_REP_CTL,
602                                   HDCP_SHA1_COMPLETE, 1)) {
603                 drm_err(&i915->drm, "Timed out waiting for SHA1 complete\n");
604                 return -ETIMEDOUT;
605         }
606         if (!(intel_de_read(i915, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
607                 drm_dbg_kms(&i915->drm, "SHA-1 mismatch, HDCP failed\n");
608                 return -ENXIO;
609         }
610
611         return 0;
612 }
613
614 /* Implements Part 2 of the HDCP authorization procedure */
615 static
616 int intel_hdcp_auth_downstream(struct intel_connector *connector)
617 {
618         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
619         struct drm_i915_private *i915 = to_i915(connector->base.dev);
620         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
621         u8 bstatus[2], num_downstream, *ksv_fifo;
622         int ret, i, tries = 3;
623
624         ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
625         if (ret) {
626                 drm_dbg_kms(&i915->drm,
627                             "KSV list failed to become ready (%d)\n", ret);
628                 return ret;
629         }
630
631         ret = shim->read_bstatus(dig_port, bstatus);
632         if (ret)
633                 return ret;
634
635         if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
636             DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
637                 drm_dbg_kms(&i915->drm, "Max Topology Limit Exceeded\n");
638                 return -EPERM;
639         }
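        /*
         * For reference (per the HDCP 1.x BStatus layout, assumed here rather
         * than restated in this file): bstatus[0] carries DEVICE_COUNT in
         * bits 6:0 and MAX_DEVS_EXCEEDED in bit 7, while bstatus[1] carries
         * DEPTH in bits 2:0 and MAX_CASCADE_EXCEEDED in bit 3; this is what
         * DRM_HDCP_MAX_DEVICE_EXCEEDED(), DRM_HDCP_MAX_CASCADE_EXCEEDED()
         * and DRM_HDCP_NUM_DOWNSTREAM() extract.
         */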
640
641         /*
642          * When a repeater reports a device count of 0, the HDCP1.4 spec allows
643          * disabling HDCP encryption, which implies that such a repeater can't
644          * have a display of its own. Since there is no consumption of encrypted
645          * content in a repeater with 0 downstream devices, fail the
646          * authentication.
647          */
648         num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
649         if (num_downstream == 0) {
650                 drm_dbg_kms(&i915->drm,
651                             "Repeater with zero downstream devices\n");
652                 return -EINVAL;
653         }
654
655         ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
656         if (!ksv_fifo) {
657                 drm_dbg_kms(&i915->drm, "Out of mem: ksv_fifo\n");
658                 return -ENOMEM;
659         }
660
661         ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
662         if (ret)
663                 goto err;
664
665         if (drm_hdcp_check_ksvs_revoked(&i915->drm, ksv_fifo,
666                                         num_downstream) > 0) {
667                 drm_err(&i915->drm, "Revoked Ksv(s) in ksv_fifo\n");
668                 ret = -EPERM;
669                 goto err;
670         }
671
672         /*
673          * When V prime mismatches, the DP spec mandates re-reading
674          * V prime at least twice.
675          */
676         for (i = 0; i < tries; i++) {
677                 ret = intel_hdcp_validate_v_prime(connector, shim,
678                                                   ksv_fifo, num_downstream,
679                                                   bstatus);
680                 if (!ret)
681                         break;
682         }
683
684         if (i == tries) {
685                 drm_dbg_kms(&i915->drm,
686                             "V Prime validation failed (%d)\n", ret);
687                 goto err;
688         }
689
690         drm_dbg_kms(&i915->drm, "HDCP is enabled (%d downstream devices)\n",
691                     num_downstream);
692         ret = 0;
693 err:
694         kfree(ksv_fifo);
695         return ret;
696 }
697
698 /* Implements Part 1 of the HDCP authorization procedure */
699 static int intel_hdcp_auth(struct intel_connector *connector)
700 {
701         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
702         struct drm_i915_private *i915 = to_i915(connector->base.dev);
703         struct intel_hdcp *hdcp = &connector->hdcp;
704         const struct intel_hdcp_shim *shim = hdcp->shim;
705         enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
706         enum port port = dig_port->base.port;
707         unsigned long r0_prime_gen_start;
708         int ret, i, tries = 2;
709         union {
710                 u32 reg[2];
711                 u8 shim[DRM_HDCP_AN_LEN];
712         } an;
713         union {
714                 u32 reg[2];
715                 u8 shim[DRM_HDCP_KSV_LEN];
716         } bksv;
717         union {
718                 u32 reg;
719                 u8 shim[DRM_HDCP_RI_LEN];
720         } ri;
721         bool repeater_present, hdcp_capable;
722
723         /*
724          * Detects whether the display is HDCP capable. Although we check for
725          * valid Bksv below, the HDCP over DP spec requires that we check
726          * whether the display supports HDCP before we write An. For HDMI
727          * displays, this is not necessary.
728          */
729         if (shim->hdcp_capable) {
730                 ret = shim->hdcp_capable(dig_port, &hdcp_capable);
731                 if (ret)
732                         return ret;
733                 if (!hdcp_capable) {
734                         drm_dbg_kms(&i915->drm,
735                                     "Panel is not HDCP capable\n");
736                         return -EINVAL;
737                 }
738         }
739
740         /* Initialize An with 2 random values and acquire it */
741         for (i = 0; i < 2; i++)
742                 intel_de_write(i915,
743                                HDCP_ANINIT(i915, cpu_transcoder, port),
744                                get_random_u32());
745         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
746                        HDCP_CONF_CAPTURE_AN);
747
748         /* Wait for An to be acquired */
749         if (intel_de_wait_for_set(i915,
750                                   HDCP_STATUS(i915, cpu_transcoder, port),
751                                   HDCP_STATUS_AN_READY, 1)) {
752                 drm_err(&i915->drm, "Timed out waiting for An\n");
753                 return -ETIMEDOUT;
754         }
755
756         an.reg[0] = intel_de_read(i915,
757                                   HDCP_ANLO(i915, cpu_transcoder, port));
758         an.reg[1] = intel_de_read(i915,
759                                   HDCP_ANHI(i915, cpu_transcoder, port));
760         ret = shim->write_an_aksv(dig_port, an.shim);
761         if (ret)
762                 return ret;
763
764         r0_prime_gen_start = jiffies;
765
766         memset(&bksv, 0, sizeof(bksv));
767
768         ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
769         if (ret < 0)
770                 return ret;
771
772         if (drm_hdcp_check_ksvs_revoked(&i915->drm, bksv.shim, 1) > 0) {
773                 drm_err(&i915->drm, "BKSV is revoked\n");
774                 return -EPERM;
775         }
776
777         intel_de_write(i915, HDCP_BKSVLO(i915, cpu_transcoder, port),
778                        bksv.reg[0]);
779         intel_de_write(i915, HDCP_BKSVHI(i915, cpu_transcoder, port),
780                        bksv.reg[1]);
781
782         ret = shim->repeater_present(dig_port, &repeater_present);
783         if (ret)
784                 return ret;
785         if (repeater_present)
786                 intel_de_write(i915, HDCP_REP_CTL,
787                                intel_hdcp_get_repeater_ctl(i915, cpu_transcoder, port));
788
789         ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
790         if (ret)
791                 return ret;
792
793         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port),
794                        HDCP_CONF_AUTH_AND_ENC);
795
796         /* Wait for R0 ready */
797         if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
798                      (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
799                 drm_err(&i915->drm, "Timed out waiting for R0 ready\n");
800                 return -ETIMEDOUT;
801         }
802
803         /*
804          * Wait for R0' to become available. The spec says 100ms from Aksv, but
805          * some monitors can take longer than this. We'll set the timeout at
806          * 300ms just to be sure.
807          *
808          * On DP, there's an R0_READY bit available but no such bit
809          * exists on HDMI. Since the upper-bound is the same, we'll just do
810          * the stupid thing instead of polling on one and not the other.
811          */
812         wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);
813
814         tries = 3;
815
816         /*
817          * The DP HDCP spec mandates two more attempts to read R0 in case
818          * of an R0 mismatch.
819          */
820         for (i = 0; i < tries; i++) {
821                 ri.reg = 0;
822                 ret = shim->read_ri_prime(dig_port, ri.shim);
823                 if (ret)
824                         return ret;
825                 intel_de_write(i915,
826                                HDCP_RPRIME(i915, cpu_transcoder, port),
827                                ri.reg);
828
829                 /* Wait for Ri prime match */
830                 if (!wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
831                               (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
832                         break;
833         }
834
835         if (i == tries) {
836                 drm_dbg_kms(&i915->drm,
837                             "Timed out waiting for Ri prime match (%x)\n",
838                             intel_de_read(i915,
839                                           HDCP_STATUS(i915, cpu_transcoder, port)));
840                 return -ETIMEDOUT;
841         }
842
843         /* Wait for encryption confirmation */
844         if (intel_de_wait_for_set(i915,
845                                   HDCP_STATUS(i915, cpu_transcoder, port),
846                                   HDCP_STATUS_ENC,
847                                   HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
848                 drm_err(&i915->drm, "Timed out waiting for encryption\n");
849                 return -ETIMEDOUT;
850         }
851
852         /* DP MST Auth Part 1 Step 2.a and Step 2.b */
853         if (shim->stream_encryption) {
854                 ret = shim->stream_encryption(connector, true);
855                 if (ret) {
856                         drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 1.4 stream enc\n",
857                                 connector->base.name, connector->base.base.id);
858                         return ret;
859                 }
860                 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encrypted\n",
861                             transcoder_name(hdcp->stream_transcoder));
862         }
863
864         if (repeater_present)
865                 return intel_hdcp_auth_downstream(connector);
866
867         drm_dbg_kms(&i915->drm, "HDCP is enabled (no repeater present)\n");
868         return 0;
869 }
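/*
 * Condensed view of the Part 1 exchange implemented above (summary only):
 *
 *   1. Optionally confirm the sink is HDCP capable (required for DP).
 *   2. Generate An in hardware and send An + Aksv to the sink.
 *   3. Read and validate Bksv, check it against the revocation list and
 *      hand it to the hardware.
 *   4. Enable signalling and authentication, then compare the sink's R0'
 *      against the locally computed R0 (up to three attempts).
 *   5. Wait for link encryption; for MST additionally enable per-stream
 *      encryption, and for repeaters continue with Part 2.
 */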
870
871 static int _intel_hdcp_disable(struct intel_connector *connector)
872 {
873         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
874         struct drm_i915_private *i915 = to_i915(connector->base.dev);
875         struct intel_hdcp *hdcp = &connector->hdcp;
876         enum port port = dig_port->base.port;
877         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
878         u32 repeater_ctl;
879         int ret;
880
881         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being disabled...\n",
882                     connector->base.name, connector->base.base.id);
883
884         if (hdcp->shim->stream_encryption) {
885                 ret = hdcp->shim->stream_encryption(connector, false);
886                 if (ret) {
887                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 1.4 stream enc\n",
888                                 connector->base.name, connector->base.base.id);
889                         return ret;
890                 }
891                 drm_dbg_kms(&i915->drm, "HDCP 1.4 transcoder: %s stream encryption disabled\n",
892                             transcoder_name(hdcp->stream_transcoder));
893                 /*
894                  * If there are other connectors on this port using HDCP,
895                  * don't disable port-level HDCP until encryption has been
896                  * disabled for all connectors in the MST topology.
897                  */
898                 if (dig_port->num_hdcp_streams > 0)
899                         return 0;
900         }
901
902         hdcp->hdcp_encrypted = false;
903         intel_de_write(i915, HDCP_CONF(i915, cpu_transcoder, port), 0);
904         if (intel_de_wait_for_clear(i915,
905                                     HDCP_STATUS(i915, cpu_transcoder, port),
906                                     ~0, HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
907                 drm_err(&i915->drm,
908                         "Failed to disable HDCP, timeout clearing status\n");
909                 return -ETIMEDOUT;
910         }
911
912         repeater_ctl = intel_hdcp_get_repeater_ctl(i915, cpu_transcoder,
913                                                    port);
914         intel_de_rmw(i915, HDCP_REP_CTL, repeater_ctl, 0);
915
916         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
917         if (ret) {
918                 drm_err(&i915->drm, "Failed to disable HDCP signalling\n");
919                 return ret;
920         }
921
922         drm_dbg_kms(&i915->drm, "HDCP is disabled\n");
923         return 0;
924 }
925
926 static int _intel_hdcp_enable(struct intel_connector *connector)
927 {
928         struct drm_i915_private *i915 = to_i915(connector->base.dev);
929         struct intel_hdcp *hdcp = &connector->hdcp;
930         int i, ret, tries = 3;
931
932         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP is being enabled...\n",
933                     connector->base.name, connector->base.base.id);
934
935         if (!hdcp_key_loadable(i915)) {
936                 drm_err(&i915->drm, "HDCP key Load is not possible\n");
937                 return -ENXIO;
938         }
939
940         for (i = 0; i < KEY_LOAD_TRIES; i++) {
941                 ret = intel_hdcp_load_keys(i915);
942                 if (!ret)
943                         break;
944                 intel_hdcp_clear_keys(i915);
945         }
946         if (ret) {
947                 drm_err(&i915->drm, "Could not load HDCP keys, (%d)\n",
948                         ret);
949                 return ret;
950         }
951
952         /* In case of authentication failures, the HDCP spec expects reauth. */
953         for (i = 0; i < tries; i++) {
954                 ret = intel_hdcp_auth(connector);
955                 if (!ret) {
956                         hdcp->hdcp_encrypted = true;
957                         return 0;
958                 }
959
960                 drm_dbg_kms(&i915->drm, "HDCP Auth failure (%d)\n", ret);
961
962                 /* Ensure HDCP encryption and signalling are stopped. */
963                 _intel_hdcp_disable(connector);
964         }
965
966         drm_dbg_kms(&i915->drm,
967                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
968         return ret;
969 }
970
971 static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
972 {
973         return container_of(hdcp, struct intel_connector, hdcp);
974 }
975
976 static void intel_hdcp_update_value(struct intel_connector *connector,
977                                     u64 value, bool update_property)
978 {
979         struct drm_device *dev = connector->base.dev;
980         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
981         struct intel_hdcp *hdcp = &connector->hdcp;
982         struct drm_i915_private *i915 = to_i915(connector->base.dev);
983
984         drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));
985
986         if (hdcp->value == value)
987                 return;
988
989         drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));
990
991         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
992                 if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
993                         dig_port->num_hdcp_streams--;
994         } else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
995                 dig_port->num_hdcp_streams++;
996         }
997
998         hdcp->value = value;
999         if (update_property) {
1000                 drm_connector_get(&connector->base);
1001                 queue_work(i915->unordered_wq, &hdcp->prop_work);
1002         }
1003 }
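/*
 * Example of the bookkeeping above (illustrative): if two MST connectors on
 * the same dig_port are ENABLED, num_hdcp_streams is 2; moving one of them
 * to DESIRED drops it to 1, which lets _intel_hdcp_disable() know it must
 * keep port-level encryption on for the remaining stream.
 */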
1004
1005 /* Implements Part 3 of the HDCP authorization procedure */
1006 static int intel_hdcp_check_link(struct intel_connector *connector)
1007 {
1008         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1009         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1010         struct intel_hdcp *hdcp = &connector->hdcp;
1011         enum port port = dig_port->base.port;
1012         enum transcoder cpu_transcoder;
1013         int ret = 0;
1014
1015         mutex_lock(&hdcp->mutex);
1016         mutex_lock(&dig_port->hdcp_mutex);
1017
1018         cpu_transcoder = hdcp->cpu_transcoder;
1019
1020         /* Check_link valid only when HDCP1.4 is enabled */
1021         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
1022             !hdcp->hdcp_encrypted) {
1023                 ret = -EINVAL;
1024                 goto out;
1025         }
1026
1027         if (drm_WARN_ON(&i915->drm,
1028                         !intel_hdcp_in_use(i915, cpu_transcoder, port))) {
1029                 drm_err(&i915->drm,
1030                         "%s:%d HDCP link stopped encryption,%x\n",
1031                         connector->base.name, connector->base.base.id,
1032                         intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
1033                 ret = -ENXIO;
1034                 intel_hdcp_update_value(connector,
1035                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1036                                         true);
1037                 goto out;
1038         }
1039
1040         if (hdcp->shim->check_link(dig_port, connector)) {
1041                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
1042                         intel_hdcp_update_value(connector,
1043                                 DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
1044                 }
1045                 goto out;
1046         }
1047
1048         drm_dbg_kms(&i915->drm,
1049                     "[%s:%d] HDCP link failed, retrying authentication\n",
1050                     connector->base.name, connector->base.base.id);
1051
1052         ret = _intel_hdcp_disable(connector);
1053         if (ret) {
1054                 drm_err(&i915->drm, "Failed to disable hdcp (%d)\n", ret);
1055                 intel_hdcp_update_value(connector,
1056                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1057                                         true);
1058                 goto out;
1059         }
1060
1061         ret = _intel_hdcp_enable(connector);
1062         if (ret) {
1063                 drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
1064                 intel_hdcp_update_value(connector,
1065                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
1066                                         true);
1067                 goto out;
1068         }
1069
1070 out:
1071         mutex_unlock(&dig_port->hdcp_mutex);
1072         mutex_unlock(&hdcp->mutex);
1073         return ret;
1074 }
1075
1076 static void intel_hdcp_prop_work(struct work_struct *work)
1077 {
1078         struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
1079                                                prop_work);
1080         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1081         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1082
1083         drm_modeset_lock(&i915->drm.mode_config.connection_mutex, NULL);
1084         mutex_lock(&hdcp->mutex);
1085
1086         /*
1087          * This worker is only used to flip between ENABLED/DESIRED. Either of
1088          * those to UNDESIRED is handled by core. If value == UNDESIRED,
1089          * we're running just after hdcp has been disabled, so just exit
1090          */
1091         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
1092                 drm_hdcp_update_content_protection(&connector->base,
1093                                                    hdcp->value);
1094
1095         mutex_unlock(&hdcp->mutex);
1096         drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
1097
1098         drm_connector_put(&connector->base);
1099 }
1100
1101 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port)
1102 {
1103         return DISPLAY_RUNTIME_INFO(i915)->has_hdcp &&
1104                 (DISPLAY_VER(i915) >= 12 || port < PORT_E);
1105 }
1106
1107 static int
1108 hdcp2_prepare_ake_init(struct intel_connector *connector,
1109                        struct hdcp2_ake_init *ake_data)
1110 {
1111         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1112         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1113         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1114         struct i915_hdcp_arbiter *arbiter;
1115         int ret;
1116
1117         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1118         arbiter = i915->display.hdcp.arbiter;
1119
1120         if (!arbiter || !arbiter->ops) {
1121                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1122                 return -EINVAL;
1123         }
1124
1125         ret = arbiter->ops->initiate_hdcp2_session(arbiter->hdcp_dev, data, ake_data);
1126         if (ret)
1127                 drm_dbg_kms(&i915->drm, "Prepare_ake_init failed. %d\n",
1128                             ret);
1129         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1130
1131         return ret;
1132 }
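/*
 * All of the hdcp2_* helpers below follow the same pattern as
 * hdcp2_prepare_ake_init() above: take display.hdcp.hdcp_mutex, bail out
 * with -EINVAL if no arbiter (MEI or GSC backend) is bound, forward the
 * message to the firmware through arbiter->ops, log a debug message on
 * failure and drop the mutex.
 */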
1133
1134 static int
1135 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1136                                 struct hdcp2_ake_send_cert *rx_cert,
1137                                 bool *paired,
1138                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1139                                 size_t *msg_sz)
1140 {
1141         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1142         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1143         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1144         struct i915_hdcp_arbiter *arbiter;
1145         int ret;
1146
1147         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1148         arbiter = i915->display.hdcp.arbiter;
1149
1150         if (!arbiter || !arbiter->ops) {
1151                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1152                 return -EINVAL;
1153         }
1154
1155         ret = arbiter->ops->verify_receiver_cert_prepare_km(arbiter->hdcp_dev, data,
1156                                                          rx_cert, paired,
1157                                                          ek_pub_km, msg_sz);
1158         if (ret < 0)
1159                 drm_dbg_kms(&i915->drm, "Verify rx_cert failed. %d\n",
1160                             ret);
1161         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1162
1163         return ret;
1164 }
1165
1166 static int hdcp2_verify_hprime(struct intel_connector *connector,
1167                                struct hdcp2_ake_send_hprime *rx_hprime)
1168 {
1169         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1170         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1171         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1172         struct i915_hdcp_arbiter *arbiter;
1173         int ret;
1174
1175         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1176         arbiter = i915->display.hdcp.arbiter;
1177
1178         if (!arbiter || !arbiter->ops) {
1179                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1180                 return -EINVAL;
1181         }
1182
1183         ret = arbiter->ops->verify_hprime(arbiter->hdcp_dev, data, rx_hprime);
1184         if (ret < 0)
1185                 drm_dbg_kms(&i915->drm, "Verify hprime failed. %d\n", ret);
1186         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1187
1188         return ret;
1189 }
1190
1191 static int
1192 hdcp2_store_pairing_info(struct intel_connector *connector,
1193                          struct hdcp2_ake_send_pairing_info *pairing_info)
1194 {
1195         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1196         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1197         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1198         struct i915_hdcp_arbiter *arbiter;
1199         int ret;
1200
1201         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1202         arbiter = i915->display.hdcp.arbiter;
1203
1204         if (!arbiter || !arbiter->ops) {
1205                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1206                 return -EINVAL;
1207         }
1208
1209         ret = arbiter->ops->store_pairing_info(arbiter->hdcp_dev, data, pairing_info);
1210         if (ret < 0)
1211                 drm_dbg_kms(&i915->drm, "Store pairing info failed. %d\n",
1212                             ret);
1213         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1214
1215         return ret;
1216 }
1217
1218 static int
1219 hdcp2_prepare_lc_init(struct intel_connector *connector,
1220                       struct hdcp2_lc_init *lc_init)
1221 {
1222         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1223         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1224         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1225         struct i915_hdcp_arbiter *arbiter;
1226         int ret;
1227
1228         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1229         arbiter = i915->display.hdcp.arbiter;
1230
1231         if (!arbiter || !arbiter->ops) {
1232                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1233                 return -EINVAL;
1234         }
1235
1236         ret = arbiter->ops->initiate_locality_check(arbiter->hdcp_dev, data, lc_init);
1237         if (ret < 0)
1238                 drm_dbg_kms(&i915->drm, "Prepare lc_init failed. %d\n",
1239                             ret);
1240         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1241
1242         return ret;
1243 }
1244
1245 static int
1246 hdcp2_verify_lprime(struct intel_connector *connector,
1247                     struct hdcp2_lc_send_lprime *rx_lprime)
1248 {
1249         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1250         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1251         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1252         struct i915_hdcp_arbiter *arbiter;
1253         int ret;
1254
1255         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1256         arbiter = i915->display.hdcp.arbiter;
1257
1258         if (!arbiter || !arbiter->ops) {
1259                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1260                 return -EINVAL;
1261         }
1262
1263         ret = arbiter->ops->verify_lprime(arbiter->hdcp_dev, data, rx_lprime);
1264         if (ret < 0)
1265                 drm_dbg_kms(&i915->drm, "Verify L_Prime failed. %d\n",
1266                             ret);
1267         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1268
1269         return ret;
1270 }
1271
1272 static int hdcp2_prepare_skey(struct intel_connector *connector,
1273                               struct hdcp2_ske_send_eks *ske_data)
1274 {
1275         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1276         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1277         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1278         struct i915_hdcp_arbiter *arbiter;
1279         int ret;
1280
1281         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1282         arbiter = i915->display.hdcp.arbiter;
1283
1284         if (!arbiter || !arbiter->ops) {
1285                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1286                 return -EINVAL;
1287         }
1288
1289         ret = arbiter->ops->get_session_key(arbiter->hdcp_dev, data, ske_data);
1290         if (ret < 0)
1291                 drm_dbg_kms(&i915->drm, "Get session key failed. %d\n",
1292                             ret);
1293         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1294
1295         return ret;
1296 }
1297
1298 static int
1299 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1300                                       struct hdcp2_rep_send_receiverid_list
1301                                                                 *rep_topology,
1302                                       struct hdcp2_rep_send_ack *rep_send_ack)
1303 {
1304         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1305         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1306         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1307         struct i915_hdcp_arbiter *arbiter;
1308         int ret;
1309
1310         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1311         arbiter = i915->display.hdcp.arbiter;
1312
1313         if (!arbiter || !arbiter->ops) {
1314                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1315                 return -EINVAL;
1316         }
1317
1318         ret = arbiter->ops->repeater_check_flow_prepare_ack(arbiter->hdcp_dev,
1319                                                             data,
1320                                                             rep_topology,
1321                                                             rep_send_ack);
1322         if (ret < 0)
1323                 drm_dbg_kms(&i915->drm,
1324                             "Verify rep topology failed. %d\n", ret);
1325         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1326
1327         return ret;
1328 }
1329
1330 static int
1331 hdcp2_verify_mprime(struct intel_connector *connector,
1332                     struct hdcp2_rep_stream_ready *stream_ready)
1333 {
1334         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1335         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1336         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1337         struct i915_hdcp_arbiter *arbiter;
1338         int ret;
1339
1340         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1341         arbiter = i915->display.hdcp.arbiter;
1342
1343         if (!arbiter || !arbiter->ops) {
1344                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1345                 return -EINVAL;
1346         }
1347
1348         ret = arbiter->ops->verify_mprime(arbiter->hdcp_dev, data, stream_ready);
1349         if (ret < 0)
1350                 drm_dbg_kms(&i915->drm, "Verify mprime failed. %d\n", ret);
1351         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1352
1353         return ret;
1354 }
1355
1356 static int hdcp2_authenticate_port(struct intel_connector *connector)
1357 {
1358         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1359         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1360         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1361         struct i915_hdcp_arbiter *arbiter;
1362         int ret;
1363
1364         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1365         arbiter = i915->display.hdcp.arbiter;
1366
1367         if (!arbiter || !arbiter->ops) {
1368                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1369                 return -EINVAL;
1370         }
1371
1372         ret = arbiter->ops->enable_hdcp_authentication(arbiter->hdcp_dev, data);
1373         if (ret < 0)
1374                 drm_dbg_kms(&i915->drm, "Enable hdcp auth failed. %d\n",
1375                             ret);
1376         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1377
1378         return ret;
1379 }
1380
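/* Close the HDCP 2.2 session the arbiter firmware holds for this port. */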
1381 static int hdcp2_close_session(struct intel_connector *connector)
1382 {
1383         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1384         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1385         struct i915_hdcp_arbiter *arbiter;
1386         int ret;
1387
1388         mutex_lock(&i915->display.hdcp.hdcp_mutex);
1389         arbiter = i915->display.hdcp.arbiter;
1390
1391         if (!arbiter || !arbiter->ops) {
1392                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1393                 return -EINVAL;
1394         }
1395
1396         ret = arbiter->ops->close_hdcp_session(arbiter->hdcp_dev,
1397                                              &dig_port->hdcp_port_data);
1398         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
1399
1400         return ret;
1401 }
1402
1403 static int hdcp2_deauthenticate_port(struct intel_connector *connector)
1404 {
1405         return hdcp2_close_session(connector);
1406 }
1407
1408 /* The HDCP 2.2 authentication flow starts here with AKE */
1409 static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
1410 {
1411         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1412         struct intel_hdcp *hdcp = &connector->hdcp;
1413         union {
1414                 struct hdcp2_ake_init ake_init;
1415                 struct hdcp2_ake_send_cert send_cert;
1416                 struct hdcp2_ake_no_stored_km no_stored_km;
1417                 struct hdcp2_ake_send_hprime send_hprime;
1418                 struct hdcp2_ake_send_pairing_info pairing_info;
1419         } msgs;
1420         const struct intel_hdcp_shim *shim = hdcp->shim;
1421         size_t size;
1422         int ret;
1423
1424         /* Init for seq_num */
1425         hdcp->seq_num_v = 0;
1426         hdcp->seq_num_m = 0;
1427
1428         ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
1429         if (ret < 0)
1430                 return ret;
1431
1432         ret = shim->write_2_2_msg(connector, &msgs.ake_init,
1433                                   sizeof(msgs.ake_init));
1434         if (ret < 0)
1435                 return ret;
1436
1437         ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_CERT,
1438                                  &msgs.send_cert, sizeof(msgs.send_cert));
1439         if (ret < 0)
1440                 return ret;
1441
1442         if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
1443                 drm_dbg_kms(&i915->drm, "cert.rx_caps don't claim HDCP2.2\n");
1444                 return -EINVAL;
1445         }
1446
1447         hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
1448
1449         if (drm_hdcp_check_ksvs_revoked(&i915->drm,
1450                                         msgs.send_cert.cert_rx.receiver_id,
1451                                         1) > 0) {
1452                 drm_err(&i915->drm, "Receiver ID is revoked\n");
1453                 return -EPERM;
1454         }
1455
1456         /*
1457          * msgs.no_stored_km also holds the AKE_Stored_km message when km
1458          * for this receiver is already stored, i.e. the devices are paired.
1459          */
1460         ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
1461                                               &hdcp->is_paired,
1462                                               &msgs.no_stored_km, &size);
1463         if (ret < 0)
1464                 return ret;
1465
1466         ret = shim->write_2_2_msg(connector, &msgs.no_stored_km, size);
1467         if (ret < 0)
1468                 return ret;
1469
1470         ret = shim->read_2_2_msg(connector, HDCP_2_2_AKE_SEND_HPRIME,
1471                                  &msgs.send_hprime, sizeof(msgs.send_hprime));
1472         if (ret < 0)
1473                 return ret;
1474
1475         ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
1476         if (ret < 0)
1477                 return ret;
1478
1479         if (!hdcp->is_paired) {
1480                 /* Pairing is required */
1481                 ret = shim->read_2_2_msg(connector,
1482                                          HDCP_2_2_AKE_SEND_PAIRING_INFO,
1483                                          &msgs.pairing_info,
1484                                          sizeof(msgs.pairing_info));
1485                 if (ret < 0)
1486                         return ret;
1487
1488                 ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
1489                 if (ret < 0)
1490                         return ret;
1491                 hdcp->is_paired = true;
1492         }
1493
1494         return 0;
1495 }
1496
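/*
 * Locality Check: send LC_Init to the sink, read back LC_Send_L_prime and
 * have the arbiter firmware verify L'. The whole sequence is retried up to
 * HDCP2_LC_RETRY_CNT times.
 */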
1497 static int hdcp2_locality_check(struct intel_connector *connector)
1498 {
1499         struct intel_hdcp *hdcp = &connector->hdcp;
1500         union {
1501                 struct hdcp2_lc_init lc_init;
1502                 struct hdcp2_lc_send_lprime send_lprime;
1503         } msgs;
1504         const struct intel_hdcp_shim *shim = hdcp->shim;
1505         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1506
1507         for (i = 0; i < tries; i++) {
1508                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1509                 if (ret < 0)
1510                         continue;
1511
1512                 ret = shim->write_2_2_msg(connector, &msgs.lc_init,
1513                                       sizeof(msgs.lc_init));
1514                 if (ret < 0)
1515                         continue;
1516
1517                 ret = shim->read_2_2_msg(connector,
1518                                          HDCP_2_2_LC_SEND_LPRIME,
1519                                          &msgs.send_lprime,
1520                                          sizeof(msgs.send_lprime));
1521                 if (ret < 0)
1522                         continue;
1523
1524                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1525                 if (!ret)
1526                         break;
1527         }
1528
1529         return ret;
1530 }
1531
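/*
 * Session Key Exchange: obtain SKE_Send_Eks (the encrypted session key)
 * from the arbiter firmware and write it to the sink.
 */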
1532 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1533 {
1534         struct intel_hdcp *hdcp = &connector->hdcp;
1535         struct hdcp2_ske_send_eks send_eks;
1536         int ret;
1537
1538         ret = hdcp2_prepare_skey(connector, &send_eks);
1539         if (ret < 0)
1540                 return ret;
1541
1542         ret = hdcp->shim->write_2_2_msg(connector, &send_eks,
1543                                         sizeof(send_eks));
1544         if (ret < 0)
1545                 return ret;
1546
1547         return 0;
1548 }
1549
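/*
 * Send RepeaterAuth_Stream_Manage with the current seq_num_m and the
 * per-stream content types, read back RepeaterAuth_Stream_Ready and have
 * the firmware verify M'. seq_num_m is incremented on every attempt,
 * successful or not.
 */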
1550 static
1551 int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1552 {
1553         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1554         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1555         struct intel_hdcp *hdcp = &connector->hdcp;
1556         union {
1557                 struct hdcp2_rep_stream_manage stream_manage;
1558                 struct hdcp2_rep_stream_ready stream_ready;
1559         } msgs;
1560         const struct intel_hdcp_shim *shim = hdcp->shim;
1561         int ret, streams_size_delta, i;
1562
1563         if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
1564                 return -ERANGE;
1565
1566         /* Prepare RepeaterAuth_Stream_Manage msg */
1567         msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
1568         drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
1569
1570         msgs.stream_manage.k = cpu_to_be16(data->k);
1571
1572         for (i = 0; i < data->k; i++) {
1573                 msgs.stream_manage.streams[i].stream_id = data->streams[i].stream_id;
1574                 msgs.stream_manage.streams[i].stream_type = data->streams[i].stream_type;
1575         }
1576
1577         streams_size_delta = (HDCP_2_2_MAX_CONTENT_STREAMS_CNT - data->k) *
1578                                 sizeof(struct hdcp2_streamid_type);
1579         /* Send it to Repeater */
1580         ret = shim->write_2_2_msg(connector, &msgs.stream_manage,
1581                                   sizeof(msgs.stream_manage) - streams_size_delta);
1582         if (ret < 0)
1583                 goto out;
1584
1585         ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_STREAM_READY,
1586                                  &msgs.stream_ready, sizeof(msgs.stream_ready));
1587         if (ret < 0)
1588                 goto out;
1589
1590         data->seq_num_m = hdcp->seq_num_m;
1591
1592         ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
1593
1594 out:
1595         hdcp->seq_num_m++;
1596
1597         return ret;
1598 }
1599
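/*
 * Process the repeater's ReceiverID_List: reject oversized topologies and
 * revoked receiver IDs, validate seq_num_v, note whether the downstream
 * topology is Type 1 capable and write back the ack prepared by the
 * arbiter firmware.
 */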
1600 static
1601 int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
1602 {
1603         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1604         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1605         struct intel_hdcp *hdcp = &connector->hdcp;
1606         union {
1607                 struct hdcp2_rep_send_receiverid_list recvid_list;
1608                 struct hdcp2_rep_send_ack rep_ack;
1609         } msgs;
1610         const struct intel_hdcp_shim *shim = hdcp->shim;
1611         u32 seq_num_v, device_cnt;
1612         u8 *rx_info;
1613         int ret;
1614
1615         ret = shim->read_2_2_msg(connector, HDCP_2_2_REP_SEND_RECVID_LIST,
1616                                  &msgs.recvid_list, sizeof(msgs.recvid_list));
1617         if (ret < 0)
1618                 return ret;
1619
1620         rx_info = msgs.recvid_list.rx_info;
1621
1622         if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
1623             HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
1624                 drm_dbg_kms(&i915->drm, "Topology Max Size Exceeded\n");
1625                 return -EINVAL;
1626         }
1627
1628         /*
1629          * MST topology is not Type 1 capable if it contains a downstream
1630          * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
1631          */
1632         dig_port->hdcp_mst_type1_capable =
1633                 !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
1634                 !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
1635
1636         /* Convert seq_num_v from big-endian and store it as a DWORD */
1637         seq_num_v =
1638                 drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
1639
1640         if (!hdcp->hdcp2_encrypted && seq_num_v) {
1641                 drm_dbg_kms(&i915->drm,
1642                             "Non-zero Seq_num_v at first RecvId_List msg\n");
1643                 return -EINVAL;
1644         }
1645
1646         if (seq_num_v < hdcp->seq_num_v) {
1647                 /* Roll over of the seq_num_v from repeater. Reauthenticate. */
1648                 drm_dbg_kms(&i915->drm, "Seq_num_v roll over.\n");
1649                 return -EINVAL;
1650         }
1651
1652         device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
1653                       HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
1654         if (drm_hdcp_check_ksvs_revoked(&i915->drm,
1655                                         msgs.recvid_list.receiver_ids,
1656                                         device_cnt) > 0) {
1657                 drm_err(&i915->drm, "Revoked receiver ID(s) found in list\n");
1658                 return -EPERM;
1659         }
1660
1661         ret = hdcp2_verify_rep_topology_prepare_ack(connector,
1662                                                     &msgs.recvid_list,
1663                                                     &msgs.rep_ack);
1664         if (ret < 0)
1665                 return ret;
1666
1667         hdcp->seq_num_v = seq_num_v;
1668         ret = shim->write_2_2_msg(connector, &msgs.rep_ack,
1669                                   sizeof(msgs.rep_ack));
1670         if (ret < 0)
1671                 return ret;
1672
1673         return 0;
1674 }
1675
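/*
 * Top level HDCP 2.2 sink authentication: AKE, Locality Check, Session Key
 * Exchange, optional stream type configuration and, for repeaters, the
 * downstream topology authentication.
 */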
1676 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1677 {
1678         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1679         struct intel_hdcp *hdcp = &connector->hdcp;
1680         const struct intel_hdcp_shim *shim = hdcp->shim;
1681         int ret;
1682
1683         ret = hdcp2_authentication_key_exchange(connector);
1684         if (ret < 0) {
1685                 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1686                 return ret;
1687         }
1688
1689         ret = hdcp2_locality_check(connector);
1690         if (ret < 0) {
1691                 drm_dbg_kms(&i915->drm,
1692                             "Locality Check failed. Err : %d\n", ret);
1693                 return ret;
1694         }
1695
1696         ret = hdcp2_session_key_exchange(connector);
1697         if (ret < 0) {
1698                 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1699                 return ret;
1700         }
1701
1702         if (shim->config_stream_type) {
1703                 ret = shim->config_stream_type(connector,
1704                                                hdcp->is_repeater,
1705                                                hdcp->content_type);
1706                 if (ret < 0)
1707                         return ret;
1708         }
1709
1710         if (hdcp->is_repeater) {
1711                 ret = hdcp2_authenticate_repeater_topology(connector);
1712                 if (ret < 0) {
1713                         drm_dbg_kms(&i915->drm,
1714                                     "Repeater Auth Failed. Err: %d\n", ret);
1715                         return ret;
1716                 }
1717         }
1718
1719         return ret;
1720 }
1721
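/*
 * Enable HDCP 2.2 stream encryption on top of an already encrypted link.
 * If the link unexpectedly reports as not encrypted, deauthenticate the
 * port so that the next attempt starts from scratch.
 */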
1722 static int hdcp2_enable_stream_encryption(struct intel_connector *connector)
1723 {
1724         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1725         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1726         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1727         struct intel_hdcp *hdcp = &connector->hdcp;
1728         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1729         enum port port = dig_port->base.port;
1730         int ret = 0;
1731
1732         if (!(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1733                             LINK_ENCRYPTION_STATUS)) {
1734                 drm_err(&i915->drm, "[%s:%d] HDCP 2.2 Link is not encrypted\n",
1735                         connector->base.name, connector->base.base.id);
1736                 ret = -EPERM;
1737                 goto link_recover;
1738         }
1739
1740         if (hdcp->shim->stream_2_2_encryption) {
1741                 ret = hdcp->shim->stream_2_2_encryption(connector, true);
1742                 if (ret) {
1743                         drm_err(&i915->drm, "[%s:%d] Failed to enable HDCP 2.2 stream enc\n",
1744                                 connector->base.name, connector->base.base.id);
1745                         return ret;
1746                 }
1747                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encrypted\n",
1748                             transcoder_name(hdcp->stream_transcoder));
1749         }
1750
1751         return 0;
1752
1753 link_recover:
1754         if (hdcp2_deauthenticate_port(connector) < 0)
1755                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1756
1757         dig_port->hdcp_auth_status = false;
1758         data->k = 0;
1759
1760         return ret;
1761 }
1762
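/*
 * Enable HDCP 2.2 link encryption: toggle HDCP signalling if the shim
 * requires it, request encryption once LINK_AUTH_STATUS is set and wait
 * for LINK_ENCRYPTION_STATUS to be reported.
 */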
1763 static int hdcp2_enable_encryption(struct intel_connector *connector)
1764 {
1765         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1766         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1767         struct intel_hdcp *hdcp = &connector->hdcp;
1768         enum port port = dig_port->base.port;
1769         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1770         int ret;
1771
1772         drm_WARN_ON(&i915->drm,
1773                     intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1774                     LINK_ENCRYPTION_STATUS);
1775         if (hdcp->shim->toggle_signalling) {
1776                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1777                                                     true);
1778                 if (ret) {
1779                         drm_err(&i915->drm,
1780                                 "Failed to enable HDCP signalling. %d\n",
1781                                 ret);
1782                         return ret;
1783                 }
1784         }
1785
1786         if (intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1787             LINK_AUTH_STATUS)
1788                 /* Link is Authenticated. Now set for Encryption */
1789                 intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
1790                              0, CTL_LINK_ENCRYPTION_REQ);
1791
1792         ret = intel_de_wait_for_set(i915,
1793                                     HDCP2_STATUS(i915, cpu_transcoder,
1794                                                  port),
1795                                     LINK_ENCRYPTION_STATUS,
1796                                     HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1797         dig_port->hdcp_auth_status = true;
1798
1799         return ret;
1800 }
1801
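/*
 * Disable HDCP 2.2 link encryption: drop the encryption request, wait for
 * LINK_ENCRYPTION_STATUS to clear and turn off HDCP signalling if the
 * shim requires it.
 */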
1802 static int hdcp2_disable_encryption(struct intel_connector *connector)
1803 {
1804         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1805         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1806         struct intel_hdcp *hdcp = &connector->hdcp;
1807         enum port port = dig_port->base.port;
1808         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
1809         int ret;
1810
1811         drm_WARN_ON(&i915->drm, !(intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)) &
1812                                       LINK_ENCRYPTION_STATUS));
1813
1814         intel_de_rmw(i915, HDCP2_CTL(i915, cpu_transcoder, port),
1815                      CTL_LINK_ENCRYPTION_REQ, 0);
1816
1817         ret = intel_de_wait_for_clear(i915,
1818                                       HDCP2_STATUS(i915, cpu_transcoder,
1819                                                    port),
1820                                       LINK_ENCRYPTION_STATUS,
1821                                       HDCP_ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
1822         if (ret == -ETIMEDOUT)
1823                 drm_dbg_kms(&i915->drm, "Disable encryption timed out\n");
1824
1825         if (hdcp->shim->toggle_signalling) {
1826                 ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
1827                                                     false);
1828                 if (ret) {
1829                         drm_err(&i915->drm,
1830                                 "Failed to disable HDCP signalling. %d\n",
1831                                 ret);
1832                         return ret;
1833                 }
1834         }
1835
1836         return ret;
1837 }
1838
1839 static int
1840 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1841 {
1842         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1843         int i, tries = 3, ret;
1844
1845         if (!connector->hdcp.is_repeater)
1846                 return 0;
1847
1848         for (i = 0; i < tries; i++) {
1849                 ret = _hdcp2_propagate_stream_management_info(connector);
1850                 if (!ret)
1851                         break;
1852
1853                 /* Let's restart the auth in case of seq_num_m roll over */
1854                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1855                         drm_dbg_kms(&i915->drm,
1856                                     "seq_num_m roll over.(%d)\n", ret);
1857                         break;
1858                 }
1859
1860                 drm_dbg_kms(&i915->drm,
1861                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1862                             i + 1, tries, ret);
1863         }
1864
1865         return ret;
1866 }
1867
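/*
 * Run sink authentication, stream management and port authentication with
 * up to three attempts, then, after the mandatory post-SKE delay, enable
 * link encryption followed by stream encryption.
 */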
1868 static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
1869 {
1870         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1871         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1872         int ret = 0, i, tries = 3;
1873
1874         for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
1875                 ret = hdcp2_authenticate_sink(connector);
1876                 if (!ret) {
1877                         intel_hdcp_prepare_streams(connector);
1878
1879                         ret = hdcp2_propagate_stream_management_info(connector);
1880                         if (ret) {
1881                                 drm_dbg_kms(&i915->drm,
1882                                             "Stream management failed.(%d)\n",
1883                                             ret);
1884                                 break;
1885                         }
1886
1887                         ret = hdcp2_authenticate_port(connector);
1888                         if (!ret)
1889                                 break;
1890                         drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
1891                                     ret);
1892                 }
1893
1894                 /* Clear the firmware (MEI/GSC) HDCP session */
1895                 drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
1896                             i + 1, tries, ret);
1897                 if (hdcp2_deauthenticate_port(connector) < 0)
1898                         drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1899         }
1900
1901         if (!ret && !dig_port->hdcp_auth_status) {
1902                 /*
1903                  * Ensure the required minimum 200 ms interval between
1904                  * Session Key Exchange and enabling encryption.
1905                  */
1906                 msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
1907                 ret = hdcp2_enable_encryption(connector);
1908                 if (ret < 0) {
1909                         drm_dbg_kms(&i915->drm,
1910                                     "Encryption Enable Failed.(%d)\n", ret);
1911                         if (hdcp2_deauthenticate_port(connector) < 0)
1912                                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1913                 }
1914         }
1915
1916         if (!ret)
1917                 ret = hdcp2_enable_stream_encryption(connector);
1918
1919         return ret;
1920 }
1921
1922 static int _intel_hdcp2_enable(struct intel_connector *connector)
1923 {
1924         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1925         struct intel_hdcp *hdcp = &connector->hdcp;
1926         int ret;
1927
1928         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1929                     connector->base.name, connector->base.base.id,
1930                     hdcp->content_type);
1931
1932         ret = hdcp2_authenticate_and_encrypt(connector);
1933         if (ret) {
1934                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
1935                             hdcp->content_type, ret);
1936                 return ret;
1937         }
1938
1939         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1940                     connector->base.name, connector->base.base.id,
1941                     hdcp->content_type);
1942
1943         hdcp->hdcp2_encrypted = true;
1944         return 0;
1945 }
1946
1947 static int
1948 _intel_hdcp2_disable(struct intel_connector *connector, bool hdcp2_link_recovery)
1949 {
1950         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1951         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1952         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
1953         struct intel_hdcp *hdcp = &connector->hdcp;
1954         int ret;
1955
1956         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1957                     connector->base.name, connector->base.base.id);
1958
1959         if (hdcp->shim->stream_2_2_encryption) {
1960                 ret = hdcp->shim->stream_2_2_encryption(connector, false);
1961                 if (ret) {
1962                         drm_err(&i915->drm, "[%s:%d] Failed to disable HDCP 2.2 stream enc\n",
1963                                 connector->base.name, connector->base.base.id);
1964                         return ret;
1965                 }
1966                 drm_dbg_kms(&i915->drm, "HDCP 2.2 transcoder: %s stream encryption disabled\n",
1967                             transcoder_name(hdcp->stream_transcoder));
1968
1969                 if (dig_port->num_hdcp_streams > 0 && !hdcp2_link_recovery)
1970                         return 0;
1971         }
1972
1973         ret = hdcp2_disable_encryption(connector);
1974
1975         if (hdcp2_deauthenticate_port(connector) < 0)
1976                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1977
1978         connector->hdcp.hdcp2_encrypted = false;
1979         dig_port->hdcp_auth_status = false;
1980         data->k = 0;
1981
1982         return ret;
1983 }
1984
1985 /* Implements the Link Integrity Check for HDCP2.2 */
1986 static int intel_hdcp2_check_link(struct intel_connector *connector)
1987 {
1988         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1989         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1990         struct intel_hdcp *hdcp = &connector->hdcp;
1991         enum port port = dig_port->base.port;
1992         enum transcoder cpu_transcoder;
1993         int ret = 0;
1994
1995         mutex_lock(&hdcp->mutex);
1996         mutex_lock(&dig_port->hdcp_mutex);
1997         cpu_transcoder = hdcp->cpu_transcoder;
1998
1999         /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
2000         if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
2001             !hdcp->hdcp2_encrypted) {
2002                 ret = -EINVAL;
2003                 goto out;
2004         }
2005
2006         if (drm_WARN_ON(&i915->drm,
2007                         !intel_hdcp2_in_use(i915, cpu_transcoder, port))) {
2008                 drm_err(&i915->drm,
2009                         "HDCP2.2 link stopped the encryption, %x\n",
2010                         intel_de_read(i915, HDCP2_STATUS(i915, cpu_transcoder, port)));
2011                 ret = -ENXIO;
2012                 _intel_hdcp2_disable(connector, true);
2013                 intel_hdcp_update_value(connector,
2014                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2015                                         true);
2016                 goto out;
2017         }
2018
2019         ret = hdcp->shim->check_2_2_link(dig_port, connector);
2020         if (ret == HDCP_LINK_PROTECTED) {
2021                 if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
2022                         intel_hdcp_update_value(connector,
2023                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2024                                         true);
2025                 }
2026                 goto out;
2027         }
2028
2029         if (ret == HDCP_TOPOLOGY_CHANGE) {
2030                 if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2031                         goto out;
2032
2033                 drm_dbg_kms(&i915->drm,
2034                             "HDCP2.2 Downstream topology change\n");
2035                 ret = hdcp2_authenticate_repeater_topology(connector);
2036                 if (!ret) {
2037                         intel_hdcp_update_value(connector,
2038                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2039                                         true);
2040                         goto out;
2041                 }
2042                 drm_dbg_kms(&i915->drm,
2043                             "[%s:%d] Repeater topology auth failed.(%d)\n",
2044                             connector->base.name, connector->base.base.id,
2045                             ret);
2046         } else {
2047                 drm_dbg_kms(&i915->drm,
2048                             "[%s:%d] HDCP2.2 link failed, retrying auth\n",
2049                             connector->base.name, connector->base.base.id);
2050         }
2051
2052         ret = _intel_hdcp2_disable(connector, true);
2053         if (ret) {
2054                 drm_err(&i915->drm,
2055                         "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
2056                         connector->base.name, connector->base.base.id, ret);
2057                 intel_hdcp_update_value(connector,
2058                                 DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
2059                 goto out;
2060         }
2061
2062         ret = _intel_hdcp2_enable(connector);
2063         if (ret) {
2064                 drm_dbg_kms(&i915->drm,
2065                             "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
2066                             connector->base.name, connector->base.base.id,
2067                             ret);
2068                 intel_hdcp_update_value(connector,
2069                                         DRM_MODE_CONTENT_PROTECTION_DESIRED,
2070                                         true);
2071                 goto out;
2072         }
2073
2074 out:
2075         mutex_unlock(&dig_port->hdcp_mutex);
2076         mutex_unlock(&hdcp->mutex);
2077         return ret;
2078 }
2079
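/*
 * Periodic link integrity check: re-arm with the HDCP 2.2 period while the
 * 2.2 link check passes, otherwise fall back to the HDCP 1.4 check and its
 * period. The worker stops re-arming once neither check succeeds or the
 * connector is unregistered.
 */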
2080 static void intel_hdcp_check_work(struct work_struct *work)
2081 {
2082         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
2083                                                struct intel_hdcp,
2084                                                check_work);
2085         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
2086         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2087
2088         if (drm_connector_is_unregistered(&connector->base))
2089                 return;
2090
2091         if (!intel_hdcp2_check_link(connector))
2092                 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2093                                    DRM_HDCP2_CHECK_PERIOD_MS);
2094         else if (!intel_hdcp_check_link(connector))
2095                 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2096                                    DRM_HDCP_CHECK_PERIOD_MS);
2097 }
2098
2099 static int i915_hdcp_component_bind(struct device *i915_kdev,
2100                                     struct device *mei_kdev, void *data)
2101 {
2102         struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
2103
2104         drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
2105         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2106         i915->display.hdcp.arbiter = (struct i915_hdcp_arbiter *)data;
2107         i915->display.hdcp.arbiter->hdcp_dev = mei_kdev;
2108         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2109
2110         return 0;
2111 }
2112
2113 static void i915_hdcp_component_unbind(struct device *i915_kdev,
2114                                        struct device *mei_kdev, void *data)
2115 {
2116         struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
2117
2118         drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
2119         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2120         i915->display.hdcp.arbiter = NULL;
2121         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2122 }
2123
2124 static const struct component_ops i915_hdcp_ops = {
2125         .bind   = i915_hdcp_component_bind,
2126         .unbind = i915_hdcp_component_unbind,
2127 };
2128
2129 static enum hdcp_ddi intel_get_hdcp_ddi_index(enum port port)
2130 {
2131         switch (port) {
2132         case PORT_A:
2133                 return HDCP_DDI_A;
2134         case PORT_B ... PORT_F:
2135                 return (enum hdcp_ddi)port;
2136         default:
2137                 return HDCP_DDI_INVALID_PORT;
2138         }
2139 }
2140
2141 static enum hdcp_transcoder intel_get_hdcp_transcoder(enum transcoder cpu_transcoder)
2142 {
2143         switch (cpu_transcoder) {
2144         case TRANSCODER_A ... TRANSCODER_D:
2145                 return (enum hdcp_transcoder)(cpu_transcoder | 0x10);
2146         default: /* eDP, DSI TRANSCODERS are non HDCP capable */
2147                 return HDCP_INVALID_TRANSCODER;
2148         }
2149 }
2150
2151 static int initialize_hdcp_port_data(struct intel_connector *connector,
2152                                      struct intel_digital_port *dig_port,
2153                                      const struct intel_hdcp_shim *shim)
2154 {
2155         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2156         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2157         enum port port = dig_port->base.port;
2158
2159         if (DISPLAY_VER(i915) < 12)
2160                 data->hdcp_ddi = intel_get_hdcp_ddi_index(port);
2161         else
2162                 /*
2163                  * As per the ME FW API expectation, for GEN 12+, hdcp_ddi is
2164                  * filled with zero (invalid port index).
2165                  */
2166                 data->hdcp_ddi = HDCP_DDI_INVALID_PORT;
2167
2168         /*
2169          * The associated transcoder is set and modified at modeset time, so
2170          * hdcp_transcoder is initialized here to the invalid transcoder
2171          * index. For platforms before Gen12 it stays invalid forever.
2172          */
2173         data->hdcp_transcoder = HDCP_INVALID_TRANSCODER;
2174
2175         data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
2176         data->protocol = (u8)shim->protocol;
2177
2178         if (!data->streams)
2179                 data->streams = kcalloc(INTEL_NUM_PIPES(i915),
2180                                         sizeof(struct hdcp2_streamid_type),
2181                                         GFP_KERNEL);
2182         if (!data->streams) {
2183                 drm_err(&i915->drm, "Out of Memory\n");
2184                 return -ENOMEM;
2185         }
2186
2187         return 0;
2188 }
2189
2190 static bool is_hdcp2_supported(struct drm_i915_private *i915)
2191 {
2192         if (intel_hdcp_gsc_cs_required(i915))
2193                 return true;
2194
2195         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2196                 return false;
2197
2198         return (DISPLAY_VER(i915) >= 10 ||
2199                 IS_KABYLAKE(i915) ||
2200                 IS_COFFEELAKE(i915) ||
2201                 IS_COMETLAKE(i915));
2202 }
2203
2204 void intel_hdcp_component_init(struct drm_i915_private *i915)
2205 {
2206         int ret;
2207
2208         if (!is_hdcp2_supported(i915))
2209                 return;
2210
2211         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2212         drm_WARN_ON(&i915->drm, i915->display.hdcp.comp_added);
2213
2214         i915->display.hdcp.comp_added = true;
2215         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2216         if (intel_hdcp_gsc_cs_required(i915))
2217                 ret = intel_hdcp_gsc_init(i915);
2218         else
2219                 ret = component_add_typed(i915->drm.dev, &i915_hdcp_ops,
2220                                           I915_COMPONENT_HDCP);
2221
2222         if (ret < 0) {
2223                 drm_dbg_kms(&i915->drm, "Failed at fw component add(%d)\n",
2224                             ret);
2225                 mutex_lock(&i915->display.hdcp.hdcp_mutex);
2226                 i915->display.hdcp.comp_added = false;
2227                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2228                 return;
2229         }
2230 }
2231
2232 static void intel_hdcp2_init(struct intel_connector *connector,
2233                              struct intel_digital_port *dig_port,
2234                              const struct intel_hdcp_shim *shim)
2235 {
2236         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2237         struct intel_hdcp *hdcp = &connector->hdcp;
2238         int ret;
2239
2240         ret = initialize_hdcp_port_data(connector, dig_port, shim);
2241         if (ret) {
2242                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2243                 return;
2244         }
2245
2246         hdcp->hdcp2_supported = true;
2247 }
2248
2249 int intel_hdcp_init(struct intel_connector *connector,
2250                     struct intel_digital_port *dig_port,
2251                     const struct intel_hdcp_shim *shim)
2252 {
2253         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2254         struct intel_hdcp *hdcp = &connector->hdcp;
2255         int ret;
2256
2257         if (!shim)
2258                 return -EINVAL;
2259
2260         if (is_hdcp2_supported(i915))
2261                 intel_hdcp2_init(connector, dig_port, shim);
2262
2263         ret =
2264         drm_connector_attach_content_protection_property(&connector->base,
2265                                                          hdcp->hdcp2_supported);
2266         if (ret) {
2267                 hdcp->hdcp2_supported = false;
2268                 kfree(dig_port->hdcp_port_data.streams);
2269                 return ret;
2270         }
2271
2272         hdcp->shim = shim;
2273         mutex_init(&hdcp->mutex);
2274         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2275         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2276         init_waitqueue_head(&hdcp->cp_irq_queue);
2277
2278         return 0;
2279 }
2280
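/*
 * Populate hdcp_port_data with the streams to protect: a single stream id
 * of 0 for SST, or one entry per active connector on this MST port using
 * the connector's VCPI as the stream id.
 */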
2281 static int
2282 intel_hdcp_set_streams(struct intel_digital_port *dig_port,
2283                        struct intel_atomic_state *state)
2284 {
2285         struct drm_connector_list_iter conn_iter;
2286         struct intel_digital_port *conn_dig_port;
2287         struct intel_connector *connector;
2288         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
2289         struct hdcp_port_data *data = &dig_port->hdcp_port_data;
2290
2291         if (!intel_encoder_is_mst(&dig_port->base)) {
2292                 data->k = 1;
2293                 data->streams[0].stream_id = 0;
2294                 return 0;
2295         }
2296
2297         data->k = 0;
2298
2299         drm_connector_list_iter_begin(&i915->drm, &conn_iter);
2300         for_each_intel_connector_iter(connector, &conn_iter) {
2301                 if (connector->base.status == connector_status_disconnected)
2302                         continue;
2303
2304                 if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
2305                         continue;
2306
2307                 conn_dig_port = intel_attached_dig_port(connector);
2308                 if (conn_dig_port != dig_port)
2309                         continue;
2310
2311                 data->streams[data->k].stream_id =
2312                         intel_conn_to_vcpi(&state->base, connector);
2313                 data->k++;
2314
2315                 /* if there is only one active stream */
2316                 if (dig_port->dp.active_mst_links <= 1)
2317                         break;
2318         }
2319         drm_connector_list_iter_end(&conn_iter);
2320
2321         if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
2322                 return -EINVAL;
2323
2324         return 0;
2325 }
2326
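/*
 * Enable content protection on the connector: prefer HDCP 2.2 when the
 * setup is capable of it, fall back to HDCP 1.4 unless Type 1 content was
 * requested, and schedule the periodic link check on success.
 */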
2327 int intel_hdcp_enable(struct intel_atomic_state *state,
2328                       struct intel_encoder *encoder,
2329                       const struct intel_crtc_state *pipe_config,
2330                       const struct drm_connector_state *conn_state)
2331 {
2332         struct drm_i915_private *i915 = to_i915(encoder->base.dev);
2333         struct intel_connector *connector =
2334                 to_intel_connector(conn_state->connector);
2335         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2336         struct intel_hdcp *hdcp = &connector->hdcp;
2337         unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
2338         int ret = -EINVAL;
2339
2340         if (!hdcp->shim)
2341                 return -ENOENT;
2342
2343         if (!connector->encoder) {
2344                 drm_err(&i915->drm, "[%s:%d] encoder is not initialized\n",
2345                         connector->base.name, connector->base.base.id);
2346                 return -ENODEV;
2347         }
2348
2349         mutex_lock(&hdcp->mutex);
2350         mutex_lock(&dig_port->hdcp_mutex);
2351         drm_WARN_ON(&i915->drm,
2352                     hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
2353         hdcp->content_type = (u8)conn_state->hdcp_content_type;
2354
2355         if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)) {
2356                 hdcp->cpu_transcoder = pipe_config->mst_master_transcoder;
2357                 hdcp->stream_transcoder = pipe_config->cpu_transcoder;
2358         } else {
2359                 hdcp->cpu_transcoder = pipe_config->cpu_transcoder;
2360                 hdcp->stream_transcoder = INVALID_TRANSCODER;
2361         }
2362
2363         if (DISPLAY_VER(i915) >= 12)
2364                 dig_port->hdcp_port_data.hdcp_transcoder =
2365                         intel_get_hdcp_transcoder(hdcp->cpu_transcoder);
2366
2367         /*
2368          * HDCP 2.2 is more secure than HDCP 1.4, so prefer HDCP 2.2 whenever
2369          * the setup is capable of it.
2370          */
2371         if (intel_hdcp2_capable(connector)) {
2372                 ret = intel_hdcp_set_streams(dig_port, state);
2373                 if (!ret) {
2374                         ret = _intel_hdcp2_enable(connector);
2375                         if (!ret)
2376                                 check_link_interval =
2377                                         DRM_HDCP2_CHECK_PERIOD_MS;
2378                 } else {
2379                         drm_dbg_kms(&i915->drm,
2380                                     "Set content streams failed: (%d)\n",
2381                                     ret);
2382                 }
2383         }
2384
2385         /*
2386          * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
2387          * be attempted.
2388          */
2389         if (ret && intel_hdcp_capable(connector) &&
2390             hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
2391                 ret = _intel_hdcp_enable(connector);
2392         }
2393
2394         if (!ret) {
2395                 queue_delayed_work(i915->unordered_wq, &hdcp->check_work,
2396                                    check_link_interval);
2397                 intel_hdcp_update_value(connector,
2398                                         DRM_MODE_CONTENT_PROTECTION_ENABLED,
2399                                         true);
2400         }
2401
2402         mutex_unlock(&dig_port->hdcp_mutex);
2403         mutex_unlock(&hdcp->mutex);
2404         return ret;
2405 }
2406
2407 int intel_hdcp_disable(struct intel_connector *connector)
2408 {
2409         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
2410         struct intel_hdcp *hdcp = &connector->hdcp;
2411         int ret = 0;
2412
2413         if (!hdcp->shim)
2414                 return -ENOENT;
2415
2416         mutex_lock(&hdcp->mutex);
2417         mutex_lock(&dig_port->hdcp_mutex);
2418
2419         if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
2420                 goto out;
2421
2422         intel_hdcp_update_value(connector,
2423                                 DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
2424         if (hdcp->hdcp2_encrypted)
2425                 ret = _intel_hdcp2_disable(connector, false);
2426         else if (hdcp->hdcp_encrypted)
2427                 ret = _intel_hdcp_disable(connector);
2428
2429 out:
2430         mutex_unlock(&dig_port->hdcp_mutex);
2431         mutex_unlock(&hdcp->mutex);
2432         cancel_delayed_work_sync(&hdcp->check_work);
2433         return ret;
2434 }
2435
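/*
 * Re-evaluate content protection on a commit: disable HDCP when it became
 * UNDESIRED or the content type changed, and (re)enable it when the state
 * is DESIRED and HDCP is not already enabled.
 */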
2436 void intel_hdcp_update_pipe(struct intel_atomic_state *state,
2437                             struct intel_encoder *encoder,
2438                             const struct intel_crtc_state *crtc_state,
2439                             const struct drm_connector_state *conn_state)
2440 {
2441         struct intel_connector *connector =
2442                                 to_intel_connector(conn_state->connector);
2443         struct intel_hdcp *hdcp = &connector->hdcp;
2444         bool content_protection_type_changed, desired_and_not_enabled = false;
2445         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2446
2447         if (!connector->hdcp.shim)
2448                 return;
2449
2450         content_protection_type_changed =
2451                 (conn_state->hdcp_content_type != hdcp->content_type &&
2452                  conn_state->content_protection !=
2453                  DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
2454
2455         /*
2456          * If a content type change is requested during an active HDCP
2457          * session, disable HDCP and re-enable it with the new type value.
2458          */
2459         if (conn_state->content_protection ==
2460             DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
2461             content_protection_type_changed)
2462                 intel_hdcp_disable(connector);
2463
2464         /*
2465          * Mark the HDCP state as DESIRED after disabling HDCP as part of
2466          * the type change procedure.
2467          */
2468         if (content_protection_type_changed) {
2469                 mutex_lock(&hdcp->mutex);
2470                 hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2471                 drm_connector_get(&connector->base);
2472                 queue_work(i915->unordered_wq, &hdcp->prop_work);
2473                 mutex_unlock(&hdcp->mutex);
2474         }
2475
2476         if (conn_state->content_protection ==
2477             DRM_MODE_CONTENT_PROTECTION_DESIRED) {
2478                 mutex_lock(&hdcp->mutex);
2479                 /* Avoid enabling HDCP if it is already ENABLED */
2480                 desired_and_not_enabled =
2481                         hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
2482                 mutex_unlock(&hdcp->mutex);
2483                 /*
2484                  * If HDCP is already ENABLED and the CP property is DESIRED,
2485                  * schedule prop_work to report the correct property to user space.
2486                  */
2487                 if (!desired_and_not_enabled && !content_protection_type_changed) {
2488                         drm_connector_get(&connector->base);
2489                         queue_work(i915->unordered_wq, &hdcp->prop_work);
2490                 }
2491         }
2492
2493         if (desired_and_not_enabled || content_protection_type_changed)
2494                 intel_hdcp_enable(state, encoder, crtc_state, conn_state);
2495 }
2496
2497 void intel_hdcp_component_fini(struct drm_i915_private *i915)
2498 {
2499         mutex_lock(&i915->display.hdcp.hdcp_mutex);
2500         if (!i915->display.hdcp.comp_added) {
2501                 mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2502                 return;
2503         }
2504
2505         i915->display.hdcp.comp_added = false;
2506         mutex_unlock(&i915->display.hdcp.hdcp_mutex);
2507
2508         if (intel_hdcp_gsc_cs_required(i915))
2509                 intel_hdcp_gsc_fini(i915);
2510         else
2511                 component_del(i915->drm.dev, &i915_hdcp_ops);
2512 }
2513
2514 void intel_hdcp_cleanup(struct intel_connector *connector)
2515 {
2516         struct intel_hdcp *hdcp = &connector->hdcp;
2517
2518         if (!hdcp->shim)
2519                 return;
2520
2521         /*
2522          * If the connector is registered, it's possible userspace could kick
2523          * off another HDCP enable, which would re-spawn the workers.
2524          */
2525         drm_WARN_ON(connector->base.dev,
2526                 connector->base.registration_state == DRM_CONNECTOR_REGISTERED);
2527
2528         /*
2529          * Now that the connector is not registered, check_work won't be run,
2530          * but cancel any outstanding instances of it
2531          */
2532         cancel_delayed_work_sync(&hdcp->check_work);
2533
2534         /*
2535          * We don't cancel prop_work in the same way as check_work since it
2536          * requires connection_mutex which could be held while calling this
2537          * function. Instead, we rely on the connector references grabbed before
2538          * scheduling prop_work to ensure the connector is alive when prop_work
2539          * is run. So if we're in the destroy path (which is where this
2540          * function should be called), we're "guaranteed" that prop_work is not
2541          * active (tl;dr This Should Never Happen).
2542          */
2543         drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));
2544
2545         mutex_lock(&hdcp->mutex);
2546         hdcp->shim = NULL;
2547         mutex_unlock(&hdcp->mutex);
2548 }
2549
2550 void intel_hdcp_atomic_check(struct drm_connector *connector,
2551                              struct drm_connector_state *old_state,
2552                              struct drm_connector_state *new_state)
2553 {
2554         u64 old_cp = old_state->content_protection;
2555         u64 new_cp = new_state->content_protection;
2556         struct drm_crtc_state *crtc_state;
2557
2558         if (!new_state->crtc) {
2559                 /*
2560                  * If the connector is being disabled with CP enabled, mark it
2561                  * desired so it's re-enabled when the connector is brought back
2562                  */
2563                 if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2564                         new_state->content_protection =
2565                                 DRM_MODE_CONTENT_PROTECTION_DESIRED;
2566                 return;
2567         }
2568
2569         crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
2570                                                    new_state->crtc);
2571         /*
2572          * Fix the HDCP uapi content protection state in case of modeset.
2573          * FIXME: As per HDCP content protection property uapi doc, an uevent()
2574          * need to be sent if there is transition from ENABLED->DESIRED.
2575          */
2576         if (drm_atomic_crtc_needs_modeset(crtc_state) &&
2577             (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
2578             new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
2579                 new_state->content_protection =
2580                         DRM_MODE_CONTENT_PROTECTION_DESIRED;
2581
2582         /*
2583          * Nothing to do if the state didn't change, or if HDCP was activated
2584          * since the last commit, provided the HDCP content type is unchanged.
2585          */
2586         if (old_cp == new_cp ||
2587             (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
2588              new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
2589                 if (old_state->hdcp_content_type ==
2590                                 new_state->hdcp_content_type)
2591                         return;
2592         }
2593
2594         crtc_state->mode_changed = true;
2595 }
2596
2597 /* Handles the CP_IRQ raised from the DP HDCP sink */
2598 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2599 {
2600         struct intel_hdcp *hdcp = &connector->hdcp;
2601         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2602
2603         if (!hdcp->shim)
2604                 return;
2605
2606         atomic_inc(&connector->hdcp.cp_irq_count);
2607         wake_up_all(&connector->hdcp.cp_irq_queue);
2608
2609         queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0);
2610 }