]> Git Repo - J-linux.git/blob - drivers/gpu/drm/i915/display/intel_hdcp.c
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
[J-linux.git] / drivers / gpu / drm / i915 / display / intel_hdcp.c
1 /* SPDX-License-Identifier: MIT */
2 /*
3  * Copyright (C) 2017 Google, Inc.
 * Copyright (C) 2017-2019, Intel Corporation.
5  *
6  * Authors:
7  * Sean Paul <[email protected]>
8  * Ramalingam C <[email protected]>
9  */
10
11 #include <linux/component.h>
12 #include <linux/i2c.h>
13 #include <linux/random.h>
14
15 #include <drm/drm_hdcp.h>
16 #include <drm/i915_component.h>
17
18 #include "i915_reg.h"
19 #include "intel_display_power.h"
20 #include "intel_display_types.h"
21 #include "intel_hdcp.h"
22 #include "intel_sideband.h"
23 #include "intel_connector.h"
24
25 #define KEY_LOAD_TRIES  5
26 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS        50
27 #define HDCP2_LC_RETRY_CNT                      3
28
29 static
30 bool intel_hdcp_is_ksv_valid(u8 *ksv)
31 {
32         int i, ones = 0;
33         /* KSV has 20 1's and 20 0's */
34         for (i = 0; i < DRM_HDCP_KSV_LEN; i++)
35                 ones += hweight8(ksv[i]);
36         if (ones != 20)
37                 return false;
38
39         return true;
40 }
41
42 static
43 int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
44                                const struct intel_hdcp_shim *shim, u8 *bksv)
45 {
46         struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
47         int ret, i, tries = 2;
48
49         /* HDCP spec states that we must retry the bksv if it is invalid */
50         for (i = 0; i < tries; i++) {
51                 ret = shim->read_bksv(dig_port, bksv);
52                 if (ret)
53                         return ret;
54                 if (intel_hdcp_is_ksv_valid(bksv))
55                         break;
56         }
57         if (i == tries) {
58                 drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
59                 return -ENODEV;
60         }
61
62         return 0;
63 }
64
65 /* Is HDCP1.4 capable on Platform and Sink */
66 bool intel_hdcp_capable(struct intel_connector *connector)
67 {
68         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
69         const struct intel_hdcp_shim *shim = connector->hdcp.shim;
70         bool capable = false;
71         u8 bksv[5];
72
73         if (!shim)
74                 return capable;
75
76         if (shim->hdcp_capable) {
77                 shim->hdcp_capable(dig_port, &capable);
78         } else {
79                 if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
80                         capable = true;
81         }
82
83         return capable;
84 }
85
86 /* Is HDCP2.2 capable on Platform and Sink */
87 bool intel_hdcp2_capable(struct intel_connector *connector)
88 {
89         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
90         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
91         struct intel_hdcp *hdcp = &connector->hdcp;
92         bool capable = false;
93
94         /* I915 support for HDCP2.2 */
95         if (!hdcp->hdcp2_supported)
96                 return false;
97
98         /* MEI interface is solid */
99         mutex_lock(&dev_priv->hdcp_comp_mutex);
100         if (!dev_priv->hdcp_comp_added ||  !dev_priv->hdcp_master) {
101                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
102                 return false;
103         }
104         mutex_unlock(&dev_priv->hdcp_comp_mutex);
105
106         /* Sink's capability for HDCP2.2 */
107         hdcp->shim->hdcp_2_2_capable(dig_port, &capable);
108
109         return capable;
110 }
111
112 static bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
113                               enum transcoder cpu_transcoder, enum port port)
114 {
115         return intel_de_read(dev_priv,
116                              HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
117                HDCP_STATUS_ENC;
118 }
119
120 static bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
121                                enum transcoder cpu_transcoder, enum port port)
122 {
123         return intel_de_read(dev_priv,
124                              HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
125                LINK_ENCRYPTION_STATUS;
126 }
127
/*
 * Wait for the repeater's KSV FIFO to become ready.
 *
 * Returns 0 when the sink reports the KSV list ready, the shim's read error
 * if a poll iteration fails, or -ETIMEDOUT when the 5 second budget expires.
 */
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port,
				    const struct intel_hdcp_shim *shim)
{
	int ret, read_ret;
	bool ksv_ready;

	/*
	 * Poll for ksv list ready (spec says max time allowed is 5s).
	 * __wait_for() re-evaluates the read on every iteration and stops
	 * early when the read fails or ksv_ready becomes true; the poll
	 * interval ramps from 1ms up to 100ms.
	 */
	ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port,
							 &ksv_ready),
			 read_ret || ksv_ready, 5 * 1000 * 1000, 1000,
			 100 * 1000);
	if (ret)
		return ret;
	if (read_ret)
		return read_ret;
	if (!ksv_ready)
		return -ETIMEDOUT;

	return 0;
}
148
149 static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
150 {
151         enum i915_power_well_id id;
152         intel_wakeref_t wakeref;
153         bool enabled = false;
154
155         /*
156          * On HSW and BDW, Display HW loads the Key as soon as Display resumes.
157          * On all BXT+, SW can load the keys only when the PW#1 is turned on.
158          */
159         if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
160                 id = HSW_DISP_PW_GLOBAL;
161         else
162                 id = SKL_DISP_PW_1;
163
164         /* PG1 (power well #1) needs to be enabled */
165         with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
166                 enabled = intel_display_power_well_is_enabled(dev_priv, id);
167
168         /*
169          * Another req for hdcp key loadability is enabled state of pll for
170          * cdclk. Without active crtc we wont land here. So we are assuming that
171          * cdclk is already on.
172          */
173
174         return enabled;
175 }
176
/* Trigger a HW key clear and reset all key load / fuse status bits. */
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
	/*
	 * Writing all status bits back presumably acknowledges/clears them
	 * (write-to-clear semantics) — NOTE(review): confirm against bspec.
	 */
	intel_de_write(dev_priv, HDCP_KEY_STATUS,
		       HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
183
/*
 * Load the HDCP1.4 keys into the hardware, if not already loaded.
 *
 * Returns 0 on success (keys loaded and status OK), -ENXIO when HSW/BDW
 * hardware failed to preload the keys or the loaded keys fail the status
 * check, or a pcode/register-wait error code otherwise.
 */
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* Nothing to do if the keys are already loaded and validated */
	val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
	if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
		return 0;

	/*
	 * On HSW and BDW HW loads the HDCP1.4 Key when Display comes
	 * out of reset. So if Key is not already loaded, its an error state.
	 */
	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
			return -ENXIO;

	/*
	 * Initiate loading the HDCP key from fuses.
	 *
	 * BXT+ platforms, HDCP key needs to be loaded by SW. Only Gen 9
	 * platforms except BXT and GLK, differ in the key load trigger process
	 * from other platforms. So GEN9_BC uses the GT Driver Mailbox i/f.
	 */
	if (IS_GEN9_BC(dev_priv)) {
		ret = sandybridge_pcode_write(dev_priv,
					      SKL_PCODE_LOAD_HDCP_KEYS, 1);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to initiate HDCP key load (%d)\n",
				ret);
			return ret;
		}
	} else {
		intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
	}

	/* Wait for the keys to load (500us) */
	ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
					HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
					10, 1, &val);
	if (ret)
		return ret;
	else if (!(val & HDCP_KEY_LOAD_STATUS))
		return -ENXIO;

	/* Send Aksv over to PCH display for use in authentication */
	intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);

	return 0;
}
235
236 /* Returns updated SHA-1 index */
237 static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
238 {
239         intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
240         if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
241                 drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
242                 return -ETIMEDOUT;
243         }
244         return 0;
245 }
246
/*
 * Return the HDCP_REP_CTL REP_PRESENT | SHA1_M0 selection bits for the
 * given transcoder (gen12+) or DDI port (pre-gen12).
 *
 * NOTE(review): on an unknown transcoder/port this returns -EINVAL through
 * a u32 return type, so callers receive it as a large bitmask rather than
 * an error code — confirm callers never hit the default branches.
 */
static
u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
				enum transcoder cpu_transcoder, enum port port)
{
	/* Gen12+ moved HDCP from the DDI to the transcoder */
	if (INTEL_GEN(dev_priv) >= 12) {
		switch (cpu_transcoder) {
		case TRANSCODER_A:
			return HDCP_TRANSA_REP_PRESENT |
			       HDCP_TRANSA_SHA1_M0;
		case TRANSCODER_B:
			return HDCP_TRANSB_REP_PRESENT |
			       HDCP_TRANSB_SHA1_M0;
		case TRANSCODER_C:
			return HDCP_TRANSC_REP_PRESENT |
			       HDCP_TRANSC_SHA1_M0;
		case TRANSCODER_D:
			return HDCP_TRANSD_REP_PRESENT |
			       HDCP_TRANSD_SHA1_M0;
		default:
			drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
				cpu_transcoder);
			return -EINVAL;
		}
	}

	switch (port) {
	case PORT_A:
		return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0;
	case PORT_B:
		return HDCP_DDIB_REP_PRESENT | HDCP_DDIB_SHA1_M0;
	case PORT_C:
		return HDCP_DDIC_REP_PRESENT | HDCP_DDIC_SHA1_M0;
	case PORT_D:
		return HDCP_DDID_REP_PRESENT | HDCP_DDID_SHA1_M0;
	case PORT_E:
		return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
	default:
		drm_err(&dev_priv->drm, "Unknown port %d\n", port);
		return -EINVAL;
	}
}
288
/*
 * Validate the repeater's V' value (HDCP1.4 Part 2).
 *
 * Writes the received V' parts to the HW, streams the KSV list followed by
 * BINFO/BSTATUS and M0 through the HW SHA-1 engine, and lets the HW compare
 * its computed digest against V'.
 *
 * Returns 0 on a reported SHA-1 match, -ENXIO on digest mismatch,
 * -ETIMEDOUT if the SHA engine stalls, -EINVAL on an impossible leftover
 * count, or a shim read error.
 */
static
int intel_hdcp_validate_v_prime(struct intel_connector *connector,
				const struct intel_hdcp_shim *shim,
				u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	u32 vprime, sha_text, sha_leftovers, rep_ctl;
	int ret, i, j, sha_idx;

	/* Process V' values from the receiver */
	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
		ret = shim->read_v_prime_part(dig_port, i, &vprime);
		if (ret)
			return ret;
		intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
	}

	/*
	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
	 * index will keep track of our progress through the 64 bytes as well as
	 * helping us work the 40-bit KSVs through our 32-bit register.
	 *
	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
	 */
	sha_idx = 0;
	sha_text = 0;
	sha_leftovers = 0;
	rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	for (i = 0; i < num_downstream; i++) {
		unsigned int sha_empty;
		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];

		/* Fill up the empty slots in sha_text and write it out */
		sha_empty = sizeof(sha_text) - sha_leftovers;
		for (j = 0; j < sha_empty; j++) {
			/* Big-endian packing: first byte lands in the MSB */
			u8 off = ((sizeof(sha_text) - j - 1 - sha_leftovers) * 8);
			sha_text |= ksv[j] << off;
		}

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;

		/* Programming guide writes this every 64 bytes */
		sha_idx += sizeof(sha_text);
		if (!(sha_idx % 64))
			intel_de_write(dev_priv, HDCP_REP_CTL,
				       rep_ctl | HDCP_SHA1_TEXT_32);

		/* Store the leftover bytes from the ksv in sha_text */
		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
		sha_text = 0;
		for (j = 0; j < sha_leftovers; j++)
			sha_text |= ksv[sha_empty + j] <<
					((sizeof(sha_text) - j - 1) * 8);

		/*
		 * If we still have room in sha_text for more data, continue.
		 * Otherwise, write it out immediately.
		 */
		if (sizeof(sha_text) > sha_leftovers)
			continue;

		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_leftovers = 0;
		sha_text = 0;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
	 * bytes are leftover from the last ksv, we might be able to fit them
	 * all in sha_text (first 2 cases), or we might need to split them up
	 * into 2 writes (last 2 cases).
	 */
	if (sha_leftovers == 0) {
		/* Write 16 bits of text, 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv,
					   bstatus[0] << 8 | bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 16 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_16);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 1) {
		/* Write 24 bits of text, 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
		/* Only 24-bits of data, must be in the LSB */
		sha_text = (sha_text & 0xffffff00) >> 8;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

	} else if (sha_leftovers == 2) {
		/* Write 32 bits of text */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0] << 8 | bstatus[1];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 64 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		for (i = 0; i < 2; i++) {
			ret = intel_write_sha_text(dev_priv, 0);
			if (ret < 0)
				return ret;
			sha_idx += sizeof(sha_text);
		}

		/*
		 * Terminate the SHA-1 stream by hand. For the other leftover
		 * cases this is appended by the hardware.
		 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text = DRM_HDCP_SHA1_TERMINATOR << 24;
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else if (sha_leftovers == 3) {
		/* Write 32 bits of text (filled from LSB) */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_32);
		sha_text |= bstatus[0];
		ret = intel_write_sha_text(dev_priv, sha_text);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of text (filled from LSB), 24 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_8);
		ret = intel_write_sha_text(dev_priv, bstatus[1]);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 32 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_0);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);

		/* Write 8 bits of M0 */
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       rep_ctl | HDCP_SHA1_TEXT_24);
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
			    sha_leftovers);
		return -EINVAL;
	}

	intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
		ret = intel_write_sha_text(dev_priv, 0);
		if (ret < 0)
			return ret;
		sha_idx += sizeof(sha_text);
	}

	/*
	 * Last write gets the length of the concatenation in bits. That is:
	 *  - 5 bytes per device
	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
	 */
	sha_text = (num_downstream * 5 + 10) * 8;
	ret = intel_write_sha_text(dev_priv, sha_text);
	if (ret < 0)
		return ret;

	/* Tell the HW we're done with the hash and wait for it to ACK */
	intel_de_write(dev_priv, HDCP_REP_CTL,
		       rep_ctl | HDCP_SHA1_COMPLETE_HASH);
	if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
				  HDCP_SHA1_COMPLETE, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
		return -ETIMEDOUT;
	}
	/* HW compares its digest against the V' parts written earlier */
	if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
		drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
		return -ENXIO;
	}

	return 0;
}
531
/*
 * Implements Part 2 of the HDCP authorization procedure (repeater
 * authentication): waits for the KSV FIFO, validates the downstream
 * topology, checks the KSV list against the revocation list, and validates
 * V' (retried up to 3 times per the DP spec).
 *
 * Returns 0 on success; -EPERM for topology/revocation violations, -EINVAL
 * for a zero-device repeater, -ENOMEM, or a poll/read/validation error.
 */
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	const struct intel_hdcp_shim *shim = connector->hdcp.shim;
	u8 bstatus[2], num_downstream, *ksv_fifo;
	int ret, i, tries = 3;

	ret = intel_hdcp_poll_ksv_fifo(dig_port, shim);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "KSV list failed to become ready (%d)\n", ret);
		return ret;
	}

	ret = shim->read_bstatus(dig_port, bstatus);
	if (ret)
		return ret;

	/* Reject topologies exceeding the HDCP1.4 device/cascade limits */
	if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
	    DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
		drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
		return -EPERM;
	}

	/*
	 * When repeater reports 0 device count, HDCP1.4 spec allows disabling
	 * the HDCP encryption. That implies that repeater can't have its own
	 * display. As there is no consumption of encrypted content in the
	 * repeater with 0 downstream devices, we are failing the
	 * authentication.
	 */
	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
	if (num_downstream == 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Repeater with zero downstream devices\n");
		return -EINVAL;
	}

	ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
	if (!ksv_fifo) {
		drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
		return -ENOMEM;
	}

	ret = shim->read_ksv_fifo(dig_port, num_downstream, ksv_fifo);
	if (ret)
		goto err;

	/* Fail if any downstream KSV appears on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
					num_downstream) > 0) {
		drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
		ret = -EPERM;
		goto err;
	}

	/*
	 * When V prime mismatches, DP Spec mandates re-read of
	 * V prime atleast twice.
	 */
	for (i = 0; i < tries; i++) {
		ret = intel_hdcp_validate_v_prime(connector, shim,
						  ksv_fifo, num_downstream,
						  bstatus);
		if (!ret)
			break;
	}

	/* i == tries means all attempts failed; ret holds the last error */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "V Prime validation failed.(%d)\n", ret);
		goto err;
	}

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
		    num_downstream);
	ret = 0;
err:
	kfree(ksv_fifo);
	return ret;
}
615
/*
 * Implements Part 1 of the HDCP authorization procedure: capability check,
 * An generation and exchange, Bksv validation and revocation check,
 * authentication/encryption enable, R0/R0' comparison, and finally the
 * downstream (Part 2) flow if a repeater is present.
 *
 * Returns 0 when authentication succeeds and encryption is confirmed.
 */
static int intel_hdcp_auth(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
	enum port port = dig_port->base.port;
	unsigned long r0_prime_gen_start;
	int ret, i, tries = 2;
	/* Unions let us move 64-bit An / 40-bit Bksv / 16-bit Ri' between
	 * the 32-bit register view and the byte-stream view the shim uses. */
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_AN_LEN];
	} an;
	union {
		u32 reg[2];
		u8 shim[DRM_HDCP_KSV_LEN];
	} bksv;
	union {
		u32 reg;
		u8 shim[DRM_HDCP_RI_LEN];
	} ri;
	bool repeater_present, hdcp_capable;

	/*
	 * Detects whether the display is HDCP capable. Although we check for
	 * valid Bksv below, the HDCP over DP spec requires that we check
	 * whether the display supports HDCP before we write An. For HDMI
	 * displays, this is not necessary.
	 */
	if (shim->hdcp_capable) {
		ret = shim->hdcp_capable(dig_port, &hdcp_capable);
		if (ret)
			return ret;
		if (!hdcp_capable) {
			drm_dbg_kms(&dev_priv->drm,
				    "Panel is not HDCP capable\n");
			return -EINVAL;
		}
	}

	/* Initialize An with 2 random values and acquire it */
	for (i = 0; i < 2; i++)
		intel_de_write(dev_priv,
			       HDCP_ANINIT(dev_priv, cpu_transcoder, port),
			       get_random_u32());
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_CAPTURE_AN);

	/* Wait for An to be acquired */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_AN_READY, 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for An\n");
		return -ETIMEDOUT;
	}

	/* Read the HW-generated An and hand it to the sink with our Aksv */
	an.reg[0] = intel_de_read(dev_priv,
				  HDCP_ANLO(dev_priv, cpu_transcoder, port));
	an.reg[1] = intel_de_read(dev_priv,
				  HDCP_ANHI(dev_priv, cpu_transcoder, port));
	ret = shim->write_an_aksv(dig_port, an.shim);
	if (ret)
		return ret;

	/* The R0' wait below is measured from this point (Aksv written) */
	r0_prime_gen_start = jiffies;

	memset(&bksv, 0, sizeof(bksv));

	ret = intel_hdcp_read_valid_bksv(dig_port, shim, bksv.shim);
	if (ret < 0)
		return ret;

	/* Fail if the sink's Bksv is on the SRM revocation list */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1) > 0) {
		drm_err(&dev_priv->drm, "BKSV is revoked\n");
		return -EPERM;
	}

	intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
		       bksv.reg[0]);
	intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
		       bksv.reg[1]);

	ret = shim->repeater_present(dig_port, &repeater_present);
	if (ret)
		return ret;
	if (repeater_present)
		intel_de_write(dev_priv, HDCP_REP_CTL,
			       intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));

	ret = shim->toggle_signalling(dig_port, cpu_transcoder, true);
	if (ret)
		return ret;

	/* Kick off HW authentication and encryption */
	intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
		       HDCP_CONF_AUTH_AND_ENC);

	/* Wait for R0 ready */
	if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
		     (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
		drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
		return -ETIMEDOUT;
	}

	/*
	 * Wait for R0' to become available. The spec says 100ms from Aksv, but
	 * some monitors can take longer than this. We'll set the timeout at
	 * 300ms just to be sure.
	 *
	 * On DP, there's an R0_READY bit available but no such bit
	 * exists on HDMI. Since the upper-bound is the same, we'll just do
	 * the stupid thing instead of polling on one and not the other.
	 */
	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 300);

	tries = 3;

	/*
	 * DP HDCP Spec mandates the two more reattempt to read R0, incase
	 * of R0 mismatch.
	 */
	for (i = 0; i < tries; i++) {
		ri.reg = 0;
		ret = shim->read_ri_prime(dig_port, ri.shim);
		if (ret)
			return ret;
		intel_de_write(dev_priv,
			       HDCP_RPRIME(dev_priv, cpu_transcoder, port),
			       ri.reg);

		/* Wait for Ri prime match */
		if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
			      (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
			break;
	}

	/* i == tries means every Ri'/Ri comparison attempt timed out */
	if (i == tries) {
		drm_dbg_kms(&dev_priv->drm,
			    "Timed out waiting for Ri prime match (%x)\n",
			    intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
					  cpu_transcoder, port)));
		return -ETIMEDOUT;
	}

	/* Wait for encryption confirmation */
	if (intel_de_wait_for_set(dev_priv,
				  HDCP_STATUS(dev_priv, cpu_transcoder, port),
				  HDCP_STATUS_ENC,
				  ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
		drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
		return -ETIMEDOUT;
	}

	/*
	 * XXX: If we have MST-connected devices, we need to enable encryption
	 * on those as well.
	 */

	if (repeater_present)
		return intel_hdcp_auth_downstream(connector);

	drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
	return 0;
}
781
782 static int _intel_hdcp_disable(struct intel_connector *connector)
783 {
784         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
785         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
786         struct intel_hdcp *hdcp = &connector->hdcp;
787         enum port port = dig_port->base.port;
788         enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
789         u32 repeater_ctl;
790         int ret;
791
792         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
793                     connector->base.name, connector->base.base.id);
794
795         /*
796          * If there are other connectors on this port using HDCP, don't disable
797          * it. Instead, toggle the HDCP signalling off on that particular
798          * connector/pipe and exit.
799          */
800         if (dig_port->num_hdcp_streams > 0) {
801                 ret = hdcp->shim->toggle_signalling(dig_port,
802                                                     cpu_transcoder, false);
803                 if (ret)
804                         DRM_ERROR("Failed to disable HDCP signalling\n");
805                 return ret;
806         }
807
808         hdcp->hdcp_encrypted = false;
809         intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
810         if (intel_de_wait_for_clear(dev_priv,
811                                     HDCP_STATUS(dev_priv, cpu_transcoder, port),
812                                     ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
813                 drm_err(&dev_priv->drm,
814                         "Failed to disable HDCP, timeout clearing status\n");
815                 return -ETIMEDOUT;
816         }
817
818         repeater_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
819                                                    port);
820         intel_de_write(dev_priv, HDCP_REP_CTL,
821                        intel_de_read(dev_priv, HDCP_REP_CTL) & ~repeater_ctl);
822
823         ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder, false);
824         if (ret) {
825                 drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
826                 return ret;
827         }
828
829         drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
830         return 0;
831 }
832
833 static int _intel_hdcp_enable(struct intel_connector *connector)
834 {
835         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
836         struct intel_hdcp *hdcp = &connector->hdcp;
837         int i, ret, tries = 3;
838
839         drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
840                     connector->base.name, connector->base.base.id);
841
842         if (!hdcp_key_loadable(dev_priv)) {
843                 drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
844                 return -ENXIO;
845         }
846
847         for (i = 0; i < KEY_LOAD_TRIES; i++) {
848                 ret = intel_hdcp_load_keys(dev_priv);
849                 if (!ret)
850                         break;
851                 intel_hdcp_clear_keys(dev_priv);
852         }
853         if (ret) {
854                 drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
855                         ret);
856                 return ret;
857         }
858
859         /* Incase of authentication failures, HDCP spec expects reauth. */
860         for (i = 0; i < tries; i++) {
861                 ret = intel_hdcp_auth(connector);
862                 if (!ret) {
863                         hdcp->hdcp_encrypted = true;
864                         return 0;
865                 }
866
867                 drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
868
869                 /* Ensuring HDCP encryption and signalling are stopped. */
870                 _intel_hdcp_disable(connector);
871         }
872
873         drm_dbg_kms(&dev_priv->drm,
874                     "HDCP authentication failed (%d tries/%d)\n", tries, ret);
875         return ret;
876 }
877
/* Resolve the intel_connector that embeds the given intel_hdcp state. */
static struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
{
	return container_of(hdcp, struct intel_connector, hdcp);
}
882
/*
 * Update the cached content-protection value for @connector and keep the
 * per-port count of HDCP-enabled streams in sync. When @update_property is
 * true, the property worker is scheduled to push the new value to userspace.
 *
 * Caller must hold hdcp->mutex; dig_port->hdcp_mutex must also be held
 * whenever the value actually changes (the stream count is adjusted).
 */
static void intel_hdcp_update_value(struct intel_connector *connector,
				    u64 value, bool update_property)
{
	struct drm_device *dev = connector->base.dev;
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;

	drm_WARN_ON(connector->base.dev, !mutex_is_locked(&hdcp->mutex));

	if (hdcp->value == value)
		return;

	drm_WARN_ON(dev, !mutex_is_locked(&dig_port->hdcp_mutex));

	/* Leaving ENABLED: one fewer protected stream on this port. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		if (!drm_WARN_ON(dev, dig_port->num_hdcp_streams == 0))
			dig_port->num_hdcp_streams--;
	} else if (value == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
		/* Entering ENABLED: account for the new protected stream. */
		dig_port->num_hdcp_streams++;
	}

	hdcp->value = value;
	if (update_property) {
		/* Reference is dropped by intel_hdcp_prop_work(). */
		drm_connector_get(&connector->base);
		schedule_work(&hdcp->prop_work);
	}
}
910
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	/* Lock order: per-connector hdcp mutex, then per-port hdcp mutex. */
	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	cpu_transcoder = hdcp->cpu_transcoder;

	/* Check_link valid only when HDCP1.4 is enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * HW should still report encryption enabled here; if not, fall back
	 * to DESIRED so userspace can re-request protection.
	 */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"%s:%d HDCP link stopped encryption,%x\n",
			connector->base.name, connector->base.base.id,
			intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	/* Shim reports the link as intact: (re)assert ENABLED and leave. */
	if (hdcp->shim->check_link(dig_port, connector)) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_ENABLED, true);
		}
		goto out;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "[%s:%d] HDCP link failed, retrying authentication\n",
		    connector->base.name, connector->base.base.id);

	/* Link check failed: tear HDCP down and run full reauthentication. */
	ret = _intel_hdcp_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = _intel_hdcp_enable(connector);
	if (ret) {
		drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
981
/*
 * Deferred worker that pushes the current hdcp->value out to the connector's
 * content-protection property. Scheduled from intel_hdcp_update_value(),
 * which takes a connector reference that is dropped here.
 */
static void intel_hdcp_prop_work(struct work_struct *work)
{
	struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
					       prop_work);
	struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);

	/* Property updates require connection_mutex; take it before hdcp. */
	drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
	mutex_lock(&hdcp->mutex);

	/*
	 * This worker is only used to flip between ENABLED/DESIRED. Either of
	 * those to UNDESIRED is handled by core. If value == UNDESIRED,
	 * we're running just after hdcp has been disabled, so just exit
	 */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		drm_hdcp_update_content_protection(&connector->base,
						   hdcp->value);

	mutex_unlock(&hdcp->mutex);
	drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);

	/* Pairs with the drm_connector_get() done when scheduling us. */
	drm_connector_put(&connector->base);
}
1006
1007 bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
1008 {
1009         return INTEL_INFO(dev_priv)->display.has_hdcp &&
1010                         (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
1011 }
1012
1013 static int
1014 hdcp2_prepare_ake_init(struct intel_connector *connector,
1015                        struct hdcp2_ake_init *ake_data)
1016 {
1017         struct hdcp_port_data *data = &connector->hdcp.port_data;
1018         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1019         struct i915_hdcp_comp_master *comp;
1020         int ret;
1021
1022         mutex_lock(&dev_priv->hdcp_comp_mutex);
1023         comp = dev_priv->hdcp_master;
1024
1025         if (!comp || !comp->ops) {
1026                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1027                 return -EINVAL;
1028         }
1029
1030         ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
1031         if (ret)
1032                 drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
1033                             ret);
1034         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1035
1036         return ret;
1037 }
1038
1039 static int
1040 hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
1041                                 struct hdcp2_ake_send_cert *rx_cert,
1042                                 bool *paired,
1043                                 struct hdcp2_ake_no_stored_km *ek_pub_km,
1044                                 size_t *msg_sz)
1045 {
1046         struct hdcp_port_data *data = &connector->hdcp.port_data;
1047         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1048         struct i915_hdcp_comp_master *comp;
1049         int ret;
1050
1051         mutex_lock(&dev_priv->hdcp_comp_mutex);
1052         comp = dev_priv->hdcp_master;
1053
1054         if (!comp || !comp->ops) {
1055                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1056                 return -EINVAL;
1057         }
1058
1059         ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
1060                                                          rx_cert, paired,
1061                                                          ek_pub_km, msg_sz);
1062         if (ret < 0)
1063                 drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
1064                             ret);
1065         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1066
1067         return ret;
1068 }
1069
1070 static int hdcp2_verify_hprime(struct intel_connector *connector,
1071                                struct hdcp2_ake_send_hprime *rx_hprime)
1072 {
1073         struct hdcp_port_data *data = &connector->hdcp.port_data;
1074         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1075         struct i915_hdcp_comp_master *comp;
1076         int ret;
1077
1078         mutex_lock(&dev_priv->hdcp_comp_mutex);
1079         comp = dev_priv->hdcp_master;
1080
1081         if (!comp || !comp->ops) {
1082                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1083                 return -EINVAL;
1084         }
1085
1086         ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
1087         if (ret < 0)
1088                 drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
1089         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1090
1091         return ret;
1092 }
1093
1094 static int
1095 hdcp2_store_pairing_info(struct intel_connector *connector,
1096                          struct hdcp2_ake_send_pairing_info *pairing_info)
1097 {
1098         struct hdcp_port_data *data = &connector->hdcp.port_data;
1099         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1100         struct i915_hdcp_comp_master *comp;
1101         int ret;
1102
1103         mutex_lock(&dev_priv->hdcp_comp_mutex);
1104         comp = dev_priv->hdcp_master;
1105
1106         if (!comp || !comp->ops) {
1107                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1108                 return -EINVAL;
1109         }
1110
1111         ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
1112         if (ret < 0)
1113                 drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
1114                             ret);
1115         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1116
1117         return ret;
1118 }
1119
1120 static int
1121 hdcp2_prepare_lc_init(struct intel_connector *connector,
1122                       struct hdcp2_lc_init *lc_init)
1123 {
1124         struct hdcp_port_data *data = &connector->hdcp.port_data;
1125         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1126         struct i915_hdcp_comp_master *comp;
1127         int ret;
1128
1129         mutex_lock(&dev_priv->hdcp_comp_mutex);
1130         comp = dev_priv->hdcp_master;
1131
1132         if (!comp || !comp->ops) {
1133                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1134                 return -EINVAL;
1135         }
1136
1137         ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
1138         if (ret < 0)
1139                 drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
1140                             ret);
1141         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1142
1143         return ret;
1144 }
1145
1146 static int
1147 hdcp2_verify_lprime(struct intel_connector *connector,
1148                     struct hdcp2_lc_send_lprime *rx_lprime)
1149 {
1150         struct hdcp_port_data *data = &connector->hdcp.port_data;
1151         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1152         struct i915_hdcp_comp_master *comp;
1153         int ret;
1154
1155         mutex_lock(&dev_priv->hdcp_comp_mutex);
1156         comp = dev_priv->hdcp_master;
1157
1158         if (!comp || !comp->ops) {
1159                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1160                 return -EINVAL;
1161         }
1162
1163         ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
1164         if (ret < 0)
1165                 drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
1166                             ret);
1167         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1168
1169         return ret;
1170 }
1171
1172 static int hdcp2_prepare_skey(struct intel_connector *connector,
1173                               struct hdcp2_ske_send_eks *ske_data)
1174 {
1175         struct hdcp_port_data *data = &connector->hdcp.port_data;
1176         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1177         struct i915_hdcp_comp_master *comp;
1178         int ret;
1179
1180         mutex_lock(&dev_priv->hdcp_comp_mutex);
1181         comp = dev_priv->hdcp_master;
1182
1183         if (!comp || !comp->ops) {
1184                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1185                 return -EINVAL;
1186         }
1187
1188         ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
1189         if (ret < 0)
1190                 drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
1191                             ret);
1192         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1193
1194         return ret;
1195 }
1196
1197 static int
1198 hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
1199                                       struct hdcp2_rep_send_receiverid_list
1200                                                                 *rep_topology,
1201                                       struct hdcp2_rep_send_ack *rep_send_ack)
1202 {
1203         struct hdcp_port_data *data = &connector->hdcp.port_data;
1204         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1205         struct i915_hdcp_comp_master *comp;
1206         int ret;
1207
1208         mutex_lock(&dev_priv->hdcp_comp_mutex);
1209         comp = dev_priv->hdcp_master;
1210
1211         if (!comp || !comp->ops) {
1212                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1213                 return -EINVAL;
1214         }
1215
1216         ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
1217                                                          rep_topology,
1218                                                          rep_send_ack);
1219         if (ret < 0)
1220                 drm_dbg_kms(&dev_priv->drm,
1221                             "Verify rep topology failed. %d\n", ret);
1222         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1223
1224         return ret;
1225 }
1226
1227 static int
1228 hdcp2_verify_mprime(struct intel_connector *connector,
1229                     struct hdcp2_rep_stream_ready *stream_ready)
1230 {
1231         struct hdcp_port_data *data = &connector->hdcp.port_data;
1232         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1233         struct i915_hdcp_comp_master *comp;
1234         int ret;
1235
1236         mutex_lock(&dev_priv->hdcp_comp_mutex);
1237         comp = dev_priv->hdcp_master;
1238
1239         if (!comp || !comp->ops) {
1240                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1241                 return -EINVAL;
1242         }
1243
1244         ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
1245         if (ret < 0)
1246                 drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
1247         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1248
1249         return ret;
1250 }
1251
1252 static int hdcp2_authenticate_port(struct intel_connector *connector)
1253 {
1254         struct hdcp_port_data *data = &connector->hdcp.port_data;
1255         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1256         struct i915_hdcp_comp_master *comp;
1257         int ret;
1258
1259         mutex_lock(&dev_priv->hdcp_comp_mutex);
1260         comp = dev_priv->hdcp_master;
1261
1262         if (!comp || !comp->ops) {
1263                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1264                 return -EINVAL;
1265         }
1266
1267         ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
1268         if (ret < 0)
1269                 drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
1270                             ret);
1271         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1272
1273         return ret;
1274 }
1275
1276 static int hdcp2_close_mei_session(struct intel_connector *connector)
1277 {
1278         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1279         struct i915_hdcp_comp_master *comp;
1280         int ret;
1281
1282         mutex_lock(&dev_priv->hdcp_comp_mutex);
1283         comp = dev_priv->hdcp_master;
1284
1285         if (!comp || !comp->ops) {
1286                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
1287                 return -EINVAL;
1288         }
1289
1290         ret = comp->ops->close_hdcp_session(comp->mei_dev,
1291                                              &connector->hdcp.port_data);
1292         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1293
1294         return ret;
1295 }
1296
/* Tear down the HDCP2.2 session held by the mei FW for this port. */
static int hdcp2_deauthenticate_port(struct intel_connector *connector)
{
	return hdcp2_close_mei_session(connector);
}
1301
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	/* AKE messages are exchanged strictly one at a time; share storage. */
	union {
		struct hdcp2_ake_init ake_init;
		struct hdcp2_ake_send_cert send_cert;
		struct hdcp2_ake_no_stored_km no_stored_km;
		struct hdcp2_ake_send_hprime send_hprime;
		struct hdcp2_ake_send_pairing_info pairing_info;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	size_t size;
	int ret;

	/* Init for seq_num */
	hdcp->seq_num_v = 0;
	hdcp->seq_num_m = 0;

	/* Have the mei FW generate AKE_Init, then send it to the receiver. */
	ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.ake_init,
				  sizeof(msgs.ake_init));
	if (ret < 0)
		return ret;

	/* Read back AKE_Send_Cert carrying the receiver's certificate. */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_CERT,
				 &msgs.send_cert, sizeof(msgs.send_cert));
	if (ret < 0)
		return ret;

	if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
		drm_dbg_kms(&dev_priv->drm, "cert.rx_caps dont claim HDCP2.2\n");
		return -EINVAL;
	}

	hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);

	/* Refuse to authenticate a receiver whose ID has been revoked. */
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.send_cert.cert_rx.receiver_id,
					1) > 0) {
		drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
		return -EPERM;
	}

	/*
	 * Here msgs.no_stored_km will hold msgs corresponding to the km
	 * stored also.
	 */
	ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
					      &hdcp->is_paired,
					      &msgs.no_stored_km, &size);
	if (ret < 0)
		return ret;

	ret = shim->write_2_2_msg(dig_port, &msgs.no_stored_km, size);
	if (ret < 0)
		return ret;

	/* Receiver proves knowledge of km via H'; verify it through mei. */
	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_AKE_SEND_HPRIME,
				 &msgs.send_hprime, sizeof(msgs.send_hprime));
	if (ret < 0)
		return ret;

	ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
	if (ret < 0)
		return ret;

	if (!hdcp->is_paired) {
		/* Pairing is required */
		ret = shim->read_2_2_msg(dig_port,
					 HDCP_2_2_AKE_SEND_PAIRING_INFO,
					 &msgs.pairing_info,
					 sizeof(msgs.pairing_info));
		if (ret < 0)
			return ret;

		ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
		if (ret < 0)
			return ret;
		hdcp->is_paired = true;
	}

	return 0;
}
1391
1392 static int hdcp2_locality_check(struct intel_connector *connector)
1393 {
1394         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1395         struct intel_hdcp *hdcp = &connector->hdcp;
1396         union {
1397                 struct hdcp2_lc_init lc_init;
1398                 struct hdcp2_lc_send_lprime send_lprime;
1399         } msgs;
1400         const struct intel_hdcp_shim *shim = hdcp->shim;
1401         int tries = HDCP2_LC_RETRY_CNT, ret, i;
1402
1403         for (i = 0; i < tries; i++) {
1404                 ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
1405                 if (ret < 0)
1406                         continue;
1407
1408                 ret = shim->write_2_2_msg(dig_port, &msgs.lc_init,
1409                                       sizeof(msgs.lc_init));
1410                 if (ret < 0)
1411                         continue;
1412
1413                 ret = shim->read_2_2_msg(dig_port,
1414                                          HDCP_2_2_LC_SEND_LPRIME,
1415                                          &msgs.send_lprime,
1416                                          sizeof(msgs.send_lprime));
1417                 if (ret < 0)
1418                         continue;
1419
1420                 ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
1421                 if (!ret)
1422                         break;
1423         }
1424
1425         return ret;
1426 }
1427
1428 static int hdcp2_session_key_exchange(struct intel_connector *connector)
1429 {
1430         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1431         struct intel_hdcp *hdcp = &connector->hdcp;
1432         struct hdcp2_ske_send_eks send_eks;
1433         int ret;
1434
1435         ret = hdcp2_prepare_skey(connector, &send_eks);
1436         if (ret < 0)
1437                 return ret;
1438
1439         ret = hdcp->shim->write_2_2_msg(dig_port, &send_eks,
1440                                         sizeof(send_eks));
1441         if (ret < 0)
1442                 return ret;
1443
1444         return 0;
1445 }
1446
/*
 * Build and send RepeaterAuth_Stream_Manage for the single stream carried
 * on this port, read back RepeaterAuth_Stream_Ready and have the mei FW
 * verify M'. seq_num_m is incremented on every attempt, success or failure.
 */
static
int _hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_stream_manage stream_manage;
		struct hdcp2_rep_stream_ready stream_ready;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	int ret;

	/* seq_num_m must not exceed the protocol maximum (24-bit field). */
	if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX)
		return -ERANGE;

	/* Prepare RepeaterAuth_Stream_Manage msg */
	msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
	drm_hdcp_cpu_to_be24(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);

	/* K no of streams is fixed as 1. Stored as big-endian. */
	msgs.stream_manage.k = cpu_to_be16(1);

	/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
	msgs.stream_manage.streams[0].stream_id = 0;
	msgs.stream_manage.streams[0].stream_type = hdcp->content_type;

	/* Send it to Repeater */
	ret = shim->write_2_2_msg(dig_port, &msgs.stream_manage,
				  sizeof(msgs.stream_manage));
	if (ret < 0)
		goto out;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_STREAM_READY,
				 &msgs.stream_ready, sizeof(msgs.stream_ready));
	if (ret < 0)
		goto out;

	/* Mirror the values into port_data before the mei M' verification. */
	hdcp->port_data.seq_num_m = hdcp->seq_num_m;
	hdcp->port_data.streams[0].stream_type = hdcp->content_type;
	ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);

out:
	/* Advance seq_num_m whether or not this attempt succeeded. */
	hdcp->seq_num_m++;

	return ret;
}
1493
/*
 * Authenticate an HDCP2.2 repeater: read its receiver ID list, sanity-check
 * the topology and seq_num_v, screen the list against revoked IDs, have the
 * mei FW verify it, then send RepeaterAuth_Send_Ack back downstream.
 */
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	union {
		struct hdcp2_rep_send_receiverid_list recvid_list;
		struct hdcp2_rep_send_ack rep_ack;
	} msgs;
	const struct intel_hdcp_shim *shim = hdcp->shim;
	u32 seq_num_v, device_cnt;
	u8 *rx_info;
	int ret;

	ret = shim->read_2_2_msg(dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
				 &msgs.recvid_list, sizeof(msgs.recvid_list));
	if (ret < 0)
		return ret;

	rx_info = msgs.recvid_list.rx_info;

	if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
	    HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
		drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
		return -EINVAL;
	}

	/* Converting and Storing the seq_num_v to local variable as DWORD */
	seq_num_v =
		drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);

	if (!hdcp->hdcp2_encrypted && seq_num_v) {
		drm_dbg_kms(&dev_priv->drm,
			    "Non zero Seq_num_v at first RecvId_List msg\n");
		return -EINVAL;
	}

	if (seq_num_v < hdcp->seq_num_v) {
		/* Roll over of the seq_num_v from repeater. Reauthenticate. */
		drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
		return -EINVAL;
	}

	/* Device count is split across the two rx_info bytes. */
	device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
		      HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
	if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
					msgs.recvid_list.receiver_ids,
					device_cnt) > 0) {
		drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
		return -EPERM;
	}

	ret = hdcp2_verify_rep_topology_prepare_ack(connector,
						    &msgs.recvid_list,
						    &msgs.rep_ack);
	if (ret < 0)
		return ret;

	/* List accepted: remember seq_num_v and ack the repeater. */
	hdcp->seq_num_v = seq_num_v;
	ret = shim->write_2_2_msg(dig_port, &msgs.rep_ack,
				  sizeof(msgs.rep_ack));
	if (ret < 0)
		return ret;

	return 0;
}
1561
1562 static int hdcp2_authenticate_sink(struct intel_connector *connector)
1563 {
1564         struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
1565         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1566         struct intel_hdcp *hdcp = &connector->hdcp;
1567         const struct intel_hdcp_shim *shim = hdcp->shim;
1568         int ret;
1569
1570         ret = hdcp2_authentication_key_exchange(connector);
1571         if (ret < 0) {
1572                 drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
1573                 return ret;
1574         }
1575
1576         ret = hdcp2_locality_check(connector);
1577         if (ret < 0) {
1578                 drm_dbg_kms(&i915->drm,
1579                             "Locality Check failed. Err : %d\n", ret);
1580                 return ret;
1581         }
1582
1583         ret = hdcp2_session_key_exchange(connector);
1584         if (ret < 0) {
1585                 drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
1586                 return ret;
1587         }
1588
1589         if (shim->config_stream_type) {
1590                 ret = shim->config_stream_type(dig_port,
1591                                                hdcp->is_repeater,
1592                                                hdcp->content_type);
1593                 if (ret < 0)
1594                         return ret;
1595         }
1596
1597         if (hdcp->is_repeater) {
1598                 ret = hdcp2_authenticate_repeater_topology(connector);
1599                 if (ret < 0) {
1600                         drm_dbg_kms(&i915->drm,
1601                                     "Repeater Auth Failed. Err: %d\n", ret);
1602                         return ret;
1603                 }
1604         }
1605
1606         return ret;
1607 }
1608
/*
 * hdcp2_enable_encryption - turn on HDCP2.2 link encryption for the port
 *
 * Asserts HDCP signalling via the shim (when the hook exists), requests
 * link encryption through HDCP2_CTL once the HW reports the link as
 * authenticated, then waits for the encryption status bit to latch.
 *
 * Returns 0 on success or a negative error code from the signalling
 * toggle or the status-bit wait.
 */
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption must not already be active when we get here. */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
		    LINK_ENCRYPTION_STATUS);
	if (hdcp->shim->toggle_signalling) {
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    true);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to enable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	/* Only request encryption if HW already reports link auth done. */
	if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
	    LINK_AUTH_STATUS) {
		/* Link is Authenticated. Now set for Encryption */
		intel_de_write(dev_priv,
			       HDCP2_CTL(dev_priv, cpu_transcoder, port),
			       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
	}

	/* Wait for the HW to confirm encryption is enabled. */
	ret = intel_de_wait_for_set(dev_priv,
				    HDCP2_STATUS(dev_priv, cpu_transcoder,
						 port),
				    LINK_ENCRYPTION_STATUS,
				    ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);

	return ret;
}
1648
/*
 * hdcp2_disable_encryption - turn off HDCP2.2 link encryption for the port
 *
 * Clears the encryption request in HDCP2_CTL, waits for the encryption
 * status bit to drop, then deasserts HDCP signalling via the shim (when
 * the hook exists). A timeout on the wait is only logged; the
 * signalling toggle still runs afterwards.
 */
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
	int ret;

	/* Encryption is expected to be active when this is called. */
	drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
				      LINK_ENCRYPTION_STATUS));

	intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
		       intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);

	ret = intel_de_wait_for_clear(dev_priv,
				      HDCP2_STATUS(dev_priv, cpu_transcoder,
						   port),
				      LINK_ENCRYPTION_STATUS,
				      ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
	if (ret == -ETIMEDOUT)
		drm_dbg_kms(&dev_priv->drm, "Disable Encryption Timedout");

	if (hdcp->shim->toggle_signalling) {
		/* NOTE: a -ETIMEDOUT from the wait above is overwritten here. */
		ret = hdcp->shim->toggle_signalling(dig_port, cpu_transcoder,
						    false);
		if (ret) {
			drm_err(&dev_priv->drm,
				"Failed to disable HDCP signalling. %d\n",
				ret);
			return ret;
		}
	}

	return ret;
}
1685
1686 static int
1687 hdcp2_propagate_stream_management_info(struct intel_connector *connector)
1688 {
1689         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1690         int i, tries = 3, ret;
1691
1692         if (!connector->hdcp.is_repeater)
1693                 return 0;
1694
1695         for (i = 0; i < tries; i++) {
1696                 ret = _hdcp2_propagate_stream_management_info(connector);
1697                 if (!ret)
1698                         break;
1699
1700                 /* Lets restart the auth incase of seq_num_m roll over */
1701                 if (connector->hdcp.seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
1702                         drm_dbg_kms(&i915->drm,
1703                                     "seq_num_m roll over.(%d)\n", ret);
1704                         break;
1705                 }
1706
1707                 drm_dbg_kms(&i915->drm,
1708                             "HDCP2 stream management %d of %d Failed.(%d)\n",
1709                             i + 1, tries, ret);
1710         }
1711
1712         return ret;
1713 }
1714
/*
 * hdcp2_authenticate_and_encrypt - full HDCP2.2 bring-up with retries
 *
 * Tries up to three times to authenticate the sink, propagate stream
 * management info (repeaters) and authenticate the port with the ME FW.
 * A stream-management failure aborts the retry loop outright; other
 * failures clear the ME FW session and retry. After successful auth,
 * waits the spec-mandated interval before enabling encryption.
 */
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret, i, tries = 3;

	for (i = 0; i < tries; i++) {
		ret = hdcp2_authenticate_sink(connector);
		if (!ret) {
			ret = hdcp2_propagate_stream_management_info(connector);
			if (ret) {
				drm_dbg_kms(&i915->drm,
					    "Stream management failed.(%d)\n",
					    ret);
				break;
			}
			hdcp->port_data.streams[0].stream_type =
							hdcp->content_type;
			ret = hdcp2_authenticate_port(connector);
			/* Success breaks out before the deauth below. */
			if (!ret)
				break;
			drm_dbg_kms(&i915->drm, "HDCP2 port auth failed.(%d)\n",
				    ret);
		}

		/* Clearing the mei hdcp session */
		drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
			    i + 1, tries, ret);
		if (hdcp2_deauthenticate_port(connector) < 0)
			drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
	}

	if (!ret) {
		/*
		 * Ensuring the required 200mSec min time interval between
		 * Session Key Exchange and encryption.
		 */
		msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
		ret = hdcp2_enable_encryption(connector);
		if (ret < 0) {
			drm_dbg_kms(&i915->drm,
				    "Encryption Enable Failed.(%d)\n", ret);
			if (hdcp2_deauthenticate_port(connector) < 0)
				drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
		}
	}

	return ret;
}
1764
1765 static int _intel_hdcp2_enable(struct intel_connector *connector)
1766 {
1767         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1768         struct intel_hdcp *hdcp = &connector->hdcp;
1769         int ret;
1770
1771         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
1772                     connector->base.name, connector->base.base.id,
1773                     hdcp->content_type);
1774
1775         ret = hdcp2_authenticate_and_encrypt(connector);
1776         if (ret) {
1777                 drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
1778                             hdcp->content_type, ret);
1779                 return ret;
1780         }
1781
1782         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
1783                     connector->base.name, connector->base.base.id,
1784                     hdcp->content_type);
1785
1786         hdcp->hdcp2_encrypted = true;
1787         return 0;
1788 }
1789
1790 static int _intel_hdcp2_disable(struct intel_connector *connector)
1791 {
1792         struct drm_i915_private *i915 = to_i915(connector->base.dev);
1793         int ret;
1794
1795         drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
1796                     connector->base.name, connector->base.base.id);
1797
1798         ret = hdcp2_disable_encryption(connector);
1799
1800         if (hdcp2_deauthenticate_port(connector) < 0)
1801                 drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
1802
1803         connector->hdcp.hdcp2_encrypted = false;
1804
1805         return ret;
1806 }
1807
/*
 * Implements the Link Integrity Check for HDCP2.2.
 *
 * Verifies encryption is still live in HW, then asks the shim for link
 * state. A topology change triggers repeater-topology re-auth only; any
 * other failure does a full disable/re-enable cycle. Returns 0 when the
 * link is (or has been restored to) a protected state.
 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	enum port port = dig_port->base.port;
	enum transcoder cpu_transcoder;
	int ret = 0;

	mutex_lock(&hdcp->mutex);
	cpu_transcoder = hdcp->cpu_transcoder;

	/* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
	if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
	    !hdcp->hdcp2_encrypted) {
		ret = -EINVAL;
		goto out;
	}

	/* HW dropped encryption behind our back: flag DESIRED for re-auth. */
	if (drm_WARN_ON(&dev_priv->drm,
			!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
		drm_err(&dev_priv->drm,
			"HDCP2.2 link stopped the encryption, %x\n",
			intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
		ret = -ENXIO;
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

	ret = hdcp->shim->check_2_2_link(dig_port);
	if (ret == HDCP_LINK_PROTECTED) {
		if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
		}
		goto out;
	}

	if (ret == HDCP_TOPOLOGY_CHANGE) {
		if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
			goto out;

		drm_dbg_kms(&dev_priv->drm,
			    "HDCP2.2 Downstream topology change\n");
		/* Re-auth only the repeater topology, not the whole link. */
		ret = hdcp2_authenticate_repeater_topology(connector);
		if (!ret) {
			intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
			goto out;
		}
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Repeater topology auth failed.(%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
	} else {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] HDCP2.2 link failed, retrying auth\n",
			    connector->base.name, connector->base.base.id);
	}

	/* Link failed or topology re-auth failed: full disable + enable. */
	ret = _intel_hdcp2_disable(connector);
	if (ret) {
		drm_err(&dev_priv->drm,
			"[%s:%d] Failed to disable hdcp2.2 (%d)\n",
			connector->base.name, connector->base.base.id, ret);
		intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
		goto out;
	}

	ret = _intel_hdcp2_enable(connector);
	if (ret) {
		drm_dbg_kms(&dev_priv->drm,
			    "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
			    connector->base.name, connector->base.base.id,
			    ret);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_DESIRED,
					true);
		goto out;
	}

out:
	mutex_unlock(&hdcp->mutex);
	return ret;
}
1899
1900 static void intel_hdcp_check_work(struct work_struct *work)
1901 {
1902         struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
1903                                                struct intel_hdcp,
1904                                                check_work);
1905         struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
1906
1907         if (drm_connector_is_unregistered(&connector->base))
1908                 return;
1909
1910         if (!intel_hdcp2_check_link(connector))
1911                 schedule_delayed_work(&hdcp->check_work,
1912                                       DRM_HDCP2_CHECK_PERIOD_MS);
1913         else if (!intel_hdcp_check_link(connector))
1914                 schedule_delayed_work(&hdcp->check_work,
1915                                       DRM_HDCP_CHECK_PERIOD_MS);
1916 }
1917
1918 static int i915_hdcp_component_bind(struct device *i915_kdev,
1919                                     struct device *mei_kdev, void *data)
1920 {
1921         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1922
1923         drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
1924         mutex_lock(&dev_priv->hdcp_comp_mutex);
1925         dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
1926         dev_priv->hdcp_master->mei_dev = mei_kdev;
1927         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1928
1929         return 0;
1930 }
1931
1932 static void i915_hdcp_component_unbind(struct device *i915_kdev,
1933                                        struct device *mei_kdev, void *data)
1934 {
1935         struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
1936
1937         drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
1938         mutex_lock(&dev_priv->hdcp_comp_mutex);
1939         dev_priv->hdcp_master = NULL;
1940         mutex_unlock(&dev_priv->hdcp_comp_mutex);
1941 }
1942
/* Component ops connecting the i915 HDCP code to the mei_hdcp driver. */
static const struct component_ops i915_hdcp_component_ops = {
	.bind   = i915_hdcp_component_bind,
	.unbind = i915_hdcp_component_unbind,
};
1947
1948 static enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port)
1949 {
1950         switch (port) {
1951         case PORT_A:
1952                 return MEI_DDI_A;
1953         case PORT_B ... PORT_F:
1954                 return (enum mei_fw_ddi)port;
1955         default:
1956                 return MEI_DDI_INVALID_PORT;
1957         }
1958 }
1959
/*
 * Maps a CPU transcoder to the ME FW transcoder enum. Transcoders A..D
 * map by OR'ing 0x10 onto the transcoder value (presumably matching the
 * mei_fw_tc enum base — confirm against the enum definition); all other
 * transcoders are reported as invalid.
 */
static enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder)
{
	switch (cpu_transcoder) {
	case TRANSCODER_A ... TRANSCODER_D:
		return (enum mei_fw_tc)(cpu_transcoder | 0x10);
	default: /* eDP, DSI TRANSCODERS are non HDCP capable */
		return MEI_INVALID_TRANSCODER;
	}
}
1969
/*
 * initialize_hdcp_port_data - populate hdcp_port_data for the ME FW
 *
 * Fills in the DDI index, port type and protocol, and allocates a
 * single-entry stream table (data->k == 1) if not already allocated.
 *
 * Returns 0 on success, -ENOMEM if the stream table allocation fails.
 */
static int initialize_hdcp_port_data(struct intel_connector *connector,
				     enum port port,
				     const struct intel_hdcp_shim *shim)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_hdcp *hdcp = &connector->hdcp;
	struct hdcp_port_data *data = &hdcp->port_data;

	if (INTEL_GEN(dev_priv) < 12)
		data->fw_ddi = intel_get_mei_fw_ddi_index(port);
	else
		/*
		 * As per ME FW API expectation, for GEN 12+, fw_ddi is filled
		 * with zero(INVALID PORT index).
		 */
		data->fw_ddi = MEI_DDI_INVALID_PORT;

	/*
	 * As associated transcoder is set and modified at modeset, here fw_tc
	 * is initialized to zero (invalid transcoder index). This will be
	 * retained for <Gen12 forever.
	 */
	data->fw_tc = MEI_INVALID_TRANSCODER;

	data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
	data->protocol = (u8)shim->protocol;

	/* Single stream entry; k is the stream count passed to the FW. */
	data->k = 1;
	if (!data->streams)
		data->streams = kcalloc(data->k,
					sizeof(struct hdcp2_streamid_type),
					GFP_KERNEL);
	if (!data->streams) {
		drm_err(&dev_priv->drm, "Out of Memory\n");
		return -ENOMEM;
	}

	data->streams[0].stream_id = 0;
	data->streams[0].stream_type = hdcp->content_type;

	return 0;
}
2012
2013 static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
2014 {
2015         if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
2016                 return false;
2017
2018         return (INTEL_GEN(dev_priv) >= 10 ||
2019                 IS_GEMINILAKE(dev_priv) ||
2020                 IS_KABYLAKE(dev_priv) ||
2021                 IS_COFFEELAKE(dev_priv) ||
2022                 IS_COMETLAKE(dev_priv));
2023 }
2024
2025 void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
2026 {
2027         int ret;
2028
2029         if (!is_hdcp2_supported(dev_priv))
2030                 return;
2031
2032         mutex_lock(&dev_priv->hdcp_comp_mutex);
2033         drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
2034
2035         dev_priv->hdcp_comp_added = true;
2036         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2037         ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
2038                                   I915_COMPONENT_HDCP);
2039         if (ret < 0) {
2040                 drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
2041                             ret);
2042                 mutex_lock(&dev_priv->hdcp_comp_mutex);
2043                 dev_priv->hdcp_comp_added = false;
2044                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2045                 return;
2046         }
2047 }
2048
2049 static void intel_hdcp2_init(struct intel_connector *connector, enum port port,
2050                              const struct intel_hdcp_shim *shim)
2051 {
2052         struct drm_i915_private *i915 = to_i915(connector->base.dev);
2053         struct intel_hdcp *hdcp = &connector->hdcp;
2054         int ret;
2055
2056         ret = initialize_hdcp_port_data(connector, port, shim);
2057         if (ret) {
2058                 drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
2059                 return;
2060         }
2061
2062         hdcp->hdcp2_supported = true;
2063 }
2064
2065 int intel_hdcp_init(struct intel_connector *connector,
2066                     enum port port,
2067                     const struct intel_hdcp_shim *shim)
2068 {
2069         struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
2070         struct intel_hdcp *hdcp = &connector->hdcp;
2071         int ret;
2072
2073         if (!shim)
2074                 return -EINVAL;
2075
2076         if (is_hdcp2_supported(dev_priv) && !connector->mst_port)
2077                 intel_hdcp2_init(connector, port, shim);
2078
2079         ret =
2080         drm_connector_attach_content_protection_property(&connector->base,
2081                                                          hdcp->hdcp2_supported);
2082         if (ret) {
2083                 hdcp->hdcp2_supported = false;
2084                 kfree(hdcp->port_data.streams);
2085                 return ret;
2086         }
2087
2088         hdcp->shim = shim;
2089         mutex_init(&hdcp->mutex);
2090         INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
2091         INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
2092         init_waitqueue_head(&hdcp->cp_irq_queue);
2093
2094         return 0;
2095 }
2096
/*
 * intel_hdcp_enable - enable content protection on the connector
 * @connector: connector to protect
 * @cpu_transcoder: transcoder driving the connector for this modeset
 * @content_type: requested DRM_MODE_HDCP_CONTENT_TYPE* value
 *
 * Prefers HDCP2.2 when the setup is capable of it, falling back to
 * HDCP1.4 unless Type1 content was requested. On success the periodic
 * link-check work is scheduled and the property moves to ENABLED.
 * Both the connector and dig_port HDCP mutexes are held across the
 * enable attempt.
 */
int intel_hdcp_enable(struct intel_connector *connector,
		      enum transcoder cpu_transcoder, u8 content_type)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
	int ret = -EINVAL;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);
	drm_WARN_ON(&dev_priv->drm,
		    hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
	hdcp->content_type = content_type;
	hdcp->cpu_transcoder = cpu_transcoder;

	/* Gen12+ reports the active transcoder to the ME FW. */
	if (INTEL_GEN(dev_priv) >= 12)
		hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder);

	/*
	 * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
	 * is capable of HDCP2.2, it is preferred to use HDCP2.2.
	 */
	if (intel_hdcp2_capable(connector)) {
		ret = _intel_hdcp2_enable(connector);
		if (!ret)
			check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
	}

	/*
	 * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
	 * be attempted.
	 */
	if (ret && intel_hdcp_capable(connector) &&
	    hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
		ret = _intel_hdcp_enable(connector);
	}

	if (!ret) {
		schedule_delayed_work(&hdcp->check_work, check_link_interval);
		intel_hdcp_update_value(connector,
					DRM_MODE_CONTENT_PROTECTION_ENABLED,
					true);
	}

	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	return ret;
}
2149
/*
 * intel_hdcp_disable - disable content protection on the connector
 *
 * Moves the property state to UNDESIRED and tears down whichever of
 * HDCP2.2/HDCP1.4 is currently encrypting. The link-check work is
 * cancelled only after the locks are dropped — NOTE(review): presumably
 * because the work itself takes hdcp->mutex; confirm before reordering.
 */
int intel_hdcp_disable(struct intel_connector *connector)
{
	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	int ret = 0;

	if (!hdcp->shim)
		return -ENOENT;

	mutex_lock(&hdcp->mutex);
	mutex_lock(&dig_port->hdcp_mutex);

	/* Already disabled: nothing to tear down. */
	if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
		goto out;

	intel_hdcp_update_value(connector,
				DRM_MODE_CONTENT_PROTECTION_UNDESIRED, false);
	if (hdcp->hdcp2_encrypted)
		ret = _intel_hdcp2_disable(connector);
	else if (hdcp->hdcp_encrypted)
		ret = _intel_hdcp_disable(connector);

out:
	mutex_unlock(&dig_port->hdcp_mutex);
	mutex_unlock(&hdcp->mutex);
	cancel_delayed_work_sync(&hdcp->check_work);
	return ret;
}
2178
/*
 * intel_hdcp_update_pipe - sync HDCP state with the new atomic state
 *
 * Called during atomic commit. Disables HDCP when userspace requested
 * UNDESIRED or changed the content type mid-session (type changes need
 * a full disable/re-enable cycle), then re-enables when the state is
 * DESIRED and HDCP is not already enabled.
 */
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_connector *connector =
				to_intel_connector(conn_state->connector);
	struct intel_hdcp *hdcp = &connector->hdcp;
	bool content_protection_type_changed, desired_and_not_enabled = false;

	if (!connector->hdcp.shim)
		return;

	/* Type change only matters while protection is not UNDESIRED. */
	content_protection_type_changed =
		(conn_state->hdcp_content_type != hdcp->content_type &&
		 conn_state->content_protection !=
		 DRM_MODE_CONTENT_PROTECTION_UNDESIRED);

	/*
	 * During the HDCP encryption session if Type change is requested,
	 * disable the HDCP and reenable it with new TYPE value.
	 */
	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
	    content_protection_type_changed)
		intel_hdcp_disable(connector);

	/*
	 * Mark the hdcp state as DESIRED after the hdcp disable of type
	 * change procedure.
	 */
	if (content_protection_type_changed) {
		mutex_lock(&hdcp->mutex);
		hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
		schedule_work(&hdcp->prop_work);
		mutex_unlock(&hdcp->mutex);
	}

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED) {
		mutex_lock(&hdcp->mutex);
		/* Avoid enabling hdcp, if it already ENABLED */
		desired_and_not_enabled =
			hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
		mutex_unlock(&hdcp->mutex);
	}

	if (desired_and_not_enabled || content_protection_type_changed)
		intel_hdcp_enable(connector,
				  crtc_state->cpu_transcoder,
				  (u8)conn_state->hdcp_content_type);
}
2231
2232 void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
2233 {
2234         mutex_lock(&dev_priv->hdcp_comp_mutex);
2235         if (!dev_priv->hdcp_comp_added) {
2236                 mutex_unlock(&dev_priv->hdcp_comp_mutex);
2237                 return;
2238         }
2239
2240         dev_priv->hdcp_comp_added = false;
2241         mutex_unlock(&dev_priv->hdcp_comp_mutex);
2242
2243         component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
2244 }
2245
/*
 * intel_hdcp_cleanup - release HDCP resources at connector destroy time
 *
 * Expects the connector to already be unregistered (warned below);
 * cancels outstanding check work and frees the ME FW stream table.
 */
void intel_hdcp_cleanup(struct intel_connector *connector)
{
	struct intel_hdcp *hdcp = &connector->hdcp;

	if (!hdcp->shim)
		return;

	/*
	 * If the connector is registered, it's possible userspace could kick
	 * off another HDCP enable, which would re-spawn the workers.
	 */
	drm_WARN_ON(connector->base.dev,
		connector->base.registration_state == DRM_CONNECTOR_REGISTERED);

	/*
	 * Now that the connector is not registered, check_work won't be run,
	 * but cancel any outstanding instances of it
	 */
	cancel_delayed_work_sync(&hdcp->check_work);

	/*
	 * We don't cancel prop_work in the same way as check_work since it
	 * requires connection_mutex which could be held while calling this
	 * function. Instead, we rely on the connector references grabbed before
	 * scheduling prop_work to ensure the connector is alive when prop_work
	 * is run. So if we're in the destroy path (which is where this
	 * function should be called), we're "guaranteed" that prop_work is not
	 * active (tl;dr This Should Never Happen).
	 */
	drm_WARN_ON(connector->base.dev, work_pending(&hdcp->prop_work));

	mutex_lock(&hdcp->mutex);
	kfree(hdcp->port_data.streams);
	/* Clearing shim marks HDCP as torn down for later callers. */
	hdcp->shim = NULL;
	mutex_unlock(&hdcp->mutex);
}
2282
/*
 * intel_hdcp_atomic_check - fix up content protection state at check time
 *
 * ENABLED is driver-owned state: across disables and modesets it is
 * downgraded to DESIRED so the driver can re-establish protection. Any
 * real state or content-type change forces crtc_state->mode_changed.
 */
void intel_hdcp_atomic_check(struct drm_connector *connector,
			     struct drm_connector_state *old_state,
			     struct drm_connector_state *new_state)
{
	u64 old_cp = old_state->content_protection;
	u64 new_cp = new_state->content_protection;
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc) {
		/*
		 * If the connector is being disabled with CP enabled, mark it
		 * desired so it's re-enabled when the connector is brought back
		 */
		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
			new_state->content_protection =
				DRM_MODE_CONTENT_PROTECTION_DESIRED;
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
						   new_state->crtc);
	/*
	 * Fix the HDCP uapi content protection state in case of modeset.
	 * FIXME: As per HDCP content protection property uapi doc, an uevent()
	 * need to be sent if there is transition from ENABLED->DESIRED.
	 */
	if (drm_atomic_crtc_needs_modeset(crtc_state) &&
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
	    new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
		new_state->content_protection =
			DRM_MODE_CONTENT_PROTECTION_DESIRED;

	/*
	 * Nothing to do if the state didn't change, or HDCP was activated since
	 * the last commit. And also no change in hdcp content type.
	 */
	if (old_cp == new_cp ||
	    (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
	     new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
		if (old_state->hdcp_content_type ==
				new_state->hdcp_content_type)
			return;
	}

	crtc_state->mode_changed = true;
}
2329
2330 /* Handles the CP_IRQ raised from the DP HDCP sink */
2331 void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
2332 {
2333         struct intel_hdcp *hdcp = &connector->hdcp;
2334
2335         if (!hdcp->shim)
2336                 return;
2337
2338         atomic_inc(&connector->hdcp.cp_irq_count);
2339         wake_up_all(&connector->hdcp.cp_irq_queue);
2340
2341         schedule_delayed_work(&hdcp->check_work, 0);
2342 }
This page took 0.165462 seconds and 4 git commands to generate.