/* drivers/gpu/drm/amd/amdgpu/dce_v11_0.c — Linux 6.14-rc3 */
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <drm/drm_edid.h>
25 #include <drm/drm_fourcc.h>
26 #include <drm/drm_modeset_helper.h>
27 #include <drm/drm_modeset_helper_vtables.h>
28 #include <drm/drm_vblank.h>
29
30 #include "amdgpu.h"
31 #include "amdgpu_pm.h"
32 #include "amdgpu_i2c.h"
33 #include "vid.h"
34 #include "atom.h"
35 #include "amdgpu_atombios.h"
36 #include "atombios_crtc.h"
37 #include "atombios_encoders.h"
38 #include "amdgpu_pll.h"
39 #include "amdgpu_connectors.h"
40 #include "amdgpu_display.h"
41 #include "dce_v11_0.h"
42
43 #include "dce/dce_11_0_d.h"
44 #include "dce/dce_11_0_sh_mask.h"
45 #include "dce/dce_11_0_enum.h"
46 #include "oss/oss_3_0_d.h"
47 #include "oss/oss_3_0_sh_mask.h"
48 #include "gmc/gmc_8_1_d.h"
49 #include "gmc/gmc_8_1_sh_mask.h"
50
51 #include "ivsrcid/ivsrcid_vislands30.h"
52
/* Forward declarations for callbacks wired up via the *_set_*_funcs helpers. */
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd);

/* Register aperture offset of each crtc instance, indexed by crtc id. */
static const u32 crtc_offsets[] =
{
	CRTC0_REGISTER_OFFSET,
	CRTC1_REGISTER_OFFSET,
	CRTC2_REGISTER_OFFSET,
	CRTC3_REGISTER_OFFSET,
	CRTC4_REGISTER_OFFSET,
	CRTC5_REGISTER_OFFSET,
	CRTC6_REGISTER_OFFSET
};

/* Register aperture offset of each hotplug-detect pin, indexed by hpd id. */
static const u32 hpd_offsets[] =
{
	HPD0_REGISTER_OFFSET,
	HPD1_REGISTER_OFFSET,
	HPD2_REGISTER_OFFSET,
	HPD3_REGISTER_OFFSET,
	HPD4_REGISTER_OFFSET,
	HPD5_REGISTER_OFFSET
};

/* Register aperture offset of each digital encoder block, indexed by dig id. */
static const uint32_t dig_offsets[] = {
	DIG0_REGISTER_OFFSET,
	DIG1_REGISTER_OFFSET,
	DIG2_REGISTER_OFFSET,
	DIG3_REGISTER_OFFSET,
	DIG4_REGISTER_OFFSET,
	DIG5_REGISTER_OFFSET,
	DIG6_REGISTER_OFFSET,
	DIG7_REGISTER_OFFSET,
	DIG8_REGISTER_OFFSET
};

/*
 * Interrupt status register plus the per-source bit masks for each
 * display controller; entry N describes crtc/hpd N.
 */
static const struct {
	uint32_t	reg;
	uint32_t	vblank;
	uint32_t	vline;
	uint32_t	hpd;

} interrupt_status_offsets[] = { {
	.reg = mmDISP_INTERRUPT_STATUS,
	.vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
	.reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
	.vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
	.vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
	.hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };

/*
 * Per-ASIC "golden" register tables: { register, mask, value } triplets
 * consumed by amdgpu_device_program_register_sequence() in
 * dce_v11_0_init_golden_registers().
 */
static const u32 cz_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14300000,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};

static const u32 stoney_golden_settings_a11[] =
{
	mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
	mmFBC_MISC, 0x1f311fff, 0x14302000,
};

static const u32 polaris11_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_DEBUG1, 0xffffffff, 0x00000008,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};

static const u32 polaris10_golden_settings_a11[] =
{
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x9f313fff, 0x14302008,
	mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
162
163 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
164 {
165         switch (adev->asic_type) {
166         case CHIP_CARRIZO:
167                 amdgpu_device_program_register_sequence(adev,
168                                                         cz_mgcg_cgcg_init,
169                                                         ARRAY_SIZE(cz_mgcg_cgcg_init));
170                 amdgpu_device_program_register_sequence(adev,
171                                                         cz_golden_settings_a11,
172                                                         ARRAY_SIZE(cz_golden_settings_a11));
173                 break;
174         case CHIP_STONEY:
175                 amdgpu_device_program_register_sequence(adev,
176                                                         stoney_golden_settings_a11,
177                                                         ARRAY_SIZE(stoney_golden_settings_a11));
178                 break;
179         case CHIP_POLARIS11:
180         case CHIP_POLARIS12:
181                 amdgpu_device_program_register_sequence(adev,
182                                                         polaris11_golden_settings_a11,
183                                                         ARRAY_SIZE(polaris11_golden_settings_a11));
184                 break;
185         case CHIP_POLARIS10:
186         case CHIP_VEGAM:
187                 amdgpu_device_program_register_sequence(adev,
188                                                         polaris10_golden_settings_a11,
189                                                         ARRAY_SIZE(polaris10_golden_settings_a11));
190                 break;
191         default:
192                 break;
193         }
194 }
195
196 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
197                                      u32 block_offset, u32 reg)
198 {
199         unsigned long flags;
200         u32 r;
201
202         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
203         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
204         r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
205         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
206
207         return r;
208 }
209
210 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
211                                       u32 block_offset, u32 reg, u32 v)
212 {
213         unsigned long flags;
214
215         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
216         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
217         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
218         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
219 }
220
221 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
222 {
223         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
224                 return 0;
225         else
226                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
227 }
228
229 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
230 {
231         unsigned i;
232
233         /* Enable pflip interrupts */
234         for (i = 0; i < adev->mode_info.num_crtc; i++)
235                 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
236 }
237
238 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
239 {
240         unsigned i;
241
242         /* Disable pflip interrupts */
243         for (i = 0; i < adev->mode_info.num_crtc; i++)
244                 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
245 }
246
247 /**
248  * dce_v11_0_page_flip - pageflip callback.
249  *
250  * @adev: amdgpu_device pointer
251  * @crtc_id: crtc to cleanup pageflip on
252  * @crtc_base: new address of the crtc (GPU MC address)
253  * @async: asynchronous flip
254  *
255  * Triggers the actual pageflip by updating the primary
256  * surface base address.
257  */
258 static void dce_v11_0_page_flip(struct amdgpu_device *adev,
259                                 int crtc_id, u64 crtc_base, bool async)
260 {
261         struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
262         struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
263         u32 tmp;
264
265         /* flip immediate for async, default is vsync */
266         tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
267         tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
268                             GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
269         WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
270         /* update pitch */
271         WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
272                fb->pitches[0] / fb->format->cpp[0]);
273         /* update the scanout addresses */
274         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
275                upper_32_bits(crtc_base));
276         /* writing to the low address triggers the update */
277         WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
278                lower_32_bits(crtc_base));
279         /* post the write */
280         RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
281 }
282
283 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
284                                         u32 *vbl, u32 *position)
285 {
286         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
287                 return -EINVAL;
288
289         *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
290         *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
291
292         return 0;
293 }
294
295 /**
296  * dce_v11_0_hpd_sense - hpd sense callback.
297  *
298  * @adev: amdgpu_device pointer
299  * @hpd: hpd (hotplug detect) pin
300  *
301  * Checks if a digital monitor is connected (evergreen+).
302  * Returns true if connected, false if not connected.
303  */
304 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
305                                enum amdgpu_hpd_id hpd)
306 {
307         bool connected = false;
308
309         if (hpd >= adev->mode_info.num_hpd)
310                 return connected;
311
312         if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
313             DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
314                 connected = true;
315
316         return connected;
317 }
318
319 /**
320  * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
321  *
322  * @adev: amdgpu_device pointer
323  * @hpd: hpd (hotplug detect) pin
324  *
325  * Set the polarity of the hpd pin (evergreen+).
326  */
327 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
328                                       enum amdgpu_hpd_id hpd)
329 {
330         u32 tmp;
331         bool connected = dce_v11_0_hpd_sense(adev, hpd);
332
333         if (hpd >= adev->mode_info.num_hpd)
334                 return;
335
336         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
337         if (connected)
338                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
339         else
340                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
341         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
342 }
343
344 /**
345  * dce_v11_0_hpd_init - hpd setup callback.
346  *
347  * @adev: amdgpu_device pointer
348  *
349  * Setup the hpd pins used by the card (evergreen+).
350  * Enable the pin, set the polarity, and enable the hpd interrupts.
351  */
352 static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
353 {
354         struct drm_device *dev = adev_to_drm(adev);
355         struct drm_connector *connector;
356         struct drm_connector_list_iter iter;
357         u32 tmp;
358
359         drm_connector_list_iter_begin(dev, &iter);
360         drm_for_each_connector_iter(connector, &iter) {
361                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
362
363                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
364                         continue;
365
366                 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
367                     connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
368                         /* don't try to enable hpd on eDP or LVDS avoid breaking the
369                          * aux dp channel on imac and help (but not completely fix)
370                          * https://bugzilla.redhat.com/show_bug.cgi?id=726143
371                          * also avoid interrupt storms during dpms.
372                          */
373                         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
374                         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
375                         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
376                         continue;
377                 }
378
379                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
380                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
381                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
382
383                 tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
384                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
385                                     DC_HPD_CONNECT_INT_DELAY,
386                                     AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
387                 tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
388                                     DC_HPD_DISCONNECT_INT_DELAY,
389                                     AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
390                 WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
391
392                 dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd);
393                 dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
394                 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
395         }
396         drm_connector_list_iter_end(&iter);
397 }
398
399 /**
400  * dce_v11_0_hpd_fini - hpd tear down callback.
401  *
402  * @adev: amdgpu_device pointer
403  *
404  * Tear down the hpd pins used by the card (evergreen+).
405  * Disable the hpd interrupts.
406  */
407 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
408 {
409         struct drm_device *dev = adev_to_drm(adev);
410         struct drm_connector *connector;
411         struct drm_connector_list_iter iter;
412         u32 tmp;
413
414         drm_connector_list_iter_begin(dev, &iter);
415         drm_for_each_connector_iter(connector, &iter) {
416                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
417
418                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
419                         continue;
420
421                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
422                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
423                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
424
425                 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
426         }
427         drm_connector_list_iter_end(&iter);
428 }
429
/* Return the register exposing the HPD GPIO pin state for this DCE version. */
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
	return mmDC_GPIO_HPD_A;
}
434
435 static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
436 {
437         u32 crtc_hung = 0;
438         u32 crtc_status[6];
439         u32 i, j, tmp;
440
441         for (i = 0; i < adev->mode_info.num_crtc; i++) {
442                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
443                 if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
444                         crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
445                         crtc_hung |= (1 << i);
446                 }
447         }
448
449         for (j = 0; j < 10; j++) {
450                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
451                         if (crtc_hung & (1 << i)) {
452                                 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
453                                 if (tmp != crtc_status[i])
454                                         crtc_hung &= ~(1 << i);
455                         }
456                 }
457                 if (crtc_hung == 0)
458                         return false;
459                 udelay(100);
460         }
461
462         return true;
463 }
464
465 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
466                                            bool render)
467 {
468         u32 tmp;
469
470         /* Lockout access through VGA aperture*/
471         tmp = RREG32(mmVGA_HDP_CONTROL);
472         if (render)
473                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
474         else
475                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
476         WREG32(mmVGA_HDP_CONTROL, tmp);
477
478         /* disable VGA render */
479         tmp = RREG32(mmVGA_RENDER_CONTROL);
480         if (render)
481                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
482         else
483                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
484         WREG32(mmVGA_RENDER_CONTROL, tmp);
485 }
486
487 static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
488 {
489         int num_crtc = 0;
490
491         switch (adev->asic_type) {
492         case CHIP_CARRIZO:
493                 num_crtc = 3;
494                 break;
495         case CHIP_STONEY:
496                 num_crtc = 2;
497                 break;
498         case CHIP_POLARIS10:
499         case CHIP_VEGAM:
500                 num_crtc = 6;
501                 break;
502         case CHIP_POLARIS11:
503         case CHIP_POLARIS12:
504                 num_crtc = 5;
505                 break;
506         default:
507                 num_crtc = 0;
508         }
509         return num_crtc;
510 }
511
512 void dce_v11_0_disable_dce(struct amdgpu_device *adev)
513 {
514         /*Disable VGA render and enabled crtc, if has DCE engine*/
515         if (amdgpu_atombios_has_dce_engine_info(adev)) {
516                 u32 tmp;
517                 int crtc_enabled, i;
518
519                 dce_v11_0_set_vga_render_state(adev, false);
520
521                 /*Disable crtc*/
522                 for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
523                         crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
524                                                                          CRTC_CONTROL, CRTC_MASTER_EN);
525                         if (crtc_enabled) {
526                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
527                                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
528                                 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
529                                 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
530                                 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
531                         }
532                 }
533         }
534 }
535
/*
 * Program the FMT block (bit-depth reduction) for the crtc driving
 * @encoder, based on the monitor's bpc and the connector's dither
 * preference: either spatial dithering or plain truncation is enabled
 * for 6/8/10 bpc outputs, and the block is left disabled otherwise.
 */
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	int bpc = 0;
	u32 tmp = 0;
	enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
		bpc = amdgpu_connector_get_monitor_bpc(connector);
		dither = amdgpu_connector->dither;
	}

	/* LVDS/eDP FMT is set up by atom */
	if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
		return;

	/* not needed for analog */
	if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
	    (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
		return;

	/* bpc == 0 means the monitor bpc is unknown; leave FMT disabled */
	if (bpc == 0)
		return;

	switch (bpc) {
	case 6:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
		}
		break;
	case 8:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
		}
		break;
	case 10:
		if (dither == AMDGPU_FMT_DITHER_ENABLE) {
			/* XXX sort out optimal dither settings */
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
		} else {
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
			tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
		}
		break;
	default:
		/* not needed */
		break;
	}

	WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
611
612
613 /* display watermark setup */
614 /**
615  * dce_v11_0_line_buffer_adjust - Set up the line buffer
616  *
617  * @adev: amdgpu_device pointer
618  * @amdgpu_crtc: the selected display controller
619  * @mode: the current display mode on the selected display
620  * controller
621  *
622  * Setup up the line buffer allocation for
623  * the selected display controller (CIK).
624  * Returns the line buffer size in pixels.
625  */
626 static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
627                                        struct amdgpu_crtc *amdgpu_crtc,
628                                        struct drm_display_mode *mode)
629 {
630         u32 tmp, buffer_alloc, i, mem_cfg;
631         u32 pipe_offset = amdgpu_crtc->crtc_id;
632         /*
633          * Line Buffer Setup
634          * There are 6 line buffers, one for each display controllers.
635          * There are 3 partitions per LB. Select the number of partitions
636          * to enable based on the display width.  For display widths larger
637          * than 4096, you need use to use 2 display controllers and combine
638          * them using the stereo blender.
639          */
640         if (amdgpu_crtc->base.enabled && mode) {
641                 if (mode->crtc_hdisplay < 1920) {
642                         mem_cfg = 1;
643                         buffer_alloc = 2;
644                 } else if (mode->crtc_hdisplay < 2560) {
645                         mem_cfg = 2;
646                         buffer_alloc = 2;
647                 } else if (mode->crtc_hdisplay < 4096) {
648                         mem_cfg = 0;
649                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
650                 } else {
651                         DRM_DEBUG_KMS("Mode too big for LB!\n");
652                         mem_cfg = 0;
653                         buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
654                 }
655         } else {
656                 mem_cfg = 1;
657                 buffer_alloc = 0;
658         }
659
660         tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
661         tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
662         WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);
663
664         tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
665         tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
666         WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);
667
668         for (i = 0; i < adev->usec_timeout; i++) {
669                 tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
670                 if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
671                         break;
672                 udelay(1);
673         }
674
675         if (amdgpu_crtc->base.enabled && mode) {
676                 switch (mem_cfg) {
677                 case 0:
678                 default:
679                         return 4096 * 2;
680                 case 1:
681                         return 1920 * 2;
682                 case 2:
683                         return 2560 * 2;
684                 }
685         }
686
687         /* controller not enabled, so no lb used */
688         return 0;
689 }
690
691 /**
692  * cik_get_number_of_dram_channels - get the number of dram channels
693  *
694  * @adev: amdgpu_device pointer
695  *
696  * Look up the number of video ram channels (CIK).
697  * Used for display watermark bandwidth calculations
698  * Returns the number of dram channels
699  */
700 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
701 {
702         u32 tmp = RREG32(mmMC_SHARED_CHMAP);
703
704         switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
705         case 0:
706         default:
707                 return 1;
708         case 1:
709                 return 2;
710         case 2:
711                 return 4;
712         case 3:
713                 return 8;
714         case 4:
715                 return 3;
716         case 5:
717                 return 6;
718         case 6:
719                 return 10;
720         case 7:
721                 return 12;
722         case 8:
723                 return 16;
724         }
725 }
726
/* Inputs for the DCE 10/11 display watermark bandwidth calculations. */
struct dce10_wm_params {
	u32 dram_channels; /* number of dram channels */
	u32 yclk;          /* bandwidth per dram data pin in kHz */
	u32 sclk;          /* engine clock in kHz */
	u32 disp_clk;      /* display clock in kHz */
	u32 src_width;     /* viewport width */
	u32 active_time;   /* active display time in ns */
	u32 blank_time;    /* blank time in ns */
	bool interlaced;    /* mode is interlaced */
	fixed20_12 vsc;    /* vertical scale ratio */
	u32 num_heads;     /* number of active crtcs */
	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
	u32 lb_size;       /* line buffer allocated to pipe */
	u32 vtaps;         /* vertical scaler taps */
};
742
743 /**
744  * dce_v11_0_dram_bandwidth - get the dram bandwidth
745  *
746  * @wm: watermark calculation data
747  *
748  * Calculate the raw dram bandwidth (CIK).
749  * Used for display watermark bandwidth calculations
750  * Returns the dram bandwidth in MBytes/s
751  */
752 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
753 {
754         /* Calculate raw DRAM Bandwidth */
755         fixed20_12 dram_efficiency; /* 0.7 */
756         fixed20_12 yclk, dram_channels, bandwidth;
757         fixed20_12 a;
758
759         a.full = dfixed_const(1000);
760         yclk.full = dfixed_const(wm->yclk);
761         yclk.full = dfixed_div(yclk, a);
762         dram_channels.full = dfixed_const(wm->dram_channels * 4);
763         a.full = dfixed_const(10);
764         dram_efficiency.full = dfixed_const(7);
765         dram_efficiency.full = dfixed_div(dram_efficiency, a);
766         bandwidth.full = dfixed_mul(dram_channels, yclk);
767         bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
768
769         return dfixed_trunc(bandwidth);
770 }
771
772 /**
773  * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
774  *
775  * @wm: watermark calculation data
776  *
777  * Calculate the dram bandwidth used for display (CIK).
778  * Used for display watermark bandwidth calculations
779  * Returns the dram bandwidth for display in MBytes/s
780  */
781 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
782 {
783         /* Calculate DRAM Bandwidth and the part allocated to display. */
784         fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
785         fixed20_12 yclk, dram_channels, bandwidth;
786         fixed20_12 a;
787
788         a.full = dfixed_const(1000);
789         yclk.full = dfixed_const(wm->yclk);
790         yclk.full = dfixed_div(yclk, a);
791         dram_channels.full = dfixed_const(wm->dram_channels * 4);
792         a.full = dfixed_const(10);
793         disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
794         disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
795         bandwidth.full = dfixed_mul(dram_channels, yclk);
796         bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
797
798         return dfixed_trunc(bandwidth);
799 }
800
801 /**
802  * dce_v11_0_data_return_bandwidth - get the data return bandwidth
803  *
804  * @wm: watermark calculation data
805  *
806  * Calculate the data return bandwidth used for display (CIK).
807  * Used for display watermark bandwidth calculations
808  * Returns the data return bandwidth in MBytes/s
809  */
810 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
811 {
812         /* Calculate the display Data return Bandwidth */
813         fixed20_12 return_efficiency; /* 0.8 */
814         fixed20_12 sclk, bandwidth;
815         fixed20_12 a;
816
817         a.full = dfixed_const(1000);
818         sclk.full = dfixed_const(wm->sclk);
819         sclk.full = dfixed_div(sclk, a);
820         a.full = dfixed_const(10);
821         return_efficiency.full = dfixed_const(8);
822         return_efficiency.full = dfixed_div(return_efficiency, a);
823         a.full = dfixed_const(32);
824         bandwidth.full = dfixed_mul(a, sclk);
825         bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
826
827         return dfixed_trunc(bandwidth);
828 }
829
830 /**
831  * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
832  *
833  * @wm: watermark calculation data
834  *
835  * Calculate the dmif bandwidth used for display (CIK).
836  * Used for display watermark bandwidth calculations
837  * Returns the dmif bandwidth in MBytes/s
838  */
839 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
840 {
841         /* Calculate the DMIF Request Bandwidth */
842         fixed20_12 disp_clk_request_efficiency; /* 0.8 */
843         fixed20_12 disp_clk, bandwidth;
844         fixed20_12 a, b;
845
846         a.full = dfixed_const(1000);
847         disp_clk.full = dfixed_const(wm->disp_clk);
848         disp_clk.full = dfixed_div(disp_clk, a);
849         a.full = dfixed_const(32);
850         b.full = dfixed_mul(a, disp_clk);
851
852         a.full = dfixed_const(10);
853         disp_clk_request_efficiency.full = dfixed_const(8);
854         disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
855
856         bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
857
858         return dfixed_trunc(bandwidth);
859 }
860
861 /**
862  * dce_v11_0_available_bandwidth - get the min available bandwidth
863  *
864  * @wm: watermark calculation data
865  *
866  * Calculate the min available bandwidth used for display (CIK).
867  * Used for display watermark bandwidth calculations
868  * Returns the min available bandwidth in MBytes/s
869  */
870 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
871 {
872         /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
873         u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
874         u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
875         u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
876
877         return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
878 }
879
880 /**
881  * dce_v11_0_average_bandwidth - get the average available bandwidth
882  *
883  * @wm: watermark calculation data
884  *
885  * Calculate the average available bandwidth used for display (CIK).
886  * Used for display watermark bandwidth calculations
887  * Returns the average available bandwidth in MBytes/s
888  */
889 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
890 {
891         /* Calculate the display mode Average Bandwidth
892          * DisplayMode should contain the source and destination dimensions,
893          * timing, etc.
894          */
895         fixed20_12 bpp;
896         fixed20_12 line_time;
897         fixed20_12 src_width;
898         fixed20_12 bandwidth;
899         fixed20_12 a;
900
901         a.full = dfixed_const(1000);
902         line_time.full = dfixed_const(wm->active_time + wm->blank_time);
903         line_time.full = dfixed_div(line_time, a);
904         bpp.full = dfixed_const(wm->bytes_per_pixel);
905         src_width.full = dfixed_const(wm->src_width);
906         bandwidth.full = dfixed_mul(src_width, bpp);
907         bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
908         bandwidth.full = dfixed_div(bandwidth, line_time);
909
910         return dfixed_trunc(bandwidth);
911 }
912
913 /**
914  * dce_v11_0_latency_watermark - get the latency watermark
915  *
916  * @wm: watermark calculation data
917  *
918  * Calculate the latency watermark (CIK).
919  * Used for display watermark bandwidth calculations
920  * Returns the latency watermark in ns
921  */
922 static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
923 {
924         /* First calculate the latency in ns */
925         u32 mc_latency = 2000; /* 2000 ns. */
926         u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
927         u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
928         u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
929         u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
930         u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
931                 (wm->num_heads * cursor_line_pair_return_time);
932         u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
933         u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
934         u32 tmp, dmif_size = 12288;
935         fixed20_12 a, b, c;
936
937         if (wm->num_heads == 0)
938                 return 0;
939
940         a.full = dfixed_const(2);
941         b.full = dfixed_const(1);
942         if ((wm->vsc.full > a.full) ||
943             ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
944             (wm->vtaps >= 5) ||
945             ((wm->vsc.full >= a.full) && wm->interlaced))
946                 max_src_lines_per_dst_line = 4;
947         else
948                 max_src_lines_per_dst_line = 2;
949
950         a.full = dfixed_const(available_bandwidth);
951         b.full = dfixed_const(wm->num_heads);
952         a.full = dfixed_div(a, b);
953         tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
954         tmp = min(dfixed_trunc(a), tmp);
955
956         lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
957
958         a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
959         b.full = dfixed_const(1000);
960         c.full = dfixed_const(lb_fill_bw);
961         b.full = dfixed_div(c, b);
962         a.full = dfixed_div(a, b);
963         line_fill_time = dfixed_trunc(a);
964
965         if (line_fill_time < wm->active_time)
966                 return latency;
967         else
968                 return latency + (line_fill_time - wm->active_time);
969
970 }
971
972 /**
973  * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
974  * average and available dram bandwidth
975  *
976  * @wm: watermark calculation data
977  *
978  * Check if the display average bandwidth fits in the display
979  * dram bandwidth (CIK).
980  * Used for display watermark bandwidth calculations
981  * Returns true if the display fits, false if not.
982  */
983 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
984 {
985         if (dce_v11_0_average_bandwidth(wm) <=
986             (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
987                 return true;
988         else
989                 return false;
990 }
991
992 /**
993  * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
994  * average and available bandwidth
995  *
996  * @wm: watermark calculation data
997  *
998  * Check if the display average bandwidth fits in the display
999  * available bandwidth (CIK).
1000  * Used for display watermark bandwidth calculations
1001  * Returns true if the display fits, false if not.
1002  */
1003 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
1004 {
1005         if (dce_v11_0_average_bandwidth(wm) <=
1006             (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
1007                 return true;
1008         else
1009                 return false;
1010 }
1011
1012 /**
1013  * dce_v11_0_check_latency_hiding - check latency hiding
1014  *
1015  * @wm: watermark calculation data
1016  *
1017  * Check latency hiding (CIK).
1018  * Used for display watermark bandwidth calculations
1019  * Returns true if the display fits, false if not.
1020  */
1021 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
1022 {
1023         u32 lb_partitions = wm->lb_size / wm->src_width;
1024         u32 line_time = wm->active_time + wm->blank_time;
1025         u32 latency_tolerant_lines;
1026         u32 latency_hiding;
1027         fixed20_12 a;
1028
1029         a.full = dfixed_const(1);
1030         if (wm->vsc.full > a.full)
1031                 latency_tolerant_lines = 1;
1032         else {
1033                 if (lb_partitions <= (wm->vtaps + 1))
1034                         latency_tolerant_lines = 1;
1035                 else
1036                         latency_tolerant_lines = 2;
1037         }
1038
1039         latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1040
1041         if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
1042                 return true;
1043         else
1044                 return false;
1045 }
1046
1047 /**
1048  * dce_v11_0_program_watermarks - program display watermarks
1049  *
1050  * @adev: amdgpu_device pointer
1051  * @amdgpu_crtc: the selected display controller
1052  * @lb_size: line buffer size
1053  * @num_heads: number of display controllers in use
1054  *
1055  * Calculate and program the display watermarks for the
1056  * selected display controller (CIK).
1057  */
1058 static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
1059                                         struct amdgpu_crtc *amdgpu_crtc,
1060                                         u32 lb_size, u32 num_heads)
1061 {
1062         struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
1063         struct dce10_wm_params wm_low, wm_high;
1064         u32 active_time;
1065         u32 line_time = 0;
1066         u32 latency_watermark_a = 0, latency_watermark_b = 0;
1067         u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
1068
1069         if (amdgpu_crtc->base.enabled && num_heads && mode) {
1070                 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
1071                                             (u32)mode->clock);
1072                 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
1073                                           (u32)mode->clock);
1074                 line_time = min_t(u32, line_time, 65535);
1075
1076                 /* watermark for high clocks */
1077                 if (adev->pm.dpm_enabled) {
1078                         wm_high.yclk =
1079                                 amdgpu_dpm_get_mclk(adev, false) * 10;
1080                         wm_high.sclk =
1081                                 amdgpu_dpm_get_sclk(adev, false) * 10;
1082                 } else {
1083                         wm_high.yclk = adev->pm.current_mclk * 10;
1084                         wm_high.sclk = adev->pm.current_sclk * 10;
1085                 }
1086
1087                 wm_high.disp_clk = mode->clock;
1088                 wm_high.src_width = mode->crtc_hdisplay;
1089                 wm_high.active_time = active_time;
1090                 wm_high.blank_time = line_time - wm_high.active_time;
1091                 wm_high.interlaced = false;
1092                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1093                         wm_high.interlaced = true;
1094                 wm_high.vsc = amdgpu_crtc->vsc;
1095                 wm_high.vtaps = 1;
1096                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1097                         wm_high.vtaps = 2;
1098                 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1099                 wm_high.lb_size = lb_size;
1100                 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1101                 wm_high.num_heads = num_heads;
1102
1103                 /* set for high clocks */
1104                 latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535);
1105
1106                 /* possibly force display priority to high */
1107                 /* should really do this at mode validation time... */
1108                 if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1109                     !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1110                     !dce_v11_0_check_latency_hiding(&wm_high) ||
1111                     (adev->mode_info.disp_priority == 2)) {
1112                         DRM_DEBUG_KMS("force priority to high\n");
1113                 }
1114
1115                 /* watermark for low clocks */
1116                 if (adev->pm.dpm_enabled) {
1117                         wm_low.yclk =
1118                                 amdgpu_dpm_get_mclk(adev, true) * 10;
1119                         wm_low.sclk =
1120                                 amdgpu_dpm_get_sclk(adev, true) * 10;
1121                 } else {
1122                         wm_low.yclk = adev->pm.current_mclk * 10;
1123                         wm_low.sclk = adev->pm.current_sclk * 10;
1124                 }
1125
1126                 wm_low.disp_clk = mode->clock;
1127                 wm_low.src_width = mode->crtc_hdisplay;
1128                 wm_low.active_time = active_time;
1129                 wm_low.blank_time = line_time - wm_low.active_time;
1130                 wm_low.interlaced = false;
1131                 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1132                         wm_low.interlaced = true;
1133                 wm_low.vsc = amdgpu_crtc->vsc;
1134                 wm_low.vtaps = 1;
1135                 if (amdgpu_crtc->rmx_type != RMX_OFF)
1136                         wm_low.vtaps = 2;
1137                 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1138                 wm_low.lb_size = lb_size;
1139                 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1140                 wm_low.num_heads = num_heads;
1141
1142                 /* set for low clocks */
1143                 latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535);
1144
1145                 /* possibly force display priority to high */
1146                 /* should really do this at mode validation time... */
1147                 if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1148                     !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1149                     !dce_v11_0_check_latency_hiding(&wm_low) ||
1150                     (adev->mode_info.disp_priority == 2)) {
1151                         DRM_DEBUG_KMS("force priority to high\n");
1152                 }
1153                 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1154         }
1155
1156         /* select wm A */
1157         wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1158         tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
1159         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1160         tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1161         tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
1162         tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1163         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1164         /* select wm B */
1165         tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
1166         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1167         tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
1168         tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
1169         tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
1170         WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1171         /* restore original selection */
1172         WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1173
1174         /* save values for DPM */
1175         amdgpu_crtc->line_time = line_time;
1176         amdgpu_crtc->wm_high = latency_watermark_a;
1177         amdgpu_crtc->wm_low = latency_watermark_b;
1178         /* Save number of lines the linebuffer leads before the scanout */
1179         amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1180 }
1181
1182 /**
1183  * dce_v11_0_bandwidth_update - program display watermarks
1184  *
1185  * @adev: amdgpu_device pointer
1186  *
1187  * Calculate and program the display watermarks and line
1188  * buffer allocation (CIK).
1189  */
1190 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1191 {
1192         struct drm_display_mode *mode = NULL;
1193         u32 num_heads = 0, lb_size;
1194         int i;
1195
1196         amdgpu_display_update_priority(adev);
1197
1198         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1199                 if (adev->mode_info.crtcs[i]->base.enabled)
1200                         num_heads++;
1201         }
1202         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1203                 mode = &adev->mode_info.crtcs[i]->base.mode;
1204                 lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1205                 dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1206                                             lb_size, num_heads);
1207         }
1208 }
1209
1210 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
1211 {
1212         int i;
1213         u32 offset, tmp;
1214
1215         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1216                 offset = adev->mode_info.audio.pin[i].offset;
1217                 tmp = RREG32_AUDIO_ENDPT(offset,
1218                                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1219                 if (((tmp &
1220                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1221                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1222                         adev->mode_info.audio.pin[i].connected = false;
1223                 else
1224                         adev->mode_info.audio.pin[i].connected = true;
1225         }
1226 }
1227
1228 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
1229 {
1230         int i;
1231
1232         dce_v11_0_audio_get_connected_pins(adev);
1233
1234         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1235                 if (adev->mode_info.audio.pin[i].connected)
1236                         return &adev->mode_info.audio.pin[i];
1237         }
1238         DRM_ERROR("No connected audio pins found!\n");
1239         return NULL;
1240 }
1241
1242 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1243 {
1244         struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1245         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1246         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1247         u32 tmp;
1248
1249         if (!dig || !dig->afmt || !dig->afmt->pin)
1250                 return;
1251
1252         tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1253         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1254         WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1255 }
1256
1257 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
1258                                                 struct drm_display_mode *mode)
1259 {
1260         struct drm_device *dev = encoder->dev;
1261         struct amdgpu_device *adev = drm_to_adev(dev);
1262         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1263         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1264         struct drm_connector *connector;
1265         struct drm_connector_list_iter iter;
1266         struct amdgpu_connector *amdgpu_connector = NULL;
1267         u32 tmp;
1268         int interlace = 0;
1269
1270         if (!dig || !dig->afmt || !dig->afmt->pin)
1271                 return;
1272
1273         drm_connector_list_iter_begin(dev, &iter);
1274         drm_for_each_connector_iter(connector, &iter) {
1275                 if (connector->encoder == encoder) {
1276                         amdgpu_connector = to_amdgpu_connector(connector);
1277                         break;
1278                 }
1279         }
1280         drm_connector_list_iter_end(&iter);
1281
1282         if (!amdgpu_connector) {
1283                 DRM_ERROR("Couldn't find encoder's connector\n");
1284                 return;
1285         }
1286
1287         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1288                 interlace = 1;
1289         if (connector->latency_present[interlace]) {
1290                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1291                                     VIDEO_LIPSYNC, connector->video_latency[interlace]);
1292                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1293                                     AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1294         } else {
1295                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1296                                     VIDEO_LIPSYNC, 0);
1297                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1298                                     AUDIO_LIPSYNC, 0);
1299         }
1300         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1301                            ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1302 }
1303
1304 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1305 {
1306         struct drm_device *dev = encoder->dev;
1307         struct amdgpu_device *adev = drm_to_adev(dev);
1308         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1309         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1310         struct drm_connector *connector;
1311         struct drm_connector_list_iter iter;
1312         struct amdgpu_connector *amdgpu_connector = NULL;
1313         u32 tmp;
1314         u8 *sadb = NULL;
1315         int sad_count;
1316
1317         if (!dig || !dig->afmt || !dig->afmt->pin)
1318                 return;
1319
1320         drm_connector_list_iter_begin(dev, &iter);
1321         drm_for_each_connector_iter(connector, &iter) {
1322                 if (connector->encoder == encoder) {
1323                         amdgpu_connector = to_amdgpu_connector(connector);
1324                         break;
1325                 }
1326         }
1327         drm_connector_list_iter_end(&iter);
1328
1329         if (!amdgpu_connector) {
1330                 DRM_ERROR("Couldn't find encoder's connector\n");
1331                 return;
1332         }
1333
1334         sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
1335         if (sad_count < 0) {
1336                 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1337                 sad_count = 0;
1338         }
1339
1340         /* program the speaker allocation */
1341         tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1342                                  ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1343         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1344                             DP_CONNECTION, 0);
1345         /* set HDMI mode */
1346         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1347                             HDMI_CONNECTION, 1);
1348         if (sad_count)
1349                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1350                                     SPEAKER_ALLOCATION, sadb[0]);
1351         else
1352                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1353                                     SPEAKER_ALLOCATION, 5); /* stereo */
1354         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1355                            ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1356
1357         kfree(sadb);
1358 }
1359
/* Program the azalia pin's audio descriptor registers from the
 * connector's EDID Short Audio Descriptors: one register per supported
 * HDMI coding type, carrying max channels, byte 2 and the supported
 * sample frequencies.
 */
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	/* maps each descriptor register to the HDMI coding type it reports */
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	/* find the connector currently driven by this encoder */
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
	if (sad_count < 0)
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
	if (sad_count <= 0)
		return;
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				/* keep the SAD with the highest channel count */
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				/* for PCM, accumulate frequencies across all
				 * matching SADs; for any other format only
				 * the first matching SAD is considered
				 */
				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}
1445
1446 static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
1447                                   struct amdgpu_audio_pin *pin,
1448                                   bool enable)
1449 {
1450         if (!pin)
1451                 return;
1452
1453         WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1454                            enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1455 }
1456
/* Register offsets of the azalia audio endpoint blocks, indexed by pin id */
static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
	AUD7_REGISTER_OFFSET,
};
1468
1469 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1470 {
1471         int i;
1472
1473         if (!amdgpu_audio)
1474                 return 0;
1475
1476         adev->mode_info.audio.enabled = true;
1477
1478         switch (adev->asic_type) {
1479         case CHIP_CARRIZO:
1480         case CHIP_STONEY:
1481                 adev->mode_info.audio.num_pins = 7;
1482                 break;
1483         case CHIP_POLARIS10:
1484         case CHIP_VEGAM:
1485                 adev->mode_info.audio.num_pins = 8;
1486                 break;
1487         case CHIP_POLARIS11:
1488         case CHIP_POLARIS12:
1489                 adev->mode_info.audio.num_pins = 6;
1490                 break;
1491         default:
1492                 return -EINVAL;
1493         }
1494
1495         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1496                 adev->mode_info.audio.pin[i].channels = -1;
1497                 adev->mode_info.audio.pin[i].rate = -1;
1498                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1499                 adev->mode_info.audio.pin[i].status_bits = 0;
1500                 adev->mode_info.audio.pin[i].category_code = 0;
1501                 adev->mode_info.audio.pin[i].connected = false;
1502                 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1503                 adev->mode_info.audio.pin[i].id = i;
1504                 /* disable audio.  it will be set up later */
1505                 /* XXX remove once we switch to ip funcs */
1506                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1507         }
1508
1509         return 0;
1510 }
1511
1512 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
1513 {
1514         int i;
1515
1516         if (!amdgpu_audio)
1517                 return;
1518
1519         if (!adev->mode_info.audio.enabled)
1520                 return;
1521
1522         for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1523                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1524
1525         adev->mode_info.audio.enabled = false;
1526 }
1527
/*
 * Update the HDMI Audio Clock Regeneration (ACR) N and CTS register
 * pairs for the given pixel clock rate.  amdgpu_afmt_acr() supplies
 * one N/CTS pair per audio sample-rate family (32 kHz, 44.1 kHz,
 * 48 kHz); each pair is programmed with a read-modify-write so the
 * other fields of the ACR registers are preserved.
 * NOTE(review): no dig/dig->afmt NULL check here — callers (e.g.
 * dce_v11_0_afmt_setmode) appear to validate first; confirm before
 * adding new call sites.
 */
static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev); /* used by RREG32/WREG32 */
	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	u32 tmp;

	/* 32 kHz family: CTS, then N */
	tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
	WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
	WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);

	/* 44.1 kHz family: CTS, then N */
	tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
	WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
	WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);

	/* 48 kHz family: CTS, then N */
	tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
	WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
	tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
	WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);

}
1562
1563 /*
1564  * build a HDMI Video Info Frame
1565  */
1566 static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1567                                                void *buffer, size_t size)
1568 {
1569         struct drm_device *dev = encoder->dev;
1570         struct amdgpu_device *adev = drm_to_adev(dev);
1571         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1572         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1573         uint8_t *frame = buffer + 3;
1574         uint8_t *header = buffer;
1575
1576         WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1577                 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1578         WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1579                 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1580         WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1581                 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1582         WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1583                 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1584 }
1585
1586 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1587 {
1588         struct drm_device *dev = encoder->dev;
1589         struct amdgpu_device *adev = drm_to_adev(dev);
1590         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1591         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1592         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1593         u32 dto_phase = 24 * 1000;
1594         u32 dto_modulo = clock;
1595         u32 tmp;
1596
1597         if (!dig || !dig->afmt)
1598                 return;
1599
1600         /* XXX two dtos; generally use dto0 for hdmi */
1601         /* Express [24MHz / target pixel clock] as an exact rational
1602          * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1603          * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1604          */
1605         tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1606         tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1607                             amdgpu_crtc->crtc_id);
1608         WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1609         WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1610         WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1611 }
1612
/*
 * update the info frames with the data from the current display mode
 *
 * Full HDMI audio/infoframe bring-up for one DIG encoder: picks an
 * audio pin, programs the audio DTO, deep-color control, VBI/infoframe
 * packet control, ACR values, IEC-60958 channel numbers, SAD/latency
 * fields and the AVI infoframe, then re-enables audio on the pin.
 * Register write order follows the hardware programming sequence —
 * do not reorder.
 */
static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;	/* default color depth when no CRTC is attached */

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
	dce_v11_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v11_0_audio_set_dto(encoder, mode->clock);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

	/* deep color: enabled only for 10/12 bpc; depth field encodes 30/36 bit */
	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}
	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable audio info frames (frames won't be set until audio is enabled) */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* set the default audio delay */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be suffient for all audio modes and small enough for all hblanks */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* allow 60958 channel status fields to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	/* ACR source: hardware CTS for deep color, software CTS otherwise */
	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	if (bpc > 8)
		/* clear SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
	else
		/* select SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to sent ACR packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	dce_v11_0_afmt_update_ACR(encoder, mode->clock);

	/* IEC-60958 channel status: assign channel numbers L=1, R=2, 3..8 */
	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	dce_v11_0_audio_write_speaker_allocation(encoder);

	/* enable all 8 audio channels */
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v11_0_afmt_audio_select_pin(encoder);
	dce_v11_0_audio_write_sad_regs(encoder);
	dce_v11_0_audio_write_latency_fields(encoder, mode);

	/* build, pack and program the AVI infoframe for this mode */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable AVI info frames */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* send audio packets */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

	/* enable audio after to setting up hw */
	dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
}
1796
1797 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1798 {
1799         struct drm_device *dev = encoder->dev;
1800         struct amdgpu_device *adev = drm_to_adev(dev);
1801         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1802         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1803
1804         if (!dig || !dig->afmt)
1805                 return;
1806
1807         /* Silent, r600_hdmi_enable will raise WARN for us */
1808         if (enable && dig->afmt->enabled)
1809                 return;
1810         if (!enable && !dig->afmt->enabled)
1811                 return;
1812
1813         if (!enable && dig->afmt->pin) {
1814                 dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1815                 dig->afmt->pin = NULL;
1816         }
1817
1818         dig->afmt->enabled = enable;
1819
1820         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1821                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1822 }
1823
1824 static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
1825 {
1826         int i;
1827
1828         for (i = 0; i < adev->mode_info.num_dig; i++)
1829                 adev->mode_info.afmt[i] = NULL;
1830
1831         /* DCE11 has audio blocks tied to DIG encoders */
1832         for (i = 0; i < adev->mode_info.num_dig; i++) {
1833                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1834                 if (adev->mode_info.afmt[i]) {
1835                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1836                         adev->mode_info.afmt[i]->id = i;
1837                 } else {
1838                         int j;
1839                         for (j = 0; j < i; j++) {
1840                                 kfree(adev->mode_info.afmt[j]);
1841                                 adev->mode_info.afmt[j] = NULL;
1842                         }
1843                         return -ENOMEM;
1844                 }
1845         }
1846         return 0;
1847 }
1848
1849 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
1850 {
1851         int i;
1852
1853         for (i = 0; i < adev->mode_info.num_dig; i++) {
1854                 kfree(adev->mode_info.afmt[i]);
1855                 adev->mode_info.afmt[i] = NULL;
1856         }
1857 }
1858
/* Per-CRTC VGA control registers, indexed by crtc_id (D1..D6). */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1868
1869 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
1870 {
1871         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1872         struct drm_device *dev = crtc->dev;
1873         struct amdgpu_device *adev = drm_to_adev(dev);
1874         u32 vga_control;
1875
1876         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1877         if (enable)
1878                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1879         else
1880                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1881 }
1882
1883 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
1884 {
1885         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1886         struct drm_device *dev = crtc->dev;
1887         struct amdgpu_device *adev = drm_to_adev(dev);
1888
1889         if (enable)
1890                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1891         else
1892                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1893 }
1894
/*
 * Program a framebuffer as this CRTC's primary scanout surface.
 *
 * Pins the FB's buffer object in VRAM (unless @atomic, in which case
 * the object is assumed pinned/idle/fenced already), translates the DRM
 * pixel format and tiling flags into GRPH_* register values, programs
 * surface address/pitch/viewport, and unpins the previous FB.
 *
 * @crtc:   CRTC to program
 * @fb:     framebuffer to scan out when @atomic, else the old fb to
 *          unpin (crtc->primary->fb becomes the target)
 * @x, @y:  viewport origin in the surface (x aligned down to 4,
 *          y aligned down to 2)
 * @atomic: nonzero for page-flip style updates (skip pinning)
 *
 * Returns 0 on success or a negative errno.
 */
static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
	u32 pipe_config;
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;	/* true for >8 bpc formats, see below */

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic)
		target_fb = fb;
	else
		target_fb = crtc->primary->fb;

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = target_fb->obj[0];
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (!atomic) {
		/* scanout requires a physically contiguous VRAM placement */
		abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			return -EINVAL;
		}
	}
	fb_location = amdgpu_bo_gpu_offset(abo);

	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	/* translate the DRM fourcc into GRPH_DEPTH/GRPH_FORMAT (and, on
	 * big-endian, the matching byte-swap mode) */
	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		/* swap red and blue channels via the crossbar */
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		break;
	default:
		/* NOTE(review): in the !atomic path the BO was pinned above
		 * and is not unpinned before this return — looks like a pin
		 * leak on unsupported formats; confirm against callers. */
		DRM_ERROR("Unsupported screen format %p4cc\n",
			  &target_fb->format->format);
		return -EINVAL;
	}

	/* propagate the BO's tiling layout into GRPH_CONTROL */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_2D_TILED_THIN1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
					  tile_split);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
					  mtaspect);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
					  ADDR_SURF_MICRO_TILING_DISPLAY);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_1D_TILED_THIN1);
	}

	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
				  pipe_config);

	dce_v11_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* program the 64-bit surface address (primary and secondary) */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
	if (bypass_lut)
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	/* surface window: full fb, origin at (0,0) */
	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	/* pitch is programmed in pixels, not bytes */
	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v11_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	/* viewport origin: x aligned to 4 pixels, y to 2 lines */
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;	/* round up to even */
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	/* unpin the previous framebuffer now that the new one is live */
	if (!atomic && fb && fb != crtc->primary->fb) {
		abo = gem_to_amdgpu_bo(fb->obj[0]);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v11_0_bandwidth_update(adev);

	return 0;
}
2141
2142 static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
2143                                      struct drm_display_mode *mode)
2144 {
2145         struct drm_device *dev = crtc->dev;
2146         struct amdgpu_device *adev = drm_to_adev(dev);
2147         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2148         u32 tmp;
2149
2150         tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2151         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2152                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2153         else
2154                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2155         WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2156 }
2157
/*
 * dce_v11_0_crtc_load_lut - program the color pipeline and legacy LUT
 *
 * Puts the per-crtc color pipeline into a pass-through configuration
 * (input CSC, prescale, input gamma, degamma, gamut remap, regamma and
 * output CSC all set to bypass/mode 0) and uploads the 256-entry gamma
 * table from crtc->gamma_store into the hardware LUT.
 */
static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	u16 *r, *g, *b;
	int i;
	u32 tmp;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	/* bypass the graphics input color space conversion */
	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass the graphics prescale block */
	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* input gamma mode 0 */
	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* reset LUT control to its default configuration */
	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* full range mapping: black at 0 ... */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	/* ... white at 0xffff on all three channels */
	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	/* sequential write mode, all three channels write-enabled */
	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	/* gamma_store holds 256 reds, then 256 greens, then 256 blues */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* pack 16-bit components as 10:10:10 (r bits 29:20, g 19:10, b 9:0) */
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* bypass degamma for graphics and both cursors */
	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass gamut remap */
	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass regamma */
	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass the output color space conversion */
	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
2232
2233 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
2234 {
2235         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2236         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2237
2238         switch (amdgpu_encoder->encoder_id) {
2239         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2240                 if (dig->linkb)
2241                         return 1;
2242                 else
2243                         return 0;
2244         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2245                 if (dig->linkb)
2246                         return 3;
2247                 else
2248                         return 2;
2249         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2250                 if (dig->linkb)
2251                         return 5;
2252                 else
2253                         return 4;
2254         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2255                 return 6;
2256         default:
2257                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2258                 return 0;
2259         }
2260 }
2261
2262 /**
2263  * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
2264  *
2265  * @crtc: drm crtc
2266  *
2267  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2268  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2269  * monitors a dedicated PPLL must be used.  If a particular board has
2270  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2271  * as there is no need to program the PLL itself.  If we are not able to
2272  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2273  * avoid messing up an existing monitor.
2274  *
2275  * Asic specific PLL information
2276  *
 * NOTE(review): the ASIC list below names DCE 10.x parts (Tonga, CI)
 * and appears carried over from the DCE 10 code — verify the PLL
 * availability for the DCE 11.x parts this file actually drives.
 *
 * DCE 10.x
 * Tonga
 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
 * CI
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2282  *
2283  */
2284 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2285 {
2286         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2287         struct drm_device *dev = crtc->dev;
2288         struct amdgpu_device *adev = drm_to_adev(dev);
2289         u32 pll_in_use;
2290         int pll;
2291
2292         if ((adev->asic_type == CHIP_POLARIS10) ||
2293             (adev->asic_type == CHIP_POLARIS11) ||
2294             (adev->asic_type == CHIP_POLARIS12) ||
2295             (adev->asic_type == CHIP_VEGAM)) {
2296                 struct amdgpu_encoder *amdgpu_encoder =
2297                         to_amdgpu_encoder(amdgpu_crtc->encoder);
2298                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2299
2300                 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2301                         return ATOM_DP_DTO;
2302
2303                 switch (amdgpu_encoder->encoder_id) {
2304                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2305                         if (dig->linkb)
2306                                 return ATOM_COMBOPHY_PLL1;
2307                         else
2308                                 return ATOM_COMBOPHY_PLL0;
2309                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2310                         if (dig->linkb)
2311                                 return ATOM_COMBOPHY_PLL3;
2312                         else
2313                                 return ATOM_COMBOPHY_PLL2;
2314                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2315                         if (dig->linkb)
2316                                 return ATOM_COMBOPHY_PLL5;
2317                         else
2318                                 return ATOM_COMBOPHY_PLL4;
2319                 default:
2320                         DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2321                         return ATOM_PPLL_INVALID;
2322                 }
2323         }
2324
2325         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2326                 if (adev->clock.dp_extclk)
2327                         /* skip PPLL programming if using ext clock */
2328                         return ATOM_PPLL_INVALID;
2329                 else {
2330                         /* use the same PPLL for all DP monitors */
2331                         pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2332                         if (pll != ATOM_PPLL_INVALID)
2333                                 return pll;
2334                 }
2335         } else {
2336                 /* use the same PPLL for all monitors with the same clock */
2337                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2338                 if (pll != ATOM_PPLL_INVALID)
2339                         return pll;
2340         }
2341
2342         /* XXX need to determine what plls are available on each DCE11 part */
2343         pll_in_use = amdgpu_pll_get_use_mask(crtc);
2344         if (adev->flags & AMD_IS_APU) {
2345                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2346                         return ATOM_PPLL1;
2347                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2348                         return ATOM_PPLL0;
2349                 DRM_ERROR("unable to allocate a PPLL\n");
2350                 return ATOM_PPLL_INVALID;
2351         } else {
2352                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2353                         return ATOM_PPLL2;
2354                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2355                         return ATOM_PPLL1;
2356                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2357                         return ATOM_PPLL0;
2358                 DRM_ERROR("unable to allocate a PPLL\n");
2359                 return ATOM_PPLL_INVALID;
2360         }
2361         return ATOM_PPLL_INVALID;
2362 }
2363
2364 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2365 {
2366         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2367         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2368         uint32_t cur_lock;
2369
2370         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2371         if (lock)
2372                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2373         else
2374                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2375         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2376 }
2377
2378 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
2379 {
2380         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2381         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2382         u32 tmp;
2383
2384         tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2385         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2386         WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2387 }
2388
2389 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2390 {
2391         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2392         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2393         u32 tmp;
2394
2395         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2396                upper_32_bits(amdgpu_crtc->cursor_addr));
2397         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2398                lower_32_bits(amdgpu_crtc->cursor_addr));
2399
2400         tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2401         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2402         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2403         WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2404 }
2405
2406 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2407                                         int x, int y)
2408 {
2409         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2410         struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2411         int xorigin = 0, yorigin = 0;
2412
2413         amdgpu_crtc->cursor_x = x;
2414         amdgpu_crtc->cursor_y = y;
2415
2416         /* avivo cursor are offset into the total surface */
2417         x += crtc->x;
2418         y += crtc->y;
2419         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2420
2421         if (x < 0) {
2422                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2423                 x = 0;
2424         }
2425         if (y < 0) {
2426                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2427                 y = 0;
2428         }
2429
2430         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2431         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2432         WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2433                ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2434
2435         return 0;
2436 }
2437
2438 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2439                                       int x, int y)
2440 {
2441         int ret;
2442
2443         dce_v11_0_lock_cursor(crtc, true);
2444         ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2445         dce_v11_0_lock_cursor(crtc, false);
2446
2447         return ret;
2448 }
2449
/*
 * dce_v11_0_crtc_cursor_set2 - legacy cursor_set2 hook
 *
 * Pins the new cursor BO in VRAM, programs the cursor surface, size and
 * hot spot, then unpins and drops the reference on the previously set
 * cursor BO.  A zero @handle turns the cursor off.
 */
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x,
				      int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v11_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_put(obj);
		return ret;
	}

	/* request a contiguous VRAM placement before pinning for scanout */
	aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_put(obj);
		return ret;
	}
	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);

	dce_v11_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		/* keep the cursor tip in place when the hot spot changes */
		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v11_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v11_0_show_cursor(crtc);
	dce_v11_0_lock_cursor(crtc, false);

unpin:
	/* release the previous cursor BO (this aobj shadows the one above) */
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, true);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_put(amdgpu_crtc->cursor_bo);
	}

	/* a failed reserve above is deliberately not propagated; report success */
	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
2535
2536 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2537 {
2538         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2539
2540         if (amdgpu_crtc->cursor_bo) {
2541                 dce_v11_0_lock_cursor(crtc, true);
2542
2543                 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2544                                              amdgpu_crtc->cursor_y);
2545
2546                 dce_v11_0_show_cursor(crtc);
2547
2548                 dce_v11_0_lock_cursor(crtc, false);
2549         }
2550 }
2551
/*
 * Legacy gamma_set hook.  The DRM core has already copied the new table
 * into crtc->gamma_store, which dce_v11_0_crtc_load_lut() reads, so the
 * red/green/blue/size arguments are not used directly here.
 */
static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				    u16 *blue, uint32_t size,
				    struct drm_modeset_acquire_ctx *ctx)
{
	dce_v11_0_crtc_load_lut(crtc);

	return 0;
}
2560
/* Unregister the CRTC from DRM, then free the embedding amdgpu_crtc. */
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
2568
/* Legacy (non-atomic) CRTC callbacks: cursor, gamma, modeset entry points
 * and vblank plumbing shared with the amdgpu display core. */
static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
	.cursor_set2 = dce_v11_0_crtc_cursor_set2,
	.cursor_move = dce_v11_0_crtc_cursor_move,
	.gamma_set = dce_v11_0_crtc_gamma_set,
	.set_config = amdgpu_display_crtc_set_config,
	.destroy = dce_v11_0_crtc_destroy,
	.page_flip_target = amdgpu_display_crtc_page_flip_target,
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
2581
/*
 * dce_v11_0_crtc_dpms - power the crtc on or off
 *
 * DPMS_ON: enable and unblank the crtc, re-sync vblank/pageflip
 * interrupt enables, start vblank handling and reload the LUT.
 * STANDBY/SUSPEND/OFF: stop vblank handling, blank (if enabled) and
 * disable the crtc.  Power management clocks are recomputed in both
 * cases.
 */
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		/* NOTE(review): VGA mode is toggled around the (un)blank calls;
		 * presumably required by the atombios sequence — confirm */
		dce_v11_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v11_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_display_crtc_idx_to_irq_type(adev,
						amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v11_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled) {
			dce_v11_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v11_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_dpm_compute_clocks(adev);
}
2620
/*
 * Pre-modeset hook: keep the crtc pair powered, take the atombios
 * register lock (released in dce_v11_0_crtc_commit) and blank the crtc
 * while it is reprogrammed.
 */
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2628
/* Post-modeset hook: light the crtc back up and drop the atombios lock
 * taken in dce_v11_0_crtc_prepare. */
static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2634
/*
 * dce_v11_0_crtc_disable - fully shut down a crtc
 *
 * Blanks and disables the crtc, unpins the scanout buffer, disables
 * the graphics surface, power gates the crtc pair and, unless another
 * enabled crtc shares it, turns off the PLL this crtc was driving.
 */
static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_atom_ss ss;
	int i;

	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_bo *abo;

		/* drop the scanout pin on the current framebuffer */
		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v11_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	/* leave the PLL running if any other enabled crtc still uses it */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL0:
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_COMBOPHY_PLL0:
	case ATOM_COMBOPHY_PLL1:
	case ATOM_COMBOPHY_PLL2:
	case ATOM_COMBOPHY_PLL3:
	case ATOM_COMBOPHY_PLL4:
	case ATOM_COMBOPHY_PLL5:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* forget the encoder/connector/PLL binding for the next modeset */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2701
/*
 * dce_v11_0_crtc_mode_set - legacy modeset hook
 *
 * Programs the pixel PLL (SetPixelClock directly on Polaris/VEGAM,
 * the generic atombios path otherwise), DTD timing, scanout base,
 * overscan and scaler, then restores the cursor.  Requires that
 * mode_fixup already computed amdgpu_crtc->adjusted_clock.
 */
static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM)) {
		struct amdgpu_encoder *amdgpu_encoder =
			to_amdgpu_encoder(amdgpu_crtc->encoder);
		int encoder_mode =
			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

		/* SetPixelClock calculates the plls and ss values now */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
						 amdgpu_crtc->pll_id,
						 encoder_mode, amdgpu_encoder->encoder_id,
						 adjusted_mode->clock, 0, 0, 0, 0,
						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
	} else {
		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	}
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v11_0_cursor_reset(crtc);
	/* update the hw version for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
2742
/*
 * dce_v11_0_crtc_mode_fixup - validate the mode and bind encoder/PLL
 *
 * Caches the encoder/connector pair driving this crtc, applies the
 * scaler fixups, pre-computes the PLL parameters and allocates a PPLL.
 * Returns false if no encoder/connector is attached, a fixup fails, or
 * no PPLL can be allocated for a non-DP output.
 */
static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
				     const struct drm_display_mode *mode,
				     struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	/* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc == crtc) {
			amdgpu_crtc->encoder = encoder;
			amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
			break;
		}
	}
	if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
		amdgpu_crtc->encoder = NULL;
		amdgpu_crtc->connector = NULL;
		return false;
	}
	if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
		return false;
	if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
		return false;
	/* pick pll */
	amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
	/* if we can't get a PPLL for a non-DP encoder, fail */
	if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
	    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
		return false;

	return true;
}
2777
/* Legacy mode_set_base hook: re-point scanout at (x, y); the non-atomic
 * path in do_set_base also unpins @old_fb when it differs from the new fb. */
static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2783
/* Atomic variant of set_base: program @fb as scanout with atomic=1, which
 * skips unpinning the previously pinned buffer in do_set_base. */
static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y, enum mode_set_atomic state)
{
	return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
}
2790
/* Legacy (non-atomic) CRTC helper callbacks wiring the modeset sequence:
 * prepare -> mode_fixup -> mode_set -> commit, plus base updates. */
static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
	.dpms = dce_v11_0_crtc_dpms,
	.mode_fixup = dce_v11_0_crtc_mode_fixup,
	.mode_set = dce_v11_0_crtc_mode_set,
	.mode_set_base = dce_v11_0_crtc_set_base,
	.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
	.prepare = dce_v11_0_crtc_prepare,
	.commit = dce_v11_0_crtc_commit,
	.disable = dce_v11_0_crtc_disable,
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};
2802
2803 static void dce_v11_0_panic_flush(struct drm_plane *plane)
2804 {
2805         struct drm_framebuffer *fb;
2806         struct amdgpu_crtc *amdgpu_crtc;
2807         struct amdgpu_device *adev;
2808         uint32_t fb_format;
2809
2810         if (!plane->fb)
2811                 return;
2812
2813         fb = plane->fb;
2814         amdgpu_crtc = to_amdgpu_crtc(plane->crtc);
2815         adev = drm_to_adev(fb->dev);
2816
2817         /* Disable DC tiling */
2818         fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset);
2819         fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK;
2820         WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
2821
2822 }
2823
/* Primary plane helpers used by the drm panic support (scanout buffer
 * lookup and the pre-panic flush above). */
static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = {
	.get_scanout_buffer = amdgpu_display_get_scanout_buffer,
	.panic_flush = dce_v11_0_panic_flush,
};
2828
2829 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
2830 {
2831         struct amdgpu_crtc *amdgpu_crtc;
2832
2833         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2834                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2835         if (amdgpu_crtc == NULL)
2836                 return -ENOMEM;
2837
2838         drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
2839
2840         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2841         amdgpu_crtc->crtc_id = index;
2842         adev->mode_info.crtcs[index] = amdgpu_crtc;
2843
2844         amdgpu_crtc->max_cursor_width = 128;
2845         amdgpu_crtc->max_cursor_height = 128;
2846         adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2847         adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2848
2849         switch (amdgpu_crtc->crtc_id) {
2850         case 0:
2851         default:
2852                 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2853                 break;
2854         case 1:
2855                 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2856                 break;
2857         case 2:
2858                 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2859                 break;
2860         case 3:
2861                 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2862                 break;
2863         case 4:
2864                 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2865                 break;
2866         case 5:
2867                 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2868                 break;
2869         }
2870
2871         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2872         amdgpu_crtc->adjusted_clock = 0;
2873         amdgpu_crtc->encoder = NULL;
2874         amdgpu_crtc->connector = NULL;
2875         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
2876         drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs);
2877
2878         return 0;
2879 }
2880
2881 static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block)
2882 {
2883         struct amdgpu_device *adev = ip_block->adev;
2884
2885         adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
2886         adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
2887
2888         dce_v11_0_set_display_funcs(adev);
2889
2890         adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
2891
2892         switch (adev->asic_type) {
2893         case CHIP_CARRIZO:
2894                 adev->mode_info.num_hpd = 6;
2895                 adev->mode_info.num_dig = 9;
2896                 break;
2897         case CHIP_STONEY:
2898                 adev->mode_info.num_hpd = 6;
2899                 adev->mode_info.num_dig = 9;
2900                 break;
2901         case CHIP_POLARIS10:
2902         case CHIP_VEGAM:
2903                 adev->mode_info.num_hpd = 6;
2904                 adev->mode_info.num_dig = 6;
2905                 break;
2906         case CHIP_POLARIS11:
2907         case CHIP_POLARIS12:
2908                 adev->mode_info.num_hpd = 5;
2909                 adev->mode_info.num_dig = 5;
2910                 break;
2911         default:
2912                 /* FIXME: not supported yet */
2913                 return -EINVAL;
2914         }
2915
2916         dce_v11_0_set_irq_funcs(adev);
2917
2918         return 0;
2919 }
2920
2921 static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block)
2922 {
2923         int r, i;
2924         struct amdgpu_device *adev = ip_block->adev;
2925
2926         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2927                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2928                 if (r)
2929                         return r;
2930         }
2931
2932         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2933                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2934                 if (r)
2935                         return r;
2936         }
2937
2938         /* HPD hotplug */
2939         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2940         if (r)
2941                 return r;
2942
2943         adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2944
2945         adev_to_drm(adev)->mode_config.async_page_flip = true;
2946
2947         adev_to_drm(adev)->mode_config.max_width = 16384;
2948         adev_to_drm(adev)->mode_config.max_height = 16384;
2949
2950         adev_to_drm(adev)->mode_config.preferred_depth = 24;
2951         adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2952
2953         adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2954
2955         r = amdgpu_display_modeset_create_props(adev);
2956         if (r)
2957                 return r;
2958
2959         adev_to_drm(adev)->mode_config.max_width = 16384;
2960         adev_to_drm(adev)->mode_config.max_height = 16384;
2961
2962
2963         /* allocate crtcs */
2964         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2965                 r = dce_v11_0_crtc_init(adev, i);
2966                 if (r)
2967                         return r;
2968         }
2969
2970         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2971                 amdgpu_display_print_display_setup(adev_to_drm(adev));
2972         else
2973                 return -EINVAL;
2974
2975         /* setup afmt */
2976         r = dce_v11_0_afmt_init(adev);
2977         if (r)
2978                 return r;
2979
2980         r = dce_v11_0_audio_init(adev);
2981         if (r)
2982                 return r;
2983
2984         /* Disable vblank IRQs aggressively for power-saving */
2985         /* XXX: can this be enabled for DC? */
2986         adev_to_drm(adev)->vblank_disable_immediate = true;
2987
2988         r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2989         if (r)
2990                 return r;
2991
2992         INIT_DELAYED_WORK(&adev->hotplug_work,
2993                   amdgpu_display_hotplug_work_func);
2994
2995         drm_kms_helper_poll_init(adev_to_drm(adev));
2996
2997         adev->mode_info.mode_config_initialized = true;
2998         return 0;
2999 }
3000
/*
 * dce_v11_0_sw_fini - software-side teardown, reverse of sw_init.
 *
 * Frees the hardcoded EDID, stops connector polling, tears down audio
 * and afmt state, then cleans up the DRM mode_config.
 */
static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	drm_edid_free(adev->mode_info.bios_hardcoded_edid);

	/* stop hotplug polling before tearing down the outputs it touches */
	drm_kms_helper_poll_fini(adev_to_drm(adev));

	dce_v11_0_audio_fini(adev);

	dce_v11_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev_to_drm(adev));
	adev->mode_info.mode_config_initialized = false;

	return 0;
}
3018
/*
 * dce_v11_0_hw_init - bring up the display hardware.
 *
 * Programs golden registers, disables VGA emulation, initializes the DIG
 * PHYs and display engine clocks (Polaris/VEGAM use the newer SetDCEClock
 * atombios path), then enables HPD and pageflip interrupts.  The call
 * order follows the hardware bring-up sequence and must be preserved.
 */
static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	int i;
	struct amdgpu_device *adev = ip_block->adev;

	dce_v11_0_init_golden_registers(adev);

	/* disable vga render */
	dce_v11_0_set_vga_render_state(adev, false);
	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_crtc_powergate_init(adev);
	amdgpu_atombios_encoder_init_dig(adev);
	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM)) {
		/* newer ASICs program dispclk/dprefclk via SetDCEClock */
		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
						   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
	} else {
		amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
	}

	/* initialize hpd */
	dce_v11_0_hpd_init(adev);

	/* start with all audio pins disabled; enabled on demand per stream */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_init(adev);

	return 0;
}
3054
/*
 * dce_v11_0_hw_fini - shut down the display hardware.
 *
 * Reverse of hw_init: tears down HPD, disables all audio pins and the
 * pageflip interrupts, then waits for any pending hotplug work so the
 * worker cannot run against disabled hardware.
 */
static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	int i;
	struct amdgpu_device *adev = ip_block->adev;

	dce_v11_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_fini(adev);

	/* HPD interrupts are off now; drain any queued hotplug work */
	flush_delayed_work(&adev->hotplug_work);

	return 0;
}
3072
/*
 * dce_v11_0_suspend - suspend the display block.
 *
 * Runs the generic display suspend helper, saves the current backlight
 * level from the hardware register so resume can restore it, then powers
 * the block down via hw_fini.
 */
static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_display_suspend_helper(adev);
	if (r)
		return r;

	/* remember the backlight level across suspend */
	adev->mode_info.bl_level =
		amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);

	return dce_v11_0_hw_fini(ip_block);
}
3087
/*
 * dce_v11_0_resume - resume the display block.
 *
 * Restores the saved backlight register value, re-runs hw_init, turns the
 * backlight encoder back on, then resumes the display helper state.
 *
 * NOTE(review): the hw_init() return code is deliberately checked only
 * after the backlight is restored — presumably so the panel lights up
 * even on partial init failure; confirm before reordering.
 */
static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
							   adev->mode_info.bl_level);

	ret = dce_v11_0_hw_init(ip_block);

	/* turn on the BL */
	if (adev->mode_info.bl_encoder) {
		u8 bl_level = amdgpu_display_backlight_get_level(adev,
								  adev->mode_info.bl_encoder);
		amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
						    bl_level);
	}
	if (ret)
		return ret;

	return amdgpu_display_resume_helper(adev);
}
3110
/* The display block reports itself as always idle. */
static bool dce_v11_0_is_idle(void *handle)
{
	return true;
}
3115
/*
 * dce_v11_0_soft_reset - soft-reset the display controller if it hung.
 *
 * Asserts the DC bit in SRBM_SOFT_RESET, waits, then deasserts it.  The
 * read-backs after each write and the udelay()s are part of the reset
 * protocol; the statement order must not change.
 */
static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = ip_block->adev;

	if (dce_v11_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);	/* post the write */

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);	/* post the write */

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
3142
3143 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3144                                                      int crtc,
3145                                                      enum amdgpu_interrupt_state state)
3146 {
3147         u32 lb_interrupt_mask;
3148
3149         if (crtc >= adev->mode_info.num_crtc) {
3150                 DRM_DEBUG("invalid crtc %d\n", crtc);
3151                 return;
3152         }
3153
3154         switch (state) {
3155         case AMDGPU_IRQ_STATE_DISABLE:
3156                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3157                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3158                                                   VBLANK_INTERRUPT_MASK, 0);
3159                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3160                 break;
3161         case AMDGPU_IRQ_STATE_ENABLE:
3162                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3163                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3164                                                   VBLANK_INTERRUPT_MASK, 1);
3165                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3166                 break;
3167         default:
3168                 break;
3169         }
3170 }
3171
3172 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3173                                                     int crtc,
3174                                                     enum amdgpu_interrupt_state state)
3175 {
3176         u32 lb_interrupt_mask;
3177
3178         if (crtc >= adev->mode_info.num_crtc) {
3179                 DRM_DEBUG("invalid crtc %d\n", crtc);
3180                 return;
3181         }
3182
3183         switch (state) {
3184         case AMDGPU_IRQ_STATE_DISABLE:
3185                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3186                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3187                                                   VLINE_INTERRUPT_MASK, 0);
3188                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3189                 break;
3190         case AMDGPU_IRQ_STATE_ENABLE:
3191                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3192                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3193                                                   VLINE_INTERRUPT_MASK, 1);
3194                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3195                 break;
3196         default:
3197                 break;
3198         }
3199 }
3200
3201 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
3202                                         struct amdgpu_irq_src *source,
3203                                         unsigned hpd,
3204                                         enum amdgpu_interrupt_state state)
3205 {
3206         u32 tmp;
3207
3208         if (hpd >= adev->mode_info.num_hpd) {
3209                 DRM_DEBUG("invalid hdp %d\n", hpd);
3210                 return 0;
3211         }
3212
3213         switch (state) {
3214         case AMDGPU_IRQ_STATE_DISABLE:
3215                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3216                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3217                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3218                 break;
3219         case AMDGPU_IRQ_STATE_ENABLE:
3220                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3221                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3222                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3223                 break;
3224         default:
3225                 break;
3226         }
3227
3228         return 0;
3229 }
3230
3231 static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
3232                                         struct amdgpu_irq_src *source,
3233                                         unsigned type,
3234                                         enum amdgpu_interrupt_state state)
3235 {
3236         switch (type) {
3237         case AMDGPU_CRTC_IRQ_VBLANK1:
3238                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3239                 break;
3240         case AMDGPU_CRTC_IRQ_VBLANK2:
3241                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3242                 break;
3243         case AMDGPU_CRTC_IRQ_VBLANK3:
3244                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3245                 break;
3246         case AMDGPU_CRTC_IRQ_VBLANK4:
3247                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3248                 break;
3249         case AMDGPU_CRTC_IRQ_VBLANK5:
3250                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3251                 break;
3252         case AMDGPU_CRTC_IRQ_VBLANK6:
3253                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3254                 break;
3255         case AMDGPU_CRTC_IRQ_VLINE1:
3256                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
3257                 break;
3258         case AMDGPU_CRTC_IRQ_VLINE2:
3259                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
3260                 break;
3261         case AMDGPU_CRTC_IRQ_VLINE3:
3262                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
3263                 break;
3264         case AMDGPU_CRTC_IRQ_VLINE4:
3265                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
3266                 break;
3267         case AMDGPU_CRTC_IRQ_VLINE5:
3268                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
3269                 break;
3270          case AMDGPU_CRTC_IRQ_VLINE6:
3271                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
3272                 break;
3273         default:
3274                 break;
3275         }
3276         return 0;
3277 }
3278
3279 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3280                                             struct amdgpu_irq_src *src,
3281                                             unsigned type,
3282                                             enum amdgpu_interrupt_state state)
3283 {
3284         u32 reg;
3285
3286         if (type >= adev->mode_info.num_crtc) {
3287                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3288                 return -EINVAL;
3289         }
3290
3291         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3292         if (state == AMDGPU_IRQ_STATE_DISABLE)
3293                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3294                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3295         else
3296                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3297                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3298
3299         return 0;
3300 }
3301
3302 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3303                                   struct amdgpu_irq_src *source,
3304                                   struct amdgpu_iv_entry *entry)
3305 {
3306         unsigned long flags;
3307         unsigned crtc_id;
3308         struct amdgpu_crtc *amdgpu_crtc;
3309         struct amdgpu_flip_work *works;
3310
3311         crtc_id = (entry->src_id - 8) >> 1;
3312         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3313
3314         if (crtc_id >= adev->mode_info.num_crtc) {
3315                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3316                 return -EINVAL;
3317         }
3318
3319         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3320             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3321                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3322                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3323
3324         /* IRQ could occur when in initial stage */
3325         if(amdgpu_crtc == NULL)
3326                 return 0;
3327
3328         spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3329         works = amdgpu_crtc->pflip_works;
3330         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3331                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3332                                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
3333                                                  amdgpu_crtc->pflip_status,
3334                                                  AMDGPU_FLIP_SUBMITTED);
3335                 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3336                 return 0;
3337         }
3338
3339         /* page flip completed. clean up */
3340         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3341         amdgpu_crtc->pflip_works = NULL;
3342
3343         /* wakeup usersapce */
3344         if(works->event)
3345                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3346
3347         spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3348
3349         drm_crtc_vblank_put(&amdgpu_crtc->base);
3350         schedule_work(&works->unpin_work);
3351
3352         return 0;
3353 }
3354
3355 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
3356                                   int hpd)
3357 {
3358         u32 tmp;
3359
3360         if (hpd >= adev->mode_info.num_hpd) {
3361                 DRM_DEBUG("invalid hdp %d\n", hpd);
3362                 return;
3363         }
3364
3365         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3366         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3367         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3368 }
3369
3370 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3371                                           int crtc)
3372 {
3373         u32 tmp;
3374
3375         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3376                 DRM_DEBUG("invalid crtc %d\n", crtc);
3377                 return;
3378         }
3379
3380         tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3381         tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3382         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3383 }
3384
3385 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3386                                          int crtc)
3387 {
3388         u32 tmp;
3389
3390         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3391                 DRM_DEBUG("invalid crtc %d\n", crtc);
3392                 return;
3393         }
3394
3395         tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3396         tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3397         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3398 }
3399
3400 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3401                                 struct amdgpu_irq_src *source,
3402                                 struct amdgpu_iv_entry *entry)
3403 {
3404         unsigned crtc = entry->src_id - 1;
3405         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3406         unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3407                                                                     crtc);
3408
3409         switch (entry->src_data[0]) {
3410         case 0: /* vblank */
3411                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3412                         dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3413                 else
3414                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3415
3416                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3417                         drm_handle_vblank(adev_to_drm(adev), crtc);
3418                 }
3419                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3420
3421                 break;
3422         case 1: /* vline */
3423                 if (disp_int & interrupt_status_offsets[crtc].vline)
3424                         dce_v11_0_crtc_vline_int_ack(adev, crtc);
3425                 else
3426                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3427
3428                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3429
3430                 break;
3431         default:
3432                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3433                 break;
3434         }
3435
3436         return 0;
3437 }
3438
3439 static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
3440                              struct amdgpu_irq_src *source,
3441                              struct amdgpu_iv_entry *entry)
3442 {
3443         uint32_t disp_int, mask;
3444         unsigned hpd;
3445
3446         if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3447                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3448                 return 0;
3449         }
3450
3451         hpd = entry->src_data[0];
3452         disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3453         mask = interrupt_status_offsets[hpd].hpd;
3454
3455         if (disp_int & mask) {
3456                 dce_v11_0_hpd_int_ack(adev, hpd);
3457                 schedule_delayed_work(&adev->hotplug_work, 0);
3458                 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3459         }
3460
3461         return 0;
3462 }
3463
/* Clockgating is not implemented for this display block; no-op. */
static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	return 0;
}
3469
/* Powergating is not implemented for this display block; no-op. */
static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	return 0;
}
3475
/* IP-block dispatch table registered with the amdgpu IP framework. */
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
	.name = "dce_v11_0",
	.early_init = dce_v11_0_early_init,
	.sw_init = dce_v11_0_sw_init,
	.sw_fini = dce_v11_0_sw_fini,
	.hw_init = dce_v11_0_hw_init,
	.hw_fini = dce_v11_0_hw_fini,
	.suspend = dce_v11_0_suspend,
	.resume = dce_v11_0_resume,
	.is_idle = dce_v11_0_is_idle,
	.soft_reset = dce_v11_0_soft_reset,
	.set_clockgating_state = dce_v11_0_set_clockgating_state,
	.set_powergating_state = dce_v11_0_set_powergating_state,
};
3490
/*
 * dce_v11_0_encoder_mode_set - per-encoder mode_set helper.
 *
 * Records the pixel clock, powers the encoder down for reprogramming,
 * restores the crtc interleave setting, and (re)enables AFMT for HDMI.
 * The DPMS-off must happen here rather than in prepare() because the crtc
 * association is needed.
 */
static void
dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v11_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v11_0_afmt_enable(encoder, true);
		dce_v11_0_afmt_setmode(encoder, adjusted_mode);
	}
}
3511
/*
 * dce_v11_0_encoder_prepare - pre-mode_set encoder setup.
 *
 * Picks a DIG encoder/AFMT block for digital outputs, takes the atombios
 * scratch-register lock (released in dce_v11_0_encoder_commit()), routes
 * the i2c port and powers on eDP panels, then programs the crtc source
 * and FMT blocks.
 */
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = drm_to_adev(encoder->dev);
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	/* held across mode set; unlocked in the commit hook */
	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v11_0_program_fmt(encoder);
}
3550
3551 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
3552 {
3553         struct drm_device *dev = encoder->dev;
3554         struct amdgpu_device *adev = drm_to_adev(dev);
3555
3556         /* need to call this here as we need the crtc set up */
3557         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3558         amdgpu_atombios_scratch_regs_lock(adev, false);
3559 }
3560
3561 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
3562 {
3563         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3564         struct amdgpu_encoder_atom_dig *dig;
3565
3566         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3567
3568         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3569                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3570                         dce_v11_0_afmt_enable(encoder, false);
3571                 dig = amdgpu_encoder->enc_priv;
3572                 dig->dig_encoder = -1;
3573         }
3574         amdgpu_encoder->active_device = 0;
3575 }
3576
/* these are handled by the primary encoders */
static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty: external encoders need no prepare step */
}
3582
static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
{
	/* intentionally empty: external encoders need no commit step */
}
3587
static void
dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty: mode set is handled by the primary encoder */
}
3595
static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
{
	/* intentionally empty: disable is handled by the primary encoder */
}
3600
static void
dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty: DPMS is handled by the primary encoder */
}
3606
/* Helper table for external encoders (all operations are no-op stubs). */
static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
	.dpms = dce_v11_0_ext_dpms,
	.prepare = dce_v11_0_ext_prepare,
	.mode_set = dce_v11_0_ext_mode_set,
	.commit = dce_v11_0_ext_commit,
	.disable = dce_v11_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
3615
/* Helper table for digital (DIG) encoders. */
static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.disable = dce_v11_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};
3625
/* Helper table for analog (DAC) encoders. */
static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
3634
/*
 * dce_v11_0_encoder_destroy - free an encoder and its private state.
 *
 * Tears down LCD backlight state first, frees the dig private data, then
 * unregisters the encoder from DRM before freeing the wrapper struct.
 */
static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}
3644
/* Encoder funcs: only destruction is driver-specific. */
static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
	.destroy = dce_v11_0_encoder_destroy,
};
3648
3649 static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3650                                  uint32_t encoder_enum,
3651                                  uint32_t supported_device,
3652                                  u16 caps)
3653 {
3654         struct drm_device *dev = adev_to_drm(adev);
3655         struct drm_encoder *encoder;
3656         struct amdgpu_encoder *amdgpu_encoder;
3657
3658         /* see if we already added it */
3659         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3660                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3661                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3662                         amdgpu_encoder->devices |= supported_device;
3663                         return;
3664                 }
3665
3666         }
3667
3668         /* add a new one */
3669         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3670         if (!amdgpu_encoder)
3671                 return;
3672
3673         encoder = &amdgpu_encoder->base;
3674         switch (adev->mode_info.num_crtc) {
3675         case 1:
3676                 encoder->possible_crtcs = 0x1;
3677                 break;
3678         case 2:
3679         default:
3680                 encoder->possible_crtcs = 0x3;
3681                 break;
3682         case 3:
3683                 encoder->possible_crtcs = 0x7;
3684                 break;
3685         case 4:
3686                 encoder->possible_crtcs = 0xf;
3687                 break;
3688         case 5:
3689                 encoder->possible_crtcs = 0x1f;
3690                 break;
3691         case 6:
3692                 encoder->possible_crtcs = 0x3f;
3693                 break;
3694         }
3695
3696         amdgpu_encoder->enc_priv = NULL;
3697
3698         amdgpu_encoder->encoder_enum = encoder_enum;
3699         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3700         amdgpu_encoder->devices = supported_device;
3701         amdgpu_encoder->rmx_type = RMX_OFF;
3702         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3703         amdgpu_encoder->is_ext_encoder = false;
3704         amdgpu_encoder->caps = caps;
3705
3706         switch (amdgpu_encoder->encoder_id) {
3707         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3708         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3709                 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3710                                  DRM_MODE_ENCODER_DAC, NULL);
3711                 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3712                 break;
3713         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3714         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3715         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3716         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3717         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3718                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3719                         amdgpu_encoder->rmx_type = RMX_FULL;
3720                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3721                                          DRM_MODE_ENCODER_LVDS, NULL);
3722                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3723                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3724                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3725                                          DRM_MODE_ENCODER_DAC, NULL);
3726                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3727                 } else {
3728                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3729                                          DRM_MODE_ENCODER_TMDS, NULL);
3730                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3731                 }
3732                 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
3733                 break;
3734         case ENCODER_OBJECT_ID_SI170B:
3735         case ENCODER_OBJECT_ID_CH7303:
3736         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3737         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3738         case ENCODER_OBJECT_ID_TITFP513:
3739         case ENCODER_OBJECT_ID_VT1623:
3740         case ENCODER_OBJECT_ID_HDMI_SI1930:
3741         case ENCODER_OBJECT_ID_TRAVIS:
3742         case ENCODER_OBJECT_ID_NUTMEG:
3743                 /* these are handled by the primary encoders */
3744                 amdgpu_encoder->is_ext_encoder = true;
3745                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3746                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3747                                          DRM_MODE_ENCODER_LVDS, NULL);
3748                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3749                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3750                                          DRM_MODE_ENCODER_DAC, NULL);
3751                 else
3752                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3753                                          DRM_MODE_ENCODER_TMDS, NULL);
3754                 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3755                 break;
3756         }
3757 }
3758
/* Display callback table exported to the amdgpu core via mode_info.funcs;
 * backlight control is delegated to the shared atombios encoder code.
 */
static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
        .bandwidth_update = &dce_v11_0_bandwidth_update,
        .vblank_get_counter = &dce_v11_0_vblank_get_counter,
        .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
        .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
        .hpd_sense = &dce_v11_0_hpd_sense,
        .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
        .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
        .page_flip = &dce_v11_0_page_flip,
        .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
        .add_encoder = &dce_v11_0_encoder_add,
        .add_connector = &amdgpu_connector_add,
};
3772
/* Install the DCE 11.x display callback table on this device. */
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
{
        adev->mode_info.funcs = &dce_v11_0_display_funcs;
}
3777
/* IRQ source ops for CRTC vblank/vline interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
        .set = dce_v11_0_set_crtc_irq_state,
        .process = dce_v11_0_crtc_irq,
};
3782
/* IRQ source ops for page-flip completion interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
        .set = dce_v11_0_set_pageflip_irq_state,
        .process = dce_v11_0_pageflip_irq,
};
3787
/* IRQ source ops for hotplug-detect interrupts. */
static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
        .set = dce_v11_0_set_hpd_irq_state,
        .process = dce_v11_0_hpd_irq,
};
3792
3793 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
3794 {
3795         if (adev->mode_info.num_crtc > 0)
3796                 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3797         else
3798                 adev->crtc_irq.num_types = 0;
3799         adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
3800
3801         adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3802         adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
3803
3804         adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3805         adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
3806 }
3807
/* DCE 11.0 IP block descriptor (e.g. Carrizo-class parts). */
const struct amdgpu_ip_block_version dce_v11_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 11,
        .minor = 0,
        .rev = 0,
        .funcs = &dce_v11_0_ip_funcs,
};
3816
/* DCE 11.2 IP block descriptor; shares the 11.0 ip_funcs implementation
 * and differs only in the advertised minor version.
 */
const struct amdgpu_ip_block_version dce_v11_2_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_DCE,
        .major = 11,
        .minor = 2,
        .rev = 0,
        .funcs = &dce_v11_0_ip_funcs,
};