]> Git Repo - linux.git/blob - drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
Merge tag 'rpmsg-v5.4' of git://git.kernel.org/pub/scm/linux/kernel/git/andersson...
[linux.git] / drivers / gpu / drm / amd / amdgpu / dce_v11_0.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <drm/drm_fourcc.h>
25 #include <drm/drm_vblank.h>
26
27 #include "amdgpu.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_i2c.h"
30 #include "vid.h"
31 #include "atom.h"
32 #include "amdgpu_atombios.h"
33 #include "atombios_crtc.h"
34 #include "atombios_encoders.h"
35 #include "amdgpu_pll.h"
36 #include "amdgpu_connectors.h"
37 #include "amdgpu_display.h"
38 #include "dce_v11_0.h"
39
40 #include "dce/dce_11_0_d.h"
41 #include "dce/dce_11_0_sh_mask.h"
42 #include "dce/dce_11_0_enum.h"
43 #include "oss/oss_3_0_d.h"
44 #include "oss/oss_3_0_sh_mask.h"
45 #include "gmc/gmc_8_1_d.h"
46 #include "gmc/gmc_8_1_sh_mask.h"
47
48 #include "ivsrcid/ivsrcid_vislands30.h"
49
50 static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev);
51 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev);
52
/* MMIO offsets of each CRTC register block, indexed by crtc id (0-6). */
static const u32 crtc_offsets[] =
{
        CRTC0_REGISTER_OFFSET,
        CRTC1_REGISTER_OFFSET,
        CRTC2_REGISTER_OFFSET,
        CRTC3_REGISTER_OFFSET,
        CRTC4_REGISTER_OFFSET,
        CRTC5_REGISTER_OFFSET,
        CRTC6_REGISTER_OFFSET
};
63
/* MMIO offsets of each HPD (hotplug detect) pin register block, indexed by hpd id. */
static const u32 hpd_offsets[] =
{
        HPD0_REGISTER_OFFSET,
        HPD1_REGISTER_OFFSET,
        HPD2_REGISTER_OFFSET,
        HPD3_REGISTER_OFFSET,
        HPD4_REGISTER_OFFSET,
        HPD5_REGISTER_OFFSET
};
73
/* MMIO offsets of each DIG (digital encoder) register block, indexed by dig id. */
static const uint32_t dig_offsets[] = {
        DIG0_REGISTER_OFFSET,
        DIG1_REGISTER_OFFSET,
        DIG2_REGISTER_OFFSET,
        DIG3_REGISTER_OFFSET,
        DIG4_REGISTER_OFFSET,
        DIG5_REGISTER_OFFSET,
        DIG6_REGISTER_OFFSET,
        DIG7_REGISTER_OFFSET,
        DIG8_REGISTER_OFFSET
};
85
/*
 * Per-CRTC display interrupt status register and the bit masks for the
 * vblank, vline and hpd interrupt sources within it; indexed by crtc id.
 */
static const struct {
        uint32_t        reg;
        uint32_t        vblank;
        uint32_t        vline;
        uint32_t        hpd;

} interrupt_status_offsets[] = { {
        .reg = mmDISP_INTERRUPT_STATUS,
        .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
}, {
        .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
        .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
        .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
        .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
} };
123
/* Carrizo golden settings: { register, field mask, value } triples
 * consumed by amdgpu_device_program_register_sequence(). */
static const u32 cz_golden_settings_a11[] =
{
        mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
        mmFBC_MISC, 0x1f311fff, 0x14300000,
};
129
/* Carrizo clock/memory gating init: { register, field mask, value } triples. */
static const u32 cz_mgcg_cgcg_init[] =
{
        mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
        mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
};
135
/* Stoney golden settings: { register, field mask, value } triples. */
static const u32 stoney_golden_settings_a11[] =
{
        mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
        mmFBC_MISC, 0x1f311fff, 0x14302000,
};
141
/* Polaris11/12 golden settings: { register, field mask, value } triples. */
static const u32 polaris11_golden_settings_a11[] =
{
        mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
        mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
        mmFBC_DEBUG1, 0xffffffff, 0x00000008,
        mmFBC_MISC, 0x9f313fff, 0x14302008,
        mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
150
/* Polaris10/VegaM golden settings: { register, field mask, value } triples. */
static const u32 polaris10_golden_settings_a11[] =
{
        mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
        mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
        mmFBC_MISC, 0x9f313fff, 0x14302008,
        mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
158
159 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
160 {
161         switch (adev->asic_type) {
162         case CHIP_CARRIZO:
163                 amdgpu_device_program_register_sequence(adev,
164                                                         cz_mgcg_cgcg_init,
165                                                         ARRAY_SIZE(cz_mgcg_cgcg_init));
166                 amdgpu_device_program_register_sequence(adev,
167                                                         cz_golden_settings_a11,
168                                                         ARRAY_SIZE(cz_golden_settings_a11));
169                 break;
170         case CHIP_STONEY:
171                 amdgpu_device_program_register_sequence(adev,
172                                                         stoney_golden_settings_a11,
173                                                         ARRAY_SIZE(stoney_golden_settings_a11));
174                 break;
175         case CHIP_POLARIS11:
176         case CHIP_POLARIS12:
177                 amdgpu_device_program_register_sequence(adev,
178                                                         polaris11_golden_settings_a11,
179                                                         ARRAY_SIZE(polaris11_golden_settings_a11));
180                 break;
181         case CHIP_POLARIS10:
182         case CHIP_VEGAM:
183                 amdgpu_device_program_register_sequence(adev,
184                                                         polaris10_golden_settings_a11,
185                                                         ARRAY_SIZE(polaris10_golden_settings_a11));
186                 break;
187         default:
188                 break;
189         }
190 }
191
192 static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev,
193                                      u32 block_offset, u32 reg)
194 {
195         unsigned long flags;
196         u32 r;
197
198         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
199         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
200         r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
201         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
202
203         return r;
204 }
205
206 static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev,
207                                       u32 block_offset, u32 reg, u32 v)
208 {
209         unsigned long flags;
210
211         spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
212         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
213         WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
214         spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
215 }
216
217 static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
218 {
219         if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
220                 return 0;
221         else
222                 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
223 }
224
225 static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
226 {
227         unsigned i;
228
229         /* Enable pflip interrupts */
230         for (i = 0; i < adev->mode_info.num_crtc; i++)
231                 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
232 }
233
234 static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
235 {
236         unsigned i;
237
238         /* Disable pflip interrupts */
239         for (i = 0; i < adev->mode_info.num_crtc; i++)
240                 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
241 }
242
/**
 * dce_v11_0_page_flip - pageflip callback.
 *
 * @adev: amdgpu_device pointer
 * @crtc_id: crtc to cleanup pageflip on
 * @crtc_base: new address of the crtc (GPU MC address)
 * @async: flip immediately (true) or wait for vsync (false)
 *
 * Triggers the actual pageflip by updating the primary
 * surface base address.
 */
static void dce_v11_0_page_flip(struct amdgpu_device *adev,
                                int crtc_id, u64 crtc_base, bool async)
{
        struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
        struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
        u32 tmp;

        /* flip immediate for async, default is vsync */
        tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
                            GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
        WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
        /* update pitch (in pixels, hence the division by bytes-per-pixel) */
        WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
               fb->pitches[0] / fb->format->cpp[0]);
        /* update the scanout addresses */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        /* writing to the low address triggers the update */
        WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
               lower_32_bits(crtc_base));
        /* post the write */
        RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
}
277
278 static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
279                                         u32 *vbl, u32 *position)
280 {
281         if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
282                 return -EINVAL;
283
284         *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
285         *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
286
287         return 0;
288 }
289
290 /**
291  * dce_v11_0_hpd_sense - hpd sense callback.
292  *
293  * @adev: amdgpu_device pointer
294  * @hpd: hpd (hotplug detect) pin
295  *
296  * Checks if a digital monitor is connected (evergreen+).
297  * Returns true if connected, false if not connected.
298  */
299 static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev,
300                                enum amdgpu_hpd_id hpd)
301 {
302         bool connected = false;
303
304         if (hpd >= adev->mode_info.num_hpd)
305                 return connected;
306
307         if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) &
308             DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK)
309                 connected = true;
310
311         return connected;
312 }
313
314 /**
315  * dce_v11_0_hpd_set_polarity - hpd set polarity callback.
316  *
317  * @adev: amdgpu_device pointer
318  * @hpd: hpd (hotplug detect) pin
319  *
320  * Set the polarity of the hpd pin (evergreen+).
321  */
322 static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev,
323                                       enum amdgpu_hpd_id hpd)
324 {
325         u32 tmp;
326         bool connected = dce_v11_0_hpd_sense(adev, hpd);
327
328         if (hpd >= adev->mode_info.num_hpd)
329                 return;
330
331         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
332         if (connected)
333                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0);
334         else
335                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1);
336         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
337 }
338
/**
 * dce_v11_0_hpd_init - hpd setup callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Setup the hpd pins used by the card (evergreen+).
 * Enable the pin, set the polarity, and enable the hpd interrupts.
 */
static void dce_v11_0_hpd_init(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev->ddev;
        struct drm_connector *connector;
        u32 tmp;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

                /* skip connectors whose hpd pin is beyond what this ASIC has */
                if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
                        continue;

                if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
                    connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
                        /* don't try to enable hpd on eDP or LVDS avoid breaking the
                         * aux dp channel on imac and help (but not completely fix)
                         * https://bugzilla.redhat.com/show_bug.cgi?id=726143
                         * also avoid interrupt storms during dpms.
                         */
                        tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                        tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
                        WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
                        continue;
                }

                /* enable the hpd pin */
                tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1);
                WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                /* program connect/disconnect de-bounce delays */
                tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]);
                tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
                                    DC_HPD_CONNECT_INT_DELAY,
                                    AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS);
                tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL,
                                    DC_HPD_DISCONNECT_INT_DELAY,
                                    AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS);
                WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);

                /* arm the polarity for the current sense and enable the irq */
                dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
                amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
        }
}
389
390 /**
391  * dce_v11_0_hpd_fini - hpd tear down callback.
392  *
393  * @adev: amdgpu_device pointer
394  *
395  * Tear down the hpd pins used by the card (evergreen+).
396  * Disable the hpd interrupts.
397  */
398 static void dce_v11_0_hpd_fini(struct amdgpu_device *adev)
399 {
400         struct drm_device *dev = adev->ddev;
401         struct drm_connector *connector;
402         u32 tmp;
403
404         list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
405                 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
406
407                 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
408                         continue;
409
410                 tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
411                 tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0);
412                 WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
413
414                 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
415         }
416 }
417
/**
 * dce_v11_0_hpd_get_gpio_reg - get the hpd gpio register
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the register offset of the HPD GPIO "A" register used
 * by callers that poll hpd pin state directly.
 */
static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
{
        return mmDC_GPIO_HPD_A;
}
422
423 static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev)
424 {
425         u32 crtc_hung = 0;
426         u32 crtc_status[6];
427         u32 i, j, tmp;
428
429         for (i = 0; i < adev->mode_info.num_crtc; i++) {
430                 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
431                 if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) {
432                         crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
433                         crtc_hung |= (1 << i);
434                 }
435         }
436
437         for (j = 0; j < 10; j++) {
438                 for (i = 0; i < adev->mode_info.num_crtc; i++) {
439                         if (crtc_hung & (1 << i)) {
440                                 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
441                                 if (tmp != crtc_status[i])
442                                         crtc_hung &= ~(1 << i);
443                         }
444                 }
445                 if (crtc_hung == 0)
446                         return false;
447                 udelay(100);
448         }
449
450         return true;
451 }
452
453 static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev,
454                                            bool render)
455 {
456         u32 tmp;
457
458         /* Lockout access through VGA aperture*/
459         tmp = RREG32(mmVGA_HDP_CONTROL);
460         if (render)
461                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
462         else
463                 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
464         WREG32(mmVGA_HDP_CONTROL, tmp);
465
466         /* disable VGA render */
467         tmp = RREG32(mmVGA_RENDER_CONTROL);
468         if (render)
469                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
470         else
471                 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
472         WREG32(mmVGA_RENDER_CONTROL, tmp);
473 }
474
475 static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
476 {
477         int num_crtc = 0;
478
479         switch (adev->asic_type) {
480         case CHIP_CARRIZO:
481                 num_crtc = 3;
482                 break;
483         case CHIP_STONEY:
484                 num_crtc = 2;
485                 break;
486         case CHIP_POLARIS10:
487         case CHIP_VEGAM:
488                 num_crtc = 6;
489                 break;
490         case CHIP_POLARIS11:
491         case CHIP_POLARIS12:
492                 num_crtc = 5;
493                 break;
494         default:
495                 num_crtc = 0;
496         }
497         return num_crtc;
498 }
499
/**
 * dce_v11_0_disable_dce - disable VGA render and all enabled CRTCs
 *
 * @adev: amdgpu_device pointer
 *
 * If the board has a DCE engine (per the atombios tables), turn off the
 * VGA render path and master-disable every currently-enabled CRTC.
 */
void dce_v11_0_disable_dce(struct amdgpu_device *adev)
{
        /*Disable VGA render and enabled crtc, if has DCE engine*/
        if (amdgpu_atombios_has_dce_engine_info(adev)) {
                u32 tmp;
                int crtc_enabled, i;

                dce_v11_0_set_vga_render_state(adev, false);

                /*Disable crtc*/
                for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) {
                        crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
                                                                         CRTC_CONTROL, CRTC_MASTER_EN);
                        if (crtc_enabled) {
                                /* hold the CRTC update lock while clearing master enable */
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
                                tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
                                WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
                                WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                        }
                }
        }
}
523
/**
 * dce_v11_0_program_fmt - program the FMT (bit depth reduction) block
 *
 * @encoder: the encoder whose CRTC's FMT block is programmed
 *
 * Configures truncation or spatial/temporal dithering on the CRTC feeding
 * @encoder, based on the attached monitor's bpc and the connector's
 * dither preference.  LVDS/eDP (handled by atom) and analog DACs are
 * skipped.
 */
static void dce_v11_0_program_fmt(struct drm_encoder *encoder)
{
        struct drm_device *dev = encoder->dev;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
        struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
        int bpc = 0;
        u32 tmp = 0;
        enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;

        if (connector) {
                struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
                bpc = amdgpu_connector_get_monitor_bpc(connector);
                dither = amdgpu_connector->dither;
        }

        /* LVDS/eDP FMT is set up by atom */
        if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
                return;

        /* not needed for analog */
        if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
            (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
                return;

        /* bpc == 0 means the monitor's depth is unknown; leave FMT disabled */
        if (bpc == 0)
                return;

        switch (bpc) {
        case 6:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0);
                }
                break;
        case 8:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1);
                }
                break;
        case 10:
                if (dither == AMDGPU_FMT_DITHER_ENABLE) {
                        /* XXX sort out optimal dither settings */
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2);
                } else {
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1);
                        tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2);
                }
                break;
        default:
                /* not needed */
                break;
        }

        WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
599
600
601 /* display watermark setup */
/**
 * dce_v11_0_line_buffer_adjust - Set up the line buffer
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @mode: the current display mode on the selected display
 * controller
 *
 * Setup up the line buffer allocation for
 * the selected display controller (CIK).
 * Returns the line buffer size in pixels.
 */
static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev,
                                       struct amdgpu_crtc *amdgpu_crtc,
                                       struct drm_display_mode *mode)
{
        u32 tmp, buffer_alloc, i, mem_cfg;
        u32 pipe_offset = amdgpu_crtc->crtc_id;
        /*
         * Line Buffer Setup
         * There are 6 line buffers, one for each display controllers.
         * There are 3 partitions per LB. Select the number of partitions
         * to enable based on the display width.  For display widths larger
         * than 4096, you need use to use 2 display controllers and combine
         * them using the stereo blender.
         */
        if (amdgpu_crtc->base.enabled && mode) {
                if (mode->crtc_hdisplay < 1920) {
                        mem_cfg = 1;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 2560) {
                        mem_cfg = 2;
                        buffer_alloc = 2;
                } else if (mode->crtc_hdisplay < 4096) {
                        mem_cfg = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                } else {
                        DRM_DEBUG_KMS("Mode too big for LB!\n");
                        mem_cfg = 0;
                        buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
                }
        } else {
                /* controller disabled: minimal LB config, no DMIF buffers */
                mem_cfg = 1;
                buffer_alloc = 0;
        }

        /* program the selected line buffer memory configuration */
        tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset);
        tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg);
        WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp);

        /* request the DMIF buffer allocation for this pipe */
        tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
        tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc);
        WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp);

        /* poll (bounded by usec_timeout) until the hw ack's the allocation */
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset);
                if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED))
                        break;
                udelay(1);
        }

        /* return the LB width in pixels (two lines per configuration) */
        if (amdgpu_crtc->base.enabled && mode) {
                switch (mem_cfg) {
                case 0:
                default:
                        return 4096 * 2;
                case 1:
                        return 1920 * 2;
                case 2:
                        return 2560 * 2;
                }
        }

        /* controller not enabled, so no lb used */
        return 0;
}
678
679 /**
680  * cik_get_number_of_dram_channels - get the number of dram channels
681  *
682  * @adev: amdgpu_device pointer
683  *
684  * Look up the number of video ram channels (CIK).
685  * Used for display watermark bandwidth calculations
686  * Returns the number of dram channels
687  */
688 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
689 {
690         u32 tmp = RREG32(mmMC_SHARED_CHMAP);
691
692         switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
693         case 0:
694         default:
695                 return 1;
696         case 1:
697                 return 2;
698         case 2:
699                 return 4;
700         case 3:
701                 return 8;
702         case 4:
703                 return 3;
704         case 5:
705                 return 6;
706         case 6:
707                 return 10;
708         case 7:
709                 return 12;
710         case 8:
711                 return 16;
712         }
713 }
714
/* Inputs for the display watermark/bandwidth calculations below. */
struct dce10_wm_params {
        u32 dram_channels; /* number of dram channels */
        u32 yclk;          /* bandwidth per dram data pin in kHz */
        u32 sclk;          /* engine clock in kHz */
        u32 disp_clk;      /* display clock in kHz */
        u32 src_width;     /* viewport width */
        u32 active_time;   /* active display time in ns */
        u32 blank_time;    /* blank time in ns */
        bool interlaced;    /* mode is interlaced */
        fixed20_12 vsc;    /* vertical scale ratio */
        u32 num_heads;     /* number of active crtcs */
        u32 bytes_per_pixel; /* bytes per pixel display + overlay */
        u32 lb_size;       /* line buffer allocated to pipe */
        u32 vtaps;         /* vertical scaler taps */
};
730
731 /**
732  * dce_v11_0_dram_bandwidth - get the dram bandwidth
733  *
734  * @wm: watermark calculation data
735  *
736  * Calculate the raw dram bandwidth (CIK).
737  * Used for display watermark bandwidth calculations
738  * Returns the dram bandwidth in MBytes/s
739  */
740 static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm)
741 {
742         /* Calculate raw DRAM Bandwidth */
743         fixed20_12 dram_efficiency; /* 0.7 */
744         fixed20_12 yclk, dram_channels, bandwidth;
745         fixed20_12 a;
746
747         a.full = dfixed_const(1000);
748         yclk.full = dfixed_const(wm->yclk);
749         yclk.full = dfixed_div(yclk, a);
750         dram_channels.full = dfixed_const(wm->dram_channels * 4);
751         a.full = dfixed_const(10);
752         dram_efficiency.full = dfixed_const(7);
753         dram_efficiency.full = dfixed_div(dram_efficiency, a);
754         bandwidth.full = dfixed_mul(dram_channels, yclk);
755         bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
756
757         return dfixed_trunc(bandwidth);
758 }
759
760 /**
761  * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display
762  *
763  * @wm: watermark calculation data
764  *
765  * Calculate the dram bandwidth used for display (CIK).
766  * Used for display watermark bandwidth calculations
767  * Returns the dram bandwidth for display in MBytes/s
768  */
769 static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm)
770 {
771         /* Calculate DRAM Bandwidth and the part allocated to display. */
772         fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
773         fixed20_12 yclk, dram_channels, bandwidth;
774         fixed20_12 a;
775
776         a.full = dfixed_const(1000);
777         yclk.full = dfixed_const(wm->yclk);
778         yclk.full = dfixed_div(yclk, a);
779         dram_channels.full = dfixed_const(wm->dram_channels * 4);
780         a.full = dfixed_const(10);
781         disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
782         disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
783         bandwidth.full = dfixed_mul(dram_channels, yclk);
784         bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
785
786         return dfixed_trunc(bandwidth);
787 }
788
789 /**
790  * dce_v11_0_data_return_bandwidth - get the data return bandwidth
791  *
792  * @wm: watermark calculation data
793  *
794  * Calculate the data return bandwidth used for display (CIK).
795  * Used for display watermark bandwidth calculations
796  * Returns the data return bandwidth in MBytes/s
797  */
798 static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm)
799 {
800         /* Calculate the display Data return Bandwidth */
801         fixed20_12 return_efficiency; /* 0.8 */
802         fixed20_12 sclk, bandwidth;
803         fixed20_12 a;
804
805         a.full = dfixed_const(1000);
806         sclk.full = dfixed_const(wm->sclk);
807         sclk.full = dfixed_div(sclk, a);
808         a.full = dfixed_const(10);
809         return_efficiency.full = dfixed_const(8);
810         return_efficiency.full = dfixed_div(return_efficiency, a);
811         a.full = dfixed_const(32);
812         bandwidth.full = dfixed_mul(a, sclk);
813         bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
814
815         return dfixed_trunc(bandwidth);
816 }
817
818 /**
819  * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth
820  *
821  * @wm: watermark calculation data
822  *
823  * Calculate the dmif bandwidth used for display (CIK).
824  * Used for display watermark bandwidth calculations
825  * Returns the dmif bandwidth in MBytes/s
826  */
827 static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm)
828 {
829         /* Calculate the DMIF Request Bandwidth */
830         fixed20_12 disp_clk_request_efficiency; /* 0.8 */
831         fixed20_12 disp_clk, bandwidth;
832         fixed20_12 a, b;
833
834         a.full = dfixed_const(1000);
835         disp_clk.full = dfixed_const(wm->disp_clk);
836         disp_clk.full = dfixed_div(disp_clk, a);
837         a.full = dfixed_const(32);
838         b.full = dfixed_mul(a, disp_clk);
839
840         a.full = dfixed_const(10);
841         disp_clk_request_efficiency.full = dfixed_const(8);
842         disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
843
844         bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
845
846         return dfixed_trunc(bandwidth);
847 }
848
849 /**
850  * dce_v11_0_available_bandwidth - get the min available bandwidth
851  *
852  * @wm: watermark calculation data
853  *
854  * Calculate the min available bandwidth used for display (CIK).
855  * Used for display watermark bandwidth calculations
856  * Returns the min available bandwidth in MBytes/s
857  */
858 static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm)
859 {
860         /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
861         u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm);
862         u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm);
863         u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm);
864
865         return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
866 }
867
868 /**
869  * dce_v11_0_average_bandwidth - get the average available bandwidth
870  *
871  * @wm: watermark calculation data
872  *
873  * Calculate the average available bandwidth used for display (CIK).
874  * Used for display watermark bandwidth calculations
875  * Returns the average available bandwidth in MBytes/s
876  */
877 static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm)
878 {
879         /* Calculate the display mode Average Bandwidth
880          * DisplayMode should contain the source and destination dimensions,
881          * timing, etc.
882          */
883         fixed20_12 bpp;
884         fixed20_12 line_time;
885         fixed20_12 src_width;
886         fixed20_12 bandwidth;
887         fixed20_12 a;
888
889         a.full = dfixed_const(1000);
890         line_time.full = dfixed_const(wm->active_time + wm->blank_time);
891         line_time.full = dfixed_div(line_time, a);
892         bpp.full = dfixed_const(wm->bytes_per_pixel);
893         src_width.full = dfixed_const(wm->src_width);
894         bandwidth.full = dfixed_mul(src_width, bpp);
895         bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
896         bandwidth.full = dfixed_div(bandwidth, line_time);
897
898         return dfixed_trunc(bandwidth);
899 }
900
901 /**
902  * dce_v11_0_latency_watermark - get the latency watermark
903  *
904  * @wm: watermark calculation data
905  *
906  * Calculate the latency watermark (CIK).
907  * Used for display watermark bandwidth calculations
908  * Returns the latency watermark in ns
909  */
910 static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
911 {
912         /* First calculate the latency in ns */
913         u32 mc_latency = 2000; /* 2000 ns. */
914         u32 available_bandwidth = dce_v11_0_available_bandwidth(wm);
915         u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
916         u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
917         u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
918         u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
919                 (wm->num_heads * cursor_line_pair_return_time);
920         u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
921         u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
922         u32 tmp, dmif_size = 12288;
923         fixed20_12 a, b, c;
924
925         if (wm->num_heads == 0)
926                 return 0;
927
928         a.full = dfixed_const(2);
929         b.full = dfixed_const(1);
930         if ((wm->vsc.full > a.full) ||
931             ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
932             (wm->vtaps >= 5) ||
933             ((wm->vsc.full >= a.full) && wm->interlaced))
934                 max_src_lines_per_dst_line = 4;
935         else
936                 max_src_lines_per_dst_line = 2;
937
938         a.full = dfixed_const(available_bandwidth);
939         b.full = dfixed_const(wm->num_heads);
940         a.full = dfixed_div(a, b);
941         tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
942         tmp = min(dfixed_trunc(a), tmp);
943
944         lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
945
946         a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
947         b.full = dfixed_const(1000);
948         c.full = dfixed_const(lb_fill_bw);
949         b.full = dfixed_div(c, b);
950         a.full = dfixed_div(a, b);
951         line_fill_time = dfixed_trunc(a);
952
953         if (line_fill_time < wm->active_time)
954                 return latency;
955         else
956                 return latency + (line_fill_time - wm->active_time);
957
958 }
959
960 /**
961  * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check
962  * average and available dram bandwidth
963  *
964  * @wm: watermark calculation data
965  *
966  * Check if the display average bandwidth fits in the display
967  * dram bandwidth (CIK).
968  * Used for display watermark bandwidth calculations
969  * Returns true if the display fits, false if not.
970  */
971 static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm)
972 {
973         if (dce_v11_0_average_bandwidth(wm) <=
974             (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads))
975                 return true;
976         else
977                 return false;
978 }
979
980 /**
981  * dce_v11_0_average_bandwidth_vs_available_bandwidth - check
982  * average and available bandwidth
983  *
984  * @wm: watermark calculation data
985  *
986  * Check if the display average bandwidth fits in the display
987  * available bandwidth (CIK).
988  * Used for display watermark bandwidth calculations
989  * Returns true if the display fits, false if not.
990  */
991 static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm)
992 {
993         if (dce_v11_0_average_bandwidth(wm) <=
994             (dce_v11_0_available_bandwidth(wm) / wm->num_heads))
995                 return true;
996         else
997                 return false;
998 }
999
1000 /**
1001  * dce_v11_0_check_latency_hiding - check latency hiding
1002  *
1003  * @wm: watermark calculation data
1004  *
1005  * Check latency hiding (CIK).
1006  * Used for display watermark bandwidth calculations
1007  * Returns true if the display fits, false if not.
1008  */
1009 static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm)
1010 {
1011         u32 lb_partitions = wm->lb_size / wm->src_width;
1012         u32 line_time = wm->active_time + wm->blank_time;
1013         u32 latency_tolerant_lines;
1014         u32 latency_hiding;
1015         fixed20_12 a;
1016
1017         a.full = dfixed_const(1);
1018         if (wm->vsc.full > a.full)
1019                 latency_tolerant_lines = 1;
1020         else {
1021                 if (lb_partitions <= (wm->vtaps + 1))
1022                         latency_tolerant_lines = 1;
1023                 else
1024                         latency_tolerant_lines = 2;
1025         }
1026
1027         latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
1028
1029         if (dce_v11_0_latency_watermark(wm) <= latency_hiding)
1030                 return true;
1031         else
1032                 return false;
1033 }
1034
/**
 * dce_v11_0_program_watermarks - program display watermarks
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_crtc: the selected display controller
 * @lb_size: line buffer size
 * @num_heads: number of display controllers in use
 *
 * Calculate and program the display watermarks for the
 * selected display controller (CIK).
 */
static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
					struct amdgpu_crtc *amdgpu_crtc,
					u32 lb_size, u32 num_heads)
{
	struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
	struct dce10_wm_params wm_low, wm_high;
	u32 active_time;
	u32 line_time = 0;
	u32 latency_watermark_a = 0, latency_watermark_b = 0;
	u32 tmp, wm_mask, lb_vblank_lead_lines = 0;

	if (amdgpu_crtc->base.enabled && num_heads && mode) {
		/* active and total line times in ns (mode->clock is in kHz) */
		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
					    (u32)mode->clock);
		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
					  (u32)mode->clock);
		/* line_time goes into a 16-bit register field below */
		line_time = min(line_time, (u32)65535);

		/* watermark for high clocks */
		if (adev->pm.dpm_enabled) {
			/* NOTE(review): clocks are scaled by 10 here,
			 * presumably converting 10 kHz units to kHz — confirm
			 * against amdgpu_dpm_get_mclk/sclk
			 */
			wm_high.yclk =
				amdgpu_dpm_get_mclk(adev, false) * 10;
			wm_high.sclk =
				amdgpu_dpm_get_sclk(adev, false) * 10;
		} else {
			wm_high.yclk = adev->pm.current_mclk * 10;
			wm_high.sclk = adev->pm.current_sclk * 10;
		}

		wm_high.disp_clk = mode->clock;
		wm_high.src_width = mode->crtc_hdisplay;
		wm_high.active_time = active_time;
		wm_high.blank_time = line_time - wm_high.active_time;
		wm_high.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_high.interlaced = true;
		wm_high.vsc = amdgpu_crtc->vsc;
		wm_high.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_high.vtaps = 2;
		wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_high.lb_size = lb_size;
		wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_high.num_heads = num_heads;

		/* set for high clocks */
		latency_watermark_a = min(dce_v11_0_latency_watermark(&wm_high), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
		    !dce_v11_0_check_latency_hiding(&wm_high) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}

		/* watermark for low clocks */
		if (adev->pm.dpm_enabled) {
			wm_low.yclk =
				amdgpu_dpm_get_mclk(adev, true) * 10;
			wm_low.sclk =
				amdgpu_dpm_get_sclk(adev, true) * 10;
		} else {
			wm_low.yclk = adev->pm.current_mclk * 10;
			wm_low.sclk = adev->pm.current_sclk * 10;
		}

		wm_low.disp_clk = mode->clock;
		wm_low.src_width = mode->crtc_hdisplay;
		wm_low.active_time = active_time;
		wm_low.blank_time = line_time - wm_low.active_time;
		wm_low.interlaced = false;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			wm_low.interlaced = true;
		wm_low.vsc = amdgpu_crtc->vsc;
		wm_low.vtaps = 1;
		if (amdgpu_crtc->rmx_type != RMX_OFF)
			wm_low.vtaps = 2;
		wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
		wm_low.lb_size = lb_size;
		wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
		wm_low.num_heads = num_heads;

		/* set for low clocks */
		latency_watermark_b = min(dce_v11_0_latency_watermark(&wm_low), (u32)65535);

		/* possibly force display priority to high */
		/* should really do this at mode validation time... */
		if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
		    !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
		    !dce_v11_0_check_latency_hiding(&wm_low) ||
		    (adev->mode_info.disp_priority == 2)) {
			DRM_DEBUG_KMS("force priority to high\n");
		}
		lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
	}

	/* program watermark set A, then set B, then restore the original
	 * watermark selection
	 */
	/* select wm A */
	wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* select wm B */
	tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2);
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b);
	tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time);
	WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp);
	/* restore original selection */
	WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);

	/* save values for DPM */
	amdgpu_crtc->line_time = line_time;
	amdgpu_crtc->wm_high = latency_watermark_a;
	amdgpu_crtc->wm_low = latency_watermark_b;
	/* Save number of lines the linebuffer leads before the scanout */
	amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
}
1169
1170 /**
1171  * dce_v11_0_bandwidth_update - program display watermarks
1172  *
1173  * @adev: amdgpu_device pointer
1174  *
1175  * Calculate and program the display watermarks and line
1176  * buffer allocation (CIK).
1177  */
1178 static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev)
1179 {
1180         struct drm_display_mode *mode = NULL;
1181         u32 num_heads = 0, lb_size;
1182         int i;
1183
1184         amdgpu_display_update_priority(adev);
1185
1186         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1187                 if (adev->mode_info.crtcs[i]->base.enabled)
1188                         num_heads++;
1189         }
1190         for (i = 0; i < adev->mode_info.num_crtc; i++) {
1191                 mode = &adev->mode_info.crtcs[i]->base.mode;
1192                 lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1193                 dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1194                                             lb_size, num_heads);
1195         }
1196 }
1197
1198 static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev)
1199 {
1200         int i;
1201         u32 offset, tmp;
1202
1203         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1204                 offset = adev->mode_info.audio.pin[i].offset;
1205                 tmp = RREG32_AUDIO_ENDPT(offset,
1206                                          ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1207                 if (((tmp &
1208                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1209                 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1210                         adev->mode_info.audio.pin[i].connected = false;
1211                 else
1212                         adev->mode_info.audio.pin[i].connected = true;
1213         }
1214 }
1215
1216 static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev)
1217 {
1218         int i;
1219
1220         dce_v11_0_audio_get_connected_pins(adev);
1221
1222         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1223                 if (adev->mode_info.audio.pin[i].connected)
1224                         return &adev->mode_info.audio.pin[i];
1225         }
1226         DRM_ERROR("No connected audio pins found!\n");
1227         return NULL;
1228 }
1229
1230 static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1231 {
1232         struct amdgpu_device *adev = encoder->dev->dev_private;
1233         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1234         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1235         u32 tmp;
1236
1237         if (!dig || !dig->afmt || !dig->afmt->pin)
1238                 return;
1239
1240         tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset);
1241         tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id);
1242         WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp);
1243 }
1244
1245 static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder,
1246                                                 struct drm_display_mode *mode)
1247 {
1248         struct amdgpu_device *adev = encoder->dev->dev_private;
1249         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1250         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1251         struct drm_connector *connector;
1252         struct amdgpu_connector *amdgpu_connector = NULL;
1253         u32 tmp;
1254         int interlace = 0;
1255
1256         if (!dig || !dig->afmt || !dig->afmt->pin)
1257                 return;
1258
1259         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1260                 if (connector->encoder == encoder) {
1261                         amdgpu_connector = to_amdgpu_connector(connector);
1262                         break;
1263                 }
1264         }
1265
1266         if (!amdgpu_connector) {
1267                 DRM_ERROR("Couldn't find encoder's connector\n");
1268                 return;
1269         }
1270
1271         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1272                 interlace = 1;
1273         if (connector->latency_present[interlace]) {
1274                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1275                                     VIDEO_LIPSYNC, connector->video_latency[interlace]);
1276                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1277                                     AUDIO_LIPSYNC, connector->audio_latency[interlace]);
1278         } else {
1279                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1280                                     VIDEO_LIPSYNC, 0);
1281                 tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC,
1282                                     AUDIO_LIPSYNC, 0);
1283         }
1284         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1285                            ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1286 }
1287
1288 static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1289 {
1290         struct amdgpu_device *adev = encoder->dev->dev_private;
1291         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1292         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1293         struct drm_connector *connector;
1294         struct amdgpu_connector *amdgpu_connector = NULL;
1295         u32 tmp;
1296         u8 *sadb = NULL;
1297         int sad_count;
1298
1299         if (!dig || !dig->afmt || !dig->afmt->pin)
1300                 return;
1301
1302         list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
1303                 if (connector->encoder == encoder) {
1304                         amdgpu_connector = to_amdgpu_connector(connector);
1305                         break;
1306                 }
1307         }
1308
1309         if (!amdgpu_connector) {
1310                 DRM_ERROR("Couldn't find encoder's connector\n");
1311                 return;
1312         }
1313
1314         sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1315         if (sad_count < 0) {
1316                 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1317                 sad_count = 0;
1318         }
1319
1320         /* program the speaker allocation */
1321         tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1322                                  ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1323         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1324                             DP_CONNECTION, 0);
1325         /* set HDMI mode */
1326         tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1327                             HDMI_CONNECTION, 1);
1328         if (sad_count)
1329                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1330                                     SPEAKER_ALLOCATION, sadb[0]);
1331         else
1332                 tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER,
1333                                     SPEAKER_ALLOCATION, 5); /* stereo */
1334         WREG32_AUDIO_ENDPT(dig->afmt->pin->offset,
1335                            ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1336
1337         kfree(sadb);
1338 }
1339
/* program the codec's audio descriptor registers from the connector's
 * EDID Short Audio Descriptors (SADs)
 */
static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector = NULL;
	struct cea_sad *sads;
	int i, sad_count;

	/* map each descriptor register to the HDMI coding type it describes */
	static const u16 eld_reg_to_type[][2] = {
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
		{ ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
	};

	if (!dig || !dig->afmt || !dig->afmt->pin)
		return;

	/* find the connector driven by this encoder */
	list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
		if (connector->encoder == encoder) {
			amdgpu_connector = to_amdgpu_connector(connector);
			break;
		}
	}

	if (!amdgpu_connector) {
		DRM_ERROR("Couldn't find encoder's connector\n");
		return;
	}

	/* drm_edid_to_sad() allocates sads; freed at the bottom */
	sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
	if (sad_count <= 0) {
		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;
	}
	BUG_ON(!sads);

	for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
		u32 tmp = 0;
		u8 stereo_freqs = 0;
		int max_channels = -1;
		int j;

		/* pick the matching SAD with the most channels; for PCM,
		 * accumulate the supported stereo frequencies across SADs,
		 * for any other type stop at the first match
		 */
		for (j = 0; j < sad_count; j++) {
			struct cea_sad *sad = &sads[j];

			if (sad->format == eld_reg_to_type[i][1]) {
				if (sad->channels > max_channels) {
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    MAX_CHANNELS, sad->channels);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    DESCRIPTOR_BYTE_2, sad->byte2);
					tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
							    SUPPORTED_FREQUENCIES, sad->freq);
					max_channels = sad->channels;
				}

				if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
					stereo_freqs |= sad->freq;
				else
					break;
			}
		}

		tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
				    SUPPORTED_FREQUENCIES_STEREO, stereo_freqs);
		WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp);
	}

	kfree(sads);
}
1421
1422 static void dce_v11_0_audio_enable(struct amdgpu_device *adev,
1423                                   struct amdgpu_audio_pin *pin,
1424                                   bool enable)
1425 {
1426         if (!pin)
1427                 return;
1428
1429         WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1430                            enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1431 }
1432
/* register offsets of the azalia audio endpoints, indexed by pin id */
static const u32 pin_offsets[] =
{
	AUD0_REGISTER_OFFSET,
	AUD1_REGISTER_OFFSET,
	AUD2_REGISTER_OFFSET,
	AUD3_REGISTER_OFFSET,
	AUD4_REGISTER_OFFSET,
	AUD5_REGISTER_OFFSET,
	AUD6_REGISTER_OFFSET,
	AUD7_REGISTER_OFFSET,
};
1444
1445 static int dce_v11_0_audio_init(struct amdgpu_device *adev)
1446 {
1447         int i;
1448
1449         if (!amdgpu_audio)
1450                 return 0;
1451
1452         adev->mode_info.audio.enabled = true;
1453
1454         switch (adev->asic_type) {
1455         case CHIP_CARRIZO:
1456         case CHIP_STONEY:
1457                 adev->mode_info.audio.num_pins = 7;
1458                 break;
1459         case CHIP_POLARIS10:
1460         case CHIP_VEGAM:
1461                 adev->mode_info.audio.num_pins = 8;
1462                 break;
1463         case CHIP_POLARIS11:
1464         case CHIP_POLARIS12:
1465                 adev->mode_info.audio.num_pins = 6;
1466                 break;
1467         default:
1468                 return -EINVAL;
1469         }
1470
1471         for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1472                 adev->mode_info.audio.pin[i].channels = -1;
1473                 adev->mode_info.audio.pin[i].rate = -1;
1474                 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1475                 adev->mode_info.audio.pin[i].status_bits = 0;
1476                 adev->mode_info.audio.pin[i].category_code = 0;
1477                 adev->mode_info.audio.pin[i].connected = false;
1478                 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1479                 adev->mode_info.audio.pin[i].id = i;
1480                 /* disable audio.  it will be set up later */
1481                 /* XXX remove once we switch to ip funcs */
1482                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1483         }
1484
1485         return 0;
1486 }
1487
1488 static void dce_v11_0_audio_fini(struct amdgpu_device *adev)
1489 {
1490         int i;
1491
1492         if (!amdgpu_audio)
1493                 return;
1494
1495         if (!adev->mode_info.audio.enabled)
1496                 return;
1497
1498         for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1499                 dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1500
1501         adev->mode_info.audio.enabled = false;
1502 }
1503
1504 /*
1505  * update the N and CTS parameters for a given pixel clock rate
1506  */
1507 static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1508 {
1509         struct drm_device *dev = encoder->dev;
1510         struct amdgpu_device *adev = dev->dev_private;
1511         struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1512         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1513         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1514         u32 tmp;
1515
1516         tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset);
1517         tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz);
1518         WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp);
1519         tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset);
1520         tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz);
1521         WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp);
1522
1523         tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset);
1524         tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz);
1525         WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp);
1526         tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset);
1527         tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz);
1528         WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp);
1529
1530         tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset);
1531         tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz);
1532         WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp);
1533         tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset);
1534         tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz);
1535         WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp);
1536
1537 }
1538
1539 /*
1540  * build a HDMI Video Info Frame
1541  */
1542 static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1543                                                void *buffer, size_t size)
1544 {
1545         struct drm_device *dev = encoder->dev;
1546         struct amdgpu_device *adev = dev->dev_private;
1547         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1548         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1549         uint8_t *frame = buffer + 3;
1550         uint8_t *header = buffer;
1551
1552         WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset,
1553                 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1554         WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset,
1555                 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1556         WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset,
1557                 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1558         WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset,
1559                 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
1560 }
1561
1562 static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1563 {
1564         struct drm_device *dev = encoder->dev;
1565         struct amdgpu_device *adev = dev->dev_private;
1566         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1567         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1568         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1569         u32 dto_phase = 24 * 1000;
1570         u32 dto_modulo = clock;
1571         u32 tmp;
1572
1573         if (!dig || !dig->afmt)
1574                 return;
1575
1576         /* XXX two dtos; generally use dto0 for hdmi */
1577         /* Express [24MHz / target pixel clock] as an exact rational
1578          * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
1579          * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1580          */
1581         tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE);
1582         tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL,
1583                             amdgpu_crtc->crtc_id);
1584         WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp);
1585         WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1586         WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
1587 }
1588
/*
 * dce_v11_0_afmt_setmode - program the AFMT/HDMI block for a display mode
 *
 * Full HDMI audio/infoframe bring-up for one DIG encoder: acquires an audio
 * pin, programs the audio DTO, deep-color control, ACR (N/CTS) values,
 * IEC 60958 channel numbering, speaker allocation, SAD/latency fields and
 * the AVI infoframe, then re-enables audio.  The register writes below are
 * an ordered hardware sequence - audio is explicitly disabled first and
 * only re-enabled at the very end.
 */
static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder,
				  struct drm_display_mode *mode)
{
	struct drm_device *dev = encoder->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
	u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	struct hdmi_avi_infoframe frame;
	ssize_t err;
	u32 tmp;
	int bpc = 8;	/* default when no CRTC is attached */

	if (!dig || !dig->afmt)
		return;

	/* Silent, r600_hdmi_enable will raise WARN for us */
	if (!dig->afmt->enabled)
		return;

	/* hdmi deep color mode general control packets setup, if bpc > 8 */
	if (encoder->crtc) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
		bpc = amdgpu_crtc->bpc;
	}

	/* disable audio prior to setting up hw */
	dig->afmt->pin = dce_v11_0_audio_get_pin(adev);
	dce_v11_0_audio_enable(adev, dig->afmt->pin, false);

	dce_v11_0_audio_set_dto(encoder, mode->clock);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1);
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */

	WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000);

	/* deep color: >8 bpc modes need the GC packet to advertise the depth */
	tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset);
	switch (bpc) {
	case 0:
	case 6:
	case 8:
	case 16:
	default:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0);
		DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
			  connector->name, bpc);
		break;
	case 10:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1);
		DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
			  connector->name);
		break;
	case 12:
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1);
		tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2);
		DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
			  connector->name);
		break;
	}
	WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */
	tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */
	WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable audio info frames (frames won't be set until audio is enabled) */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
	WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	/* anything other than 0 */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */

	tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* set the default audio delay */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1);
	/* should be suffient for all audio modes and small enough for all hblanks */
	tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3);
	WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* allow 60958 channel status fields to be updated */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset);
	if (bpc > 8)
		/* clear SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0);
	else
		/* select SW CTS value */
		tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1);
	/* allow hw to sent ACR packets when required */
	tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1);
	WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp);

	dce_v11_0_afmt_update_ACR(encoder, mode->clock);

	/* IEC 60958 channel status: assign channel numbers 1..8 */
	tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1);
	WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2);
	WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7);
	tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8);
	WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp);

	dce_v11_0_audio_write_speaker_allocation(encoder);

	/* enable all 8 audio channels */
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset,
	       (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));

	dce_v11_0_afmt_audio_select_pin(encoder);
	dce_v11_0_audio_write_sad_regs(encoder);
	dce_v11_0_audio_write_latency_fields(encoder, mode);

	/* build and pack the AVI infoframe from the current mode */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
	if (err < 0) {
		DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
		return;
	}

	err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
	if (err < 0) {
		DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
		return;
	}

	dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset);
	/* enable AVI info frames */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1);
	/* required for audio info values to be updated */
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1);
	WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp);

	tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset);
	tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2);
	WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp);

	tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset);
	/* send audio packets */
	tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1);
	WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp);

	WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF);
	WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF);
	WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001);
	WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001);

	/* enable audio after to setting up hw */
	dce_v11_0_audio_enable(adev, dig->afmt->pin, true);
}
1772
1773 static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1774 {
1775         struct drm_device *dev = encoder->dev;
1776         struct amdgpu_device *adev = dev->dev_private;
1777         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1778         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1779
1780         if (!dig || !dig->afmt)
1781                 return;
1782
1783         /* Silent, r600_hdmi_enable will raise WARN for us */
1784         if (enable && dig->afmt->enabled)
1785                 return;
1786         if (!enable && !dig->afmt->enabled)
1787                 return;
1788
1789         if (!enable && dig->afmt->pin) {
1790                 dce_v11_0_audio_enable(adev, dig->afmt->pin, false);
1791                 dig->afmt->pin = NULL;
1792         }
1793
1794         dig->afmt->enabled = enable;
1795
1796         DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1797                   enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1798 }
1799
1800 static int dce_v11_0_afmt_init(struct amdgpu_device *adev)
1801 {
1802         int i;
1803
1804         for (i = 0; i < adev->mode_info.num_dig; i++)
1805                 adev->mode_info.afmt[i] = NULL;
1806
1807         /* DCE11 has audio blocks tied to DIG encoders */
1808         for (i = 0; i < adev->mode_info.num_dig; i++) {
1809                 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1810                 if (adev->mode_info.afmt[i]) {
1811                         adev->mode_info.afmt[i]->offset = dig_offsets[i];
1812                         adev->mode_info.afmt[i]->id = i;
1813                 } else {
1814                         int j;
1815                         for (j = 0; j < i; j++) {
1816                                 kfree(adev->mode_info.afmt[j]);
1817                                 adev->mode_info.afmt[j] = NULL;
1818                         }
1819                         return -ENOMEM;
1820                 }
1821         }
1822         return 0;
1823 }
1824
1825 static void dce_v11_0_afmt_fini(struct amdgpu_device *adev)
1826 {
1827         int i;
1828
1829         for (i = 0; i < adev->mode_info.num_dig; i++) {
1830                 kfree(adev->mode_info.afmt[i]);
1831                 adev->mode_info.afmt[i] = NULL;
1832         }
1833 }
1834
/* VGA control register for each of the six display controllers,
 * indexed by crtc_id.
 */
static const u32 vga_control_regs[6] =
{
	mmD1VGA_CONTROL,
	mmD2VGA_CONTROL,
	mmD3VGA_CONTROL,
	mmD4VGA_CONTROL,
	mmD5VGA_CONTROL,
	mmD6VGA_CONTROL,
};
1844
1845 static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable)
1846 {
1847         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1848         struct drm_device *dev = crtc->dev;
1849         struct amdgpu_device *adev = dev->dev_private;
1850         u32 vga_control;
1851
1852         vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1853         if (enable)
1854                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1855         else
1856                 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1857 }
1858
1859 static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable)
1860 {
1861         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1862         struct drm_device *dev = crtc->dev;
1863         struct amdgpu_device *adev = dev->dev_private;
1864
1865         if (enable)
1866                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1867         else
1868                 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1869 }
1870
/*
 * dce_v11_0_crtc_do_set_base - program the CRTC scanout surface
 *
 * Pins the framebuffer BO in VRAM (unless @atomic, in which case the BO is
 * assumed already pinned, idle and fenced), translates the DRM pixel format
 * and BO tiling flags into GRPH_CONTROL/GRPH_SWAP_CNTL values, and writes
 * the surface address, pitch and viewport registers.  On success the old
 * (non-atomic) framebuffer, if different, is unpinned.
 *
 * Returns 0 on success or a negative error code.
 */
static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y, int atomic)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_framebuffer *target_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *abo;
	uint64_t fb_location, tiling_flags;
	uint32_t fb_format, fb_pitch_pixels;
	u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE);
	u32 pipe_config;
	u32 tmp, viewport_w, viewport_h;
	int r;
	bool bypass_lut = false;
	struct drm_format_name_buf format_name;

	/* no fb bound */
	if (!atomic && !crtc->primary->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}

	if (atomic)
		target_fb = fb;
	else
		target_fb = crtc->primary->fb;

	/* If atomic, assume fb object is pinned & idle & fenced and
	 * just update base pointers
	 */
	obj = target_fb->obj[0];
	abo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(abo, false);
	if (unlikely(r != 0))
		return r;

	if (!atomic) {
		r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
		if (unlikely(r != 0)) {
			amdgpu_bo_unreserve(abo);
			/* NOTE(review): discards the pin error code and
			 * returns -EINVAL instead of r - confirm intended */
			return -EINVAL;
		}
	}
	fb_location = amdgpu_bo_gpu_offset(abo);

	/* NOTE(review): return value of amdgpu_bo_get_tiling_flags is not
	 * checked here - presumably cannot fail for a reserved BO; verify */
	amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
	amdgpu_bo_unreserve(abo);

	pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);

	/* translate the DRM fourcc into GRPH_CONTROL depth/format fields and
	 * the matching endian-swap / crossbar settings */
	switch (target_fb->format->format) {
	case DRM_FORMAT_C8:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		break;
	case DRM_FORMAT_XRGB4444:
	case DRM_FORMAT_ARGB4444:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_BGRX5551:
	case DRM_FORMAT_BGRA5551:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_RGB565:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN16);
#endif
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_BGRX1010102:
	case DRM_FORMAT_BGRA1010102:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		/* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
		bypass_lut = true;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0);
		/* swap red and blue via the crossbar for BGR layouts */
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_RED_CROSSBAR, 2);
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2);
#ifdef __BIG_ENDIAN
		fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP,
					ENDIAN_8IN32);
#endif
		break;
	default:
		DRM_ERROR("Unsupported screen format %s\n",
			  drm_get_format_name(target_fb->format->format, &format_name));
		return -EINVAL;
	}

	/* fold the BO's tiling layout into GRPH_CONTROL */
	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
		unsigned bankw, bankh, mtaspect, tile_split, num_banks;

		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);

		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_2D_TILED_THIN1);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT,
					  tile_split);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT,
					  mtaspect);
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE,
					  ADDR_SURF_MICRO_TILING_DISPLAY);
	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
		fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE,
					  ARRAY_1D_TILED_THIN1);
	}

	fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG,
				  pipe_config);

	dce_v11_0_vga_enable(crtc, false);

	/* Make sure surface address is updated at vertical blank rather than
	 * horizontal blank
	 */
	tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
			    GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0);
	WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* program the 64-bit surface address (primary and secondary) */
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
	       upper_32_bits(fb_location));
	WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
	       (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
	WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
	WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);

	/*
	 * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
	 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
	 * retain the full precision throughout the pipeline.
	 */
	tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset);
	if (bypass_lut)
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1);
	else
		tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0);
	WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp);

	if (bypass_lut)
		DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");

	WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
	WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);

	/* pitch is stored in pixels, not bytes */
	fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
	WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);

	dce_v11_0_grph_enable(crtc, true);

	WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
	       target_fb->height);

	/* viewport start must be 4-pixel / 2-line aligned */
	x &= ~3;
	y &= ~1;
	WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
	       (x << 16) | y);
	viewport_w = crtc->mode.hdisplay;
	viewport_h = (crtc->mode.vdisplay + 1) & ~1;
	WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
	       (viewport_w << 16) | viewport_h);

	/* set pageflip to happen anywhere in vblank interval */
	WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);

	/* unpin the previous (non-atomic) framebuffer, if it changed */
	if (!atomic && fb && fb != crtc->primary->fb) {
		abo = gem_to_amdgpu_bo(fb->obj[0]);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r != 0))
			return r;
		amdgpu_bo_unpin(abo);
		amdgpu_bo_unreserve(abo);
	}

	/* Bytes per pixel may have changed */
	dce_v11_0_bandwidth_update(adev);

	return 0;
}
2117
2118 static void dce_v11_0_set_interleave(struct drm_crtc *crtc,
2119                                      struct drm_display_mode *mode)
2120 {
2121         struct drm_device *dev = crtc->dev;
2122         struct amdgpu_device *adev = dev->dev_private;
2123         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2124         u32 tmp;
2125
2126         tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset);
2127         if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2128                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1);
2129         else
2130                 tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0);
2131         WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp);
2132 }
2133
/**
 * dce_v11_0_crtc_load_lut - program the gamma LUT for a crtc
 *
 * @crtc: drm crtc
 *
 * Puts the color pipeline stages around the LUT (input CSC, prescale,
 * input gamma, degamma, gamut remap, regamma, output CSC, denorm) into
 * bypass and uploads the 256-entry gamma table from crtc->gamma_store
 * into the hardware LUT.
 */
static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u16 *r, *g, *b;
	int i;
	u32 tmp;

	DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);

	/* bypass the input CSC (mode 0) */
	tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0);
	WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass graphics prescale */
	tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1);
	WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass input gamma (mode 0) */
	tmp = RREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0);
	WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);

	/* full-range LUT: black level 0, white level 0xffff per channel */
	WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);

	WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
	WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);

	/* enable writes to all three channels, start at index 0 */
	WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
	WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);

	WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;
	for (i = 0; i < 256; i++) {
		/* truncate each 16-bit gamma value to 10 bits and pack as
		 * R[29:20] G[19:10] B[9:0]; the RW_INDEX auto-increments
		 */
		WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
		       ((*r++ & 0xffc0) << 14) |
		       ((*g++ & 0xffc0) << 4) |
		       (*b++ >> 6));
	}

	/* bypass degamma for graphics and both cursors */
	tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0);
	tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0);
	WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass gamut remap */
	tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0);
	WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass regamma */
	tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0);
	WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* bypass the output CSC */
	tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0);
	WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp);

	/* XXX match this to the depth of the crtc fmt block, move to modeset? */
	WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0);
	/* XXX this only needs to be programmed once per crtc at startup,
	 * not sure where the best place for it is
	 */
	tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset);
	tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1);
	WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
2208
2209 static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder)
2210 {
2211         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2212         struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2213
2214         switch (amdgpu_encoder->encoder_id) {
2215         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2216                 if (dig->linkb)
2217                         return 1;
2218                 else
2219                         return 0;
2220                 break;
2221         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2222                 if (dig->linkb)
2223                         return 3;
2224                 else
2225                         return 2;
2226                 break;
2227         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2228                 if (dig->linkb)
2229                         return 5;
2230                 else
2231                         return 4;
2232                 break;
2233         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2234                 return 6;
2235                 break;
2236         default:
2237                 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2238                 return 0;
2239         }
2240 }
2241
2242 /**
2243  * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc.
2244  *
2245  * @crtc: drm crtc
2246  *
2247  * Returns the PPLL (Pixel PLL) to be used by the crtc.  For DP monitors
2248  * a single PPLL can be used for all DP crtcs/encoders.  For non-DP
2249  * monitors a dedicated PPLL must be used.  If a particular board has
2250  * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2251  * as there is no need to program the PLL itself.  If we are not able to
2252  * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2253  * avoid messing up an existing monitor.
2254  *
2255  * Asic specific PLL information
2256  *
 * DCE 11.x
 * Carrizo / Stoney
 * - PPLL0, PPLL1 are available for all UNIPHY (both DP and non-DP)
 * Polaris10/11/12 / VegaM
 * - DP uses the DP DTO; other outputs use the per-PHY COMBOPHY PLLs
 * Other DCE 11.x parts
 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2262  *
2263  */
2264 static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
2265 {
2266         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2267         struct drm_device *dev = crtc->dev;
2268         struct amdgpu_device *adev = dev->dev_private;
2269         u32 pll_in_use;
2270         int pll;
2271
2272         if ((adev->asic_type == CHIP_POLARIS10) ||
2273             (adev->asic_type == CHIP_POLARIS11) ||
2274             (adev->asic_type == CHIP_POLARIS12) ||
2275             (adev->asic_type == CHIP_VEGAM)) {
2276                 struct amdgpu_encoder *amdgpu_encoder =
2277                         to_amdgpu_encoder(amdgpu_crtc->encoder);
2278                 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2279
2280                 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2281                         return ATOM_DP_DTO;
2282
2283                 switch (amdgpu_encoder->encoder_id) {
2284                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2285                         if (dig->linkb)
2286                                 return ATOM_COMBOPHY_PLL1;
2287                         else
2288                                 return ATOM_COMBOPHY_PLL0;
2289                         break;
2290                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2291                         if (dig->linkb)
2292                                 return ATOM_COMBOPHY_PLL3;
2293                         else
2294                                 return ATOM_COMBOPHY_PLL2;
2295                         break;
2296                 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2297                         if (dig->linkb)
2298                                 return ATOM_COMBOPHY_PLL5;
2299                         else
2300                                 return ATOM_COMBOPHY_PLL4;
2301                         break;
2302                 default:
2303                         DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2304                         return ATOM_PPLL_INVALID;
2305                 }
2306         }
2307
2308         if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2309                 if (adev->clock.dp_extclk)
2310                         /* skip PPLL programming if using ext clock */
2311                         return ATOM_PPLL_INVALID;
2312                 else {
2313                         /* use the same PPLL for all DP monitors */
2314                         pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2315                         if (pll != ATOM_PPLL_INVALID)
2316                                 return pll;
2317                 }
2318         } else {
2319                 /* use the same PPLL for all monitors with the same clock */
2320                 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2321                 if (pll != ATOM_PPLL_INVALID)
2322                         return pll;
2323         }
2324
2325         /* XXX need to determine what plls are available on each DCE11 part */
2326         pll_in_use = amdgpu_pll_get_use_mask(crtc);
2327         if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
2328                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2329                         return ATOM_PPLL1;
2330                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2331                         return ATOM_PPLL0;
2332                 DRM_ERROR("unable to allocate a PPLL\n");
2333                 return ATOM_PPLL_INVALID;
2334         } else {
2335                 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2336                         return ATOM_PPLL2;
2337                 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2338                         return ATOM_PPLL1;
2339                 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2340                         return ATOM_PPLL0;
2341                 DRM_ERROR("unable to allocate a PPLL\n");
2342                 return ATOM_PPLL_INVALID;
2343         }
2344         return ATOM_PPLL_INVALID;
2345 }
2346
2347 static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2348 {
2349         struct amdgpu_device *adev = crtc->dev->dev_private;
2350         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2351         uint32_t cur_lock;
2352
2353         cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2354         if (lock)
2355                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1);
2356         else
2357                 cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 0);
2358         WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2359 }
2360
2361 static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
2362 {
2363         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2364         struct amdgpu_device *adev = crtc->dev->dev_private;
2365         u32 tmp;
2366
2367         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2368         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
2369         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2370 }
2371
2372 static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
2373 {
2374         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2375         struct amdgpu_device *adev = crtc->dev->dev_private;
2376         u32 tmp;
2377
2378         WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2379                upper_32_bits(amdgpu_crtc->cursor_addr));
2380         WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2381                lower_32_bits(amdgpu_crtc->cursor_addr));
2382
2383         tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
2384         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
2385         tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
2386         WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
2387 }
2388
2389 static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
2390                                         int x, int y)
2391 {
2392         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2393         struct amdgpu_device *adev = crtc->dev->dev_private;
2394         int xorigin = 0, yorigin = 0;
2395
2396         amdgpu_crtc->cursor_x = x;
2397         amdgpu_crtc->cursor_y = y;
2398
2399         /* avivo cursor are offset into the total surface */
2400         x += crtc->x;
2401         y += crtc->y;
2402         DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2403
2404         if (x < 0) {
2405                 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2406                 x = 0;
2407         }
2408         if (y < 0) {
2409                 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2410                 y = 0;
2411         }
2412
2413         WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2414         WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2415         WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2416                ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2417
2418         return 0;
2419 }
2420
2421 static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc,
2422                                       int x, int y)
2423 {
2424         int ret;
2425
2426         dce_v11_0_lock_cursor(crtc, true);
2427         ret = dce_v11_0_cursor_move_locked(crtc, x, y);
2428         dce_v11_0_lock_cursor(crtc, false);
2429
2430         return ret;
2431 }
2432
/**
 * dce_v11_0_crtc_cursor_set2 - install a new cursor image on a crtc
 *
 * @crtc: drm crtc
 * @file_priv: drm file used to look up the GEM handle
 * @handle: GEM handle of the cursor BO; 0 turns the cursor off
 * @width: cursor width in pixels (must not exceed max_cursor_width)
 * @height: cursor height in pixels (must not exceed max_cursor_height)
 * @hot_x: cursor hot spot x
 * @hot_y: cursor hot spot y
 *
 * Pins the new cursor BO in VRAM, programs the hardware cursor, then
 * unpins and drops the reference on the previously installed cursor BO.
 * Returns 0 on success or a negative error code.
 */
static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
				      struct drm_file *file_priv,
				      uint32_t handle,
				      uint32_t width,
				      uint32_t height,
				      int32_t hot_x,
				      int32_t hot_y)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_bo *aobj;
	int ret;

	if (!handle) {
		/* turn off cursor */
		dce_v11_0_hide_cursor(crtc);
		obj = NULL;
		goto unpin;
	}

	if ((width > amdgpu_crtc->max_cursor_width) ||
	    (height > amdgpu_crtc->max_cursor_height)) {
		DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
		return -ENOENT;
	}

	/* pin the new cursor BO in VRAM so the hw can scan it out */
	aobj = gem_to_amdgpu_bo(obj);
	ret = amdgpu_bo_reserve(aobj, false);
	if (ret != 0) {
		drm_gem_object_put_unlocked(obj);
		return ret;
	}

	ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
	amdgpu_bo_unreserve(aobj);
	if (ret) {
		DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
		drm_gem_object_put_unlocked(obj);
		return ret;
	}
	amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);

	dce_v11_0_lock_cursor(crtc, true);

	if (width != amdgpu_crtc->cursor_width ||
	    height != amdgpu_crtc->cursor_height ||
	    hot_x != amdgpu_crtc->cursor_hot_x ||
	    hot_y != amdgpu_crtc->cursor_hot_y) {
		int x, y;

		/* keep the cursor at the same screen position when the
		 * hot spot changes
		 */
		x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
		y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;

		dce_v11_0_cursor_move_locked(crtc, x, y);

		amdgpu_crtc->cursor_width = width;
		amdgpu_crtc->cursor_height = height;
		amdgpu_crtc->cursor_hot_x = hot_x;
		amdgpu_crtc->cursor_hot_y = hot_y;
	}

	dce_v11_0_show_cursor(crtc);
	dce_v11_0_lock_cursor(crtc, false);

unpin:
	/* unpin and drop the reference on the previous cursor BO, if any */
	if (amdgpu_crtc->cursor_bo) {
		struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
		ret = amdgpu_bo_reserve(aobj, true);
		if (likely(ret == 0)) {
			amdgpu_bo_unpin(aobj);
			amdgpu_bo_unreserve(aobj);
		}
		drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
	}

	amdgpu_crtc->cursor_bo = obj;
	return 0;
}
2517
2518 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2519 {
2520         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2521
2522         if (amdgpu_crtc->cursor_bo) {
2523                 dce_v11_0_lock_cursor(crtc, true);
2524
2525                 dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2526                                              amdgpu_crtc->cursor_y);
2527
2528                 dce_v11_0_show_cursor(crtc);
2529
2530                 dce_v11_0_lock_cursor(crtc, false);
2531         }
2532 }
2533
2534 static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2535                                     u16 *blue, uint32_t size,
2536                                     struct drm_modeset_acquire_ctx *ctx)
2537 {
2538         dce_v11_0_crtc_load_lut(crtc);
2539
2540         return 0;
2541 }
2542
/* Tear down the DRM crtc and free the driver-private wrapper. */
static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_crtc_cleanup(crtc);
	kfree(amdgpu_crtc);
}
2550
/* Legacy (non-atomic) KMS crtc callbacks for DCE 11. */
static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
	.cursor_set2 = dce_v11_0_crtc_cursor_set2,
	.cursor_move = dce_v11_0_crtc_cursor_move,
	.gamma_set = dce_v11_0_crtc_gamma_set,
	.set_config = amdgpu_display_crtc_set_config,
	.destroy = dce_v11_0_crtc_destroy,
	.page_flip_target = amdgpu_display_crtc_page_flip_target,
};
2559
/**
 * dce_v11_0_crtc_dpms - set the power state of a crtc
 *
 * @crtc: drm crtc
 * @mode: DRM_MODE_DPMS_* power state
 *
 * On DPMS_ON: enables the crtc via atombios, unblanks it, re-arms the
 * vblank and pageflip interrupts and reloads the gamma LUT.  On the
 * off states: blanks and disables the crtc.  In both cases power
 * management clocks are re-evaluated afterwards.
 */
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	unsigned type;

	switch (mode) {
	case DRM_MODE_DPMS_ON:
		amdgpu_crtc->enabled = true;
		amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
		/* blank/unblank is done with VGA mode briefly enabled */
		dce_v11_0_vga_enable(crtc, true);
		amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
		dce_v11_0_vga_enable(crtc, false);
		/* Make sure VBLANK and PFLIP interrupts are still enabled */
		type = amdgpu_display_crtc_idx_to_irq_type(adev,
						amdgpu_crtc->crtc_id);
		amdgpu_irq_update(adev, &adev->crtc_irq, type);
		amdgpu_irq_update(adev, &adev->pageflip_irq, type);
		drm_crtc_vblank_on(crtc);
		dce_v11_0_crtc_load_lut(crtc);
		break;
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
	case DRM_MODE_DPMS_OFF:
		drm_crtc_vblank_off(crtc);
		if (amdgpu_crtc->enabled) {
			dce_v11_0_vga_enable(crtc, true);
			amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
			dce_v11_0_vga_enable(crtc, false);
		}
		amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
		amdgpu_crtc->enabled = false;
		break;
	}
	/* adjust pm to dpms */
	amdgpu_pm_compute_clocks(adev);
}
2598
/* Prepare a crtc for a modeset: power gating must be disabled before
 * any crtc registers are programmed; the crtc is then locked and
 * turned off until dce_v11_0_crtc_commit() runs.
 */
static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc)
{
	/* disable crtc pair power gating before programming */
	amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
	amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
2606
/* Finish a modeset: power the crtc back on and release the lock taken
 * in dce_v11_0_crtc_prepare().
 */
static void dce_v11_0_crtc_commit(struct drm_crtc *crtc)
{
	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
}
2612
/**
 * dce_v11_0_crtc_disable - fully disable a crtc
 *
 * @crtc: drm crtc
 *
 * Turns the crtc off, unpins its scanout buffer, disables the GRPH
 * block, re-enables crtc pair power gating and finally tears down the
 * PLL — unless another enabled crtc is still sharing it.
 */
static void dce_v11_0_crtc_disable(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_atom_ss ss;
	int i;

	dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
	if (crtc->primary->fb) {
		int r;
		struct amdgpu_bo *abo;

		/* unpin the front buffer that was pinned for scanout */
		abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
		r = amdgpu_bo_reserve(abo, true);
		if (unlikely(r))
			DRM_ERROR("failed to reserve abo before unpin\n");
		else {
			amdgpu_bo_unpin(abo);
			amdgpu_bo_unreserve(abo);
		}
	}
	/* disable the GRPH */
	dce_v11_0_grph_enable(crtc, false);

	amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);

	/* keep the PLL running if another enabled crtc still uses it */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->mode_info.crtcs[i] &&
		    adev->mode_info.crtcs[i]->enabled &&
		    i != amdgpu_crtc->crtc_id &&
		    amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
			/* one other crtc is using this pll don't turn
			 * off the pll
			 */
			goto done;
		}
	}

	switch (amdgpu_crtc->pll_id) {
	case ATOM_PPLL0:
	case ATOM_PPLL1:
	case ATOM_PPLL2:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	case ATOM_COMBOPHY_PLL0:
	case ATOM_COMBOPHY_PLL1:
	case ATOM_COMBOPHY_PLL2:
	case ATOM_COMBOPHY_PLL3:
	case ATOM_COMBOPHY_PLL4:
	case ATOM_COMBOPHY_PLL5:
		/* disable the ppll */
		amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id,
						 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
		break;
	default:
		break;
	}
done:
	/* clear the crtc state so mode_fixup re-assigns pll/encoder on
	 * the next modeset
	 */
	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->adjusted_clock = 0;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
}
2679
/**
 * dce_v11_0_crtc_mode_set - program a crtc for the requested mode
 *
 * @crtc: drm crtc
 * @mode: requested display mode
 * @adjusted_mode: mode after fixup
 * @x: framebuffer x offset
 * @y: framebuffer y offset
 * @old_fb: previous framebuffer (to be unpinned)
 *
 * Programs the PLL, DTD timing, scanout base, overscan, scaler and
 * cursor for the crtc.  Returns 0 on success, -EINVAL if mode_fixup
 * did not run (no adjusted clock).
 */
static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
				  struct drm_display_mode *mode,
				  struct drm_display_mode *adjusted_mode,
				  int x, int y, struct drm_framebuffer *old_fb)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;

	/* mode_fixup must have computed the adjusted clock first */
	if (!amdgpu_crtc->adjusted_clock)
		return -EINVAL;

	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM)) {
		struct amdgpu_encoder *amdgpu_encoder =
			to_amdgpu_encoder(amdgpu_crtc->encoder);
		int encoder_mode =
			amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder);

		/* SetPixelClock calculates the plls and ss values now */
		amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id,
						 amdgpu_crtc->pll_id,
						 encoder_mode, amdgpu_encoder->encoder_id,
						 adjusted_mode->clock, 0, 0, 0, 0,
						 amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss);
	} else {
		amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
	}
	amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
	dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
	amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
	amdgpu_atombios_crtc_scaler_setup(crtc);
	dce_v11_0_cursor_reset(crtc);
	/* update the hw mode for dpm */
	amdgpu_crtc->hw_mode = *adjusted_mode;

	return 0;
}
2720
2721 static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc,
2722                                      const struct drm_display_mode *mode,
2723                                      struct drm_display_mode *adjusted_mode)
2724 {
2725         struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2726         struct drm_device *dev = crtc->dev;
2727         struct drm_encoder *encoder;
2728
2729         /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2730         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2731                 if (encoder->crtc == crtc) {
2732                         amdgpu_crtc->encoder = encoder;
2733                         amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2734                         break;
2735                 }
2736         }
2737         if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2738                 amdgpu_crtc->encoder = NULL;
2739                 amdgpu_crtc->connector = NULL;
2740                 return false;
2741         }
2742         if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2743                 return false;
2744         if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2745                 return false;
2746         /* pick pll */
2747         amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc);
2748         /* if we can't get a PPLL for a non-DP encoder, fail */
2749         if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2750             !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2751                 return false;
2752
2753         return true;
2754 }
2755
/* Non-atomic scanout base update. */
static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
				  struct drm_framebuffer *old_fb)
{
	return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
2761
2762 static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2763                                          struct drm_framebuffer *fb,
2764                                          int x, int y, enum mode_set_atomic state)
2765 {
2766        return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1);
2767 }
2768
/* Legacy modeset helper callbacks for DCE 11 crtcs. */
static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
	.dpms = dce_v11_0_crtc_dpms,
	.mode_fixup = dce_v11_0_crtc_mode_fixup,
	.mode_set = dce_v11_0_crtc_mode_set,
	.mode_set_base = dce_v11_0_crtc_set_base,
	.mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic,
	.prepare = dce_v11_0_crtc_prepare,
	.commit = dce_v11_0_crtc_commit,
	.disable = dce_v11_0_crtc_disable,
};
2779
2780 static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
2781 {
2782         struct amdgpu_crtc *amdgpu_crtc;
2783
2784         amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2785                               (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2786         if (amdgpu_crtc == NULL)
2787                 return -ENOMEM;
2788
2789         drm_crtc_init(adev->ddev, &amdgpu_crtc->base, &dce_v11_0_crtc_funcs);
2790
2791         drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2792         amdgpu_crtc->crtc_id = index;
2793         adev->mode_info.crtcs[index] = amdgpu_crtc;
2794
2795         amdgpu_crtc->max_cursor_width = 128;
2796         amdgpu_crtc->max_cursor_height = 128;
2797         adev->ddev->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2798         adev->ddev->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2799
2800         switch (amdgpu_crtc->crtc_id) {
2801         case 0:
2802         default:
2803                 amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET;
2804                 break;
2805         case 1:
2806                 amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET;
2807                 break;
2808         case 2:
2809                 amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET;
2810                 break;
2811         case 3:
2812                 amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET;
2813                 break;
2814         case 4:
2815                 amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET;
2816                 break;
2817         case 5:
2818                 amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET;
2819                 break;
2820         }
2821
2822         amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2823         amdgpu_crtc->adjusted_clock = 0;
2824         amdgpu_crtc->encoder = NULL;
2825         amdgpu_crtc->connector = NULL;
2826         drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs);
2827
2828         return 0;
2829 }
2830
2831 static int dce_v11_0_early_init(void *handle)
2832 {
2833         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2834
2835         adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg;
2836         adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg;
2837
2838         dce_v11_0_set_display_funcs(adev);
2839
2840         adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev);
2841
2842         switch (adev->asic_type) {
2843         case CHIP_CARRIZO:
2844                 adev->mode_info.num_hpd = 6;
2845                 adev->mode_info.num_dig = 9;
2846                 break;
2847         case CHIP_STONEY:
2848                 adev->mode_info.num_hpd = 6;
2849                 adev->mode_info.num_dig = 9;
2850                 break;
2851         case CHIP_POLARIS10:
2852         case CHIP_VEGAM:
2853                 adev->mode_info.num_hpd = 6;
2854                 adev->mode_info.num_dig = 6;
2855                 break;
2856         case CHIP_POLARIS11:
2857         case CHIP_POLARIS12:
2858                 adev->mode_info.num_hpd = 5;
2859                 adev->mode_info.num_dig = 5;
2860                 break;
2861         default:
2862                 /* FIXME: not supported yet */
2863                 return -EINVAL;
2864         }
2865
2866         dce_v11_0_set_irq_funcs(adev);
2867
2868         return 0;
2869 }
2870
2871 static int dce_v11_0_sw_init(void *handle)
2872 {
2873         int r, i;
2874         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2875
2876         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2877                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2878                 if (r)
2879                         return r;
2880         }
2881
2882         for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) {
2883                 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2884                 if (r)
2885                         return r;
2886         }
2887
2888         /* HPD hotplug */
2889         r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
2890         if (r)
2891                 return r;
2892
2893         adev->ddev->mode_config.funcs = &amdgpu_mode_funcs;
2894
2895         adev->ddev->mode_config.async_page_flip = true;
2896
2897         adev->ddev->mode_config.max_width = 16384;
2898         adev->ddev->mode_config.max_height = 16384;
2899
2900         adev->ddev->mode_config.preferred_depth = 24;
2901         adev->ddev->mode_config.prefer_shadow = 1;
2902
2903         adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
2904
2905         r = amdgpu_display_modeset_create_props(adev);
2906         if (r)
2907                 return r;
2908
2909         adev->ddev->mode_config.max_width = 16384;
2910         adev->ddev->mode_config.max_height = 16384;
2911
2912
2913         /* allocate crtcs */
2914         for (i = 0; i < adev->mode_info.num_crtc; i++) {
2915                 r = dce_v11_0_crtc_init(adev, i);
2916                 if (r)
2917                         return r;
2918         }
2919
2920         if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2921                 amdgpu_display_print_display_setup(adev->ddev);
2922         else
2923                 return -EINVAL;
2924
2925         /* setup afmt */
2926         r = dce_v11_0_afmt_init(adev);
2927         if (r)
2928                 return r;
2929
2930         r = dce_v11_0_audio_init(adev);
2931         if (r)
2932                 return r;
2933
2934         drm_kms_helper_poll_init(adev->ddev);
2935
2936         adev->mode_info.mode_config_initialized = true;
2937         return 0;
2938 }
2939
/*
 * dce_v11_0_sw_fini - software-side teardown, mirror of sw_init.
 *
 * Order matters: polling is stopped before audio/afmt state is freed,
 * and mode_config cleanup runs last so CRTC/encoder objects are still
 * valid while the helpers above use them.
 */
static int dce_v11_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* kfree(NULL) is a no-op, so this is safe when no hardcoded EDID was read */
	kfree(adev->mode_info.bios_hardcoded_edid);

	drm_kms_helper_poll_fini(adev->ddev);

	dce_v11_0_audio_fini(adev);

	dce_v11_0_afmt_fini(adev);

	drm_mode_config_cleanup(adev->ddev);
	adev->mode_info.mode_config_initialized = false;

	return 0;
}
2957
/*
 * dce_v11_0_hw_init - bring up the display hardware.
 *
 * Sequence: golden registers, VGA render off, DIG PHY / display PLL
 * init via atombios, HPD init, audio pins muted, pageflip interrupts
 * armed.  The order follows the hardware programming requirements;
 * do not reorder.
 */
static int dce_v11_0_hw_init(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v11_0_init_golden_registers(adev);

	/* disable vga render */
	dce_v11_0_set_vga_render_state(adev, false);
	/* init dig PHYs, disp eng pll */
	amdgpu_atombios_crtc_powergate_init(adev);
	amdgpu_atombios_encoder_init_dig(adev);
	/* Polaris-family parts program DISPCLK/DPREFCLK through the
	 * SetDCEClock table; older ASICs use the legacy disp-eng PLL call */
	if ((adev->asic_type == CHIP_POLARIS10) ||
	    (adev->asic_type == CHIP_POLARIS11) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM)) {
		amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
						   DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
		amdgpu_atombios_crtc_set_dce_clock(adev, 0,
						   DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS);
	} else {
		amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
	}

	/* initialize hpd */
	dce_v11_0_hpd_init(adev);

	/* start with every audio pin disabled; mode set re-enables as needed */
	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_init(adev);

	return 0;
}
2993
/*
 * dce_v11_0_hw_fini - quiesce the display hardware.
 *
 * Disables HPD, mutes all audio pins, and tears down the pageflip
 * interrupt state armed in hw_init.
 */
static int dce_v11_0_hw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dce_v11_0_hpd_fini(adev);

	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
		dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
	}

	dce_v11_0_pageflip_interrupt_fini(adev);

	return 0;
}
3009
3010 static int dce_v11_0_suspend(void *handle)
3011 {
3012         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3013
3014         adev->mode_info.bl_level =
3015                 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
3016
3017         return dce_v11_0_hw_fini(handle);
3018 }
3019
3020 static int dce_v11_0_resume(void *handle)
3021 {
3022         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3023         int ret;
3024
3025         amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
3026                                                            adev->mode_info.bl_level);
3027
3028         ret = dce_v11_0_hw_init(handle);
3029
3030         /* turn on the BL */
3031         if (adev->mode_info.bl_encoder) {
3032                 u8 bl_level = amdgpu_display_backlight_get_level(adev,
3033                                                                   adev->mode_info.bl_encoder);
3034                 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
3035                                                     bl_level);
3036         }
3037
3038         return ret;
3039 }
3040
/* DCE has no busy state the driver tracks; always report idle. */
static bool dce_v11_0_is_idle(void *handle)
{
	return true;
}
3045
/* Nothing to wait for (see dce_v11_0_is_idle); trivially succeeds. */
static int dce_v11_0_wait_for_idle(void *handle)
{
	return 0;
}
3050
/*
 * dce_v11_0_soft_reset - soft-reset the DC block through SRBM.
 *
 * If the display controller looks hung, pulse the DC soft-reset bit:
 * set it, wait, clear it, wait again.  The extra reads after each
 * write post the register access before the delay.
 */
static int dce_v11_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0, tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (dce_v11_0_is_display_hung(adev))
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		/* read back to post the write before the delay */
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
3077
3078 static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
3079                                                      int crtc,
3080                                                      enum amdgpu_interrupt_state state)
3081 {
3082         u32 lb_interrupt_mask;
3083
3084         if (crtc >= adev->mode_info.num_crtc) {
3085                 DRM_DEBUG("invalid crtc %d\n", crtc);
3086                 return;
3087         }
3088
3089         switch (state) {
3090         case AMDGPU_IRQ_STATE_DISABLE:
3091                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3092                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3093                                                   VBLANK_INTERRUPT_MASK, 0);
3094                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3095                 break;
3096         case AMDGPU_IRQ_STATE_ENABLE:
3097                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3098                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3099                                                   VBLANK_INTERRUPT_MASK, 1);
3100                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3101                 break;
3102         default:
3103                 break;
3104         }
3105 }
3106
3107 static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
3108                                                     int crtc,
3109                                                     enum amdgpu_interrupt_state state)
3110 {
3111         u32 lb_interrupt_mask;
3112
3113         if (crtc >= adev->mode_info.num_crtc) {
3114                 DRM_DEBUG("invalid crtc %d\n", crtc);
3115                 return;
3116         }
3117
3118         switch (state) {
3119         case AMDGPU_IRQ_STATE_DISABLE:
3120                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3121                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3122                                                   VLINE_INTERRUPT_MASK, 0);
3123                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3124                 break;
3125         case AMDGPU_IRQ_STATE_ENABLE:
3126                 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]);
3127                 lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK,
3128                                                   VLINE_INTERRUPT_MASK, 1);
3129                 WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask);
3130                 break;
3131         default:
3132                 break;
3133         }
3134 }
3135
3136 static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev,
3137                                         struct amdgpu_irq_src *source,
3138                                         unsigned hpd,
3139                                         enum amdgpu_interrupt_state state)
3140 {
3141         u32 tmp;
3142
3143         if (hpd >= adev->mode_info.num_hpd) {
3144                 DRM_DEBUG("invalid hdp %d\n", hpd);
3145                 return 0;
3146         }
3147
3148         switch (state) {
3149         case AMDGPU_IRQ_STATE_DISABLE:
3150                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3151                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0);
3152                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3153                 break;
3154         case AMDGPU_IRQ_STATE_ENABLE:
3155                 tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3156                 tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1);
3157                 WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3158                 break;
3159         default:
3160                 break;
3161         }
3162
3163         return 0;
3164 }
3165
3166 static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev,
3167                                         struct amdgpu_irq_src *source,
3168                                         unsigned type,
3169                                         enum amdgpu_interrupt_state state)
3170 {
3171         switch (type) {
3172         case AMDGPU_CRTC_IRQ_VBLANK1:
3173                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3174                 break;
3175         case AMDGPU_CRTC_IRQ_VBLANK2:
3176                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3177                 break;
3178         case AMDGPU_CRTC_IRQ_VBLANK3:
3179                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3180                 break;
3181         case AMDGPU_CRTC_IRQ_VBLANK4:
3182                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3183                 break;
3184         case AMDGPU_CRTC_IRQ_VBLANK5:
3185                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3186                 break;
3187         case AMDGPU_CRTC_IRQ_VBLANK6:
3188                 dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3189                 break;
3190         case AMDGPU_CRTC_IRQ_VLINE1:
3191                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state);
3192                 break;
3193         case AMDGPU_CRTC_IRQ_VLINE2:
3194                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state);
3195                 break;
3196         case AMDGPU_CRTC_IRQ_VLINE3:
3197                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state);
3198                 break;
3199         case AMDGPU_CRTC_IRQ_VLINE4:
3200                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state);
3201                 break;
3202         case AMDGPU_CRTC_IRQ_VLINE5:
3203                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state);
3204                 break;
3205          case AMDGPU_CRTC_IRQ_VLINE6:
3206                 dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state);
3207                 break;
3208         default:
3209                 break;
3210         }
3211         return 0;
3212 }
3213
3214 static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev,
3215                                             struct amdgpu_irq_src *src,
3216                                             unsigned type,
3217                                             enum amdgpu_interrupt_state state)
3218 {
3219         u32 reg;
3220
3221         if (type >= adev->mode_info.num_crtc) {
3222                 DRM_ERROR("invalid pageflip crtc %d\n", type);
3223                 return -EINVAL;
3224         }
3225
3226         reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3227         if (state == AMDGPU_IRQ_STATE_DISABLE)
3228                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3229                        reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3230         else
3231                 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3232                        reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3233
3234         return 0;
3235 }
3236
3237 static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3238                                   struct amdgpu_irq_src *source,
3239                                   struct amdgpu_iv_entry *entry)
3240 {
3241         unsigned long flags;
3242         unsigned crtc_id;
3243         struct amdgpu_crtc *amdgpu_crtc;
3244         struct amdgpu_flip_work *works;
3245
3246         crtc_id = (entry->src_id - 8) >> 1;
3247         amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3248
3249         if (crtc_id >= adev->mode_info.num_crtc) {
3250                 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3251                 return -EINVAL;
3252         }
3253
3254         if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3255             GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3256                 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3257                        GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3258
3259         /* IRQ could occur when in initial stage */
3260         if(amdgpu_crtc == NULL)
3261                 return 0;
3262
3263         spin_lock_irqsave(&adev->ddev->event_lock, flags);
3264         works = amdgpu_crtc->pflip_works;
3265         if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
3266                 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3267                                                  "AMDGPU_FLIP_SUBMITTED(%d)\n",
3268                                                  amdgpu_crtc->pflip_status,
3269                                                  AMDGPU_FLIP_SUBMITTED);
3270                 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3271                 return 0;
3272         }
3273
3274         /* page flip completed. clean up */
3275         amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3276         amdgpu_crtc->pflip_works = NULL;
3277
3278         /* wakeup usersapce */
3279         if(works->event)
3280                 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3281
3282         spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3283
3284         drm_crtc_vblank_put(&amdgpu_crtc->base);
3285         schedule_work(&works->unpin_work);
3286
3287         return 0;
3288 }
3289
3290 static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev,
3291                                   int hpd)
3292 {
3293         u32 tmp;
3294
3295         if (hpd >= adev->mode_info.num_hpd) {
3296                 DRM_DEBUG("invalid hdp %d\n", hpd);
3297                 return;
3298         }
3299
3300         tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]);
3301         tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1);
3302         WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp);
3303 }
3304
3305 static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev,
3306                                           int crtc)
3307 {
3308         u32 tmp;
3309
3310         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3311                 DRM_DEBUG("invalid crtc %d\n", crtc);
3312                 return;
3313         }
3314
3315         tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]);
3316         tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1);
3317         WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp);
3318 }
3319
3320 static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev,
3321                                          int crtc)
3322 {
3323         u32 tmp;
3324
3325         if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
3326                 DRM_DEBUG("invalid crtc %d\n", crtc);
3327                 return;
3328         }
3329
3330         tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]);
3331         tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1);
3332         WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp);
3333 }
3334
3335 static int dce_v11_0_crtc_irq(struct amdgpu_device *adev,
3336                                 struct amdgpu_irq_src *source,
3337                                 struct amdgpu_iv_entry *entry)
3338 {
3339         unsigned crtc = entry->src_id - 1;
3340         uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3341         unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3342                                                                     crtc);
3343
3344         switch (entry->src_data[0]) {
3345         case 0: /* vblank */
3346                 if (disp_int & interrupt_status_offsets[crtc].vblank)
3347                         dce_v11_0_crtc_vblank_int_ack(adev, crtc);
3348                 else
3349                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3350
3351                 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3352                         drm_handle_vblank(adev->ddev, crtc);
3353                 }
3354                 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3355
3356                 break;
3357         case 1: /* vline */
3358                 if (disp_int & interrupt_status_offsets[crtc].vline)
3359                         dce_v11_0_crtc_vline_int_ack(adev, crtc);
3360                 else
3361                         DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3362
3363                 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3364
3365                 break;
3366         default:
3367                 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3368                 break;
3369         }
3370
3371         return 0;
3372 }
3373
/*
 * dce_v11_0_hpd_irq - hotplug-detect interrupt handler.
 *
 * Validates the HPD pin from the IV entry, and if the status bit is
 * actually latched, acks it and kicks the deferred hotplug work.
 * Always returns 0 (unknown pins are just debug-logged).
 */
static int dce_v11_0_hpd_irq(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *source,
			     struct amdgpu_iv_entry *entry)
{
	uint32_t disp_int, mask;
	unsigned hpd;

	if (entry->src_data[0] >= adev->mode_info.num_hpd) {
		DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
		return 0;
	}

	hpd = entry->src_data[0];
	disp_int = RREG32(interrupt_status_offsets[hpd].reg);
	mask = interrupt_status_offsets[hpd].hpd;

	if (disp_int & mask) {
		dce_v11_0_hpd_int_ack(adev, hpd);
		/* connector re-probe runs in process context, not here */
		schedule_work(&adev->hotplug_work);
		DRM_DEBUG("IH: HPD%d\n", hpd + 1);
	}

	return 0;
}
3398
/* No driver-controlled clockgating for DCE 11.0; required hook is a no-op. */
static int dce_v11_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	return 0;
}
3404
/* No driver-controlled powergating for DCE 11.0; required hook is a no-op. */
static int dce_v11_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
3410
/* amd_ip_funcs table wiring the DCE 11.0 display block into the
 * amdgpu IP-block lifecycle (init/fini, suspend/resume, reset). */
static const struct amd_ip_funcs dce_v11_0_ip_funcs = {
	.name = "dce_v11_0",
	.early_init = dce_v11_0_early_init,
	.late_init = NULL,
	.sw_init = dce_v11_0_sw_init,
	.sw_fini = dce_v11_0_sw_fini,
	.hw_init = dce_v11_0_hw_init,
	.hw_fini = dce_v11_0_hw_fini,
	.suspend = dce_v11_0_suspend,
	.resume = dce_v11_0_resume,
	.is_idle = dce_v11_0_is_idle,
	.wait_for_idle = dce_v11_0_wait_for_idle,
	.soft_reset = dce_v11_0_soft_reset,
	.set_clockgating_state = dce_v11_0_set_clockgating_state,
	.set_powergating_state = dce_v11_0_set_powergating_state,
};
3427
/*
 * dce_v11_0_encoder_mode_set - encoder-helper mode_set hook.
 *
 * Records the adjusted pixel clock, powers the encoder down before
 * crtc programming, reprograms interleave, and sets up AFMT/audio for
 * HDMI modes.
 */
static void
dce_v11_0_encoder_mode_set(struct drm_encoder *encoder,
			  struct drm_display_mode *mode,
			  struct drm_display_mode *adjusted_mode)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

	amdgpu_encoder->pixel_clock = adjusted_mode->clock;

	/* need to call this here rather than in prepare() since we need some crtc info */
	amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);

	/* set scaler clears this on some chips */
	dce_v11_0_set_interleave(encoder->crtc, mode);

	if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
		dce_v11_0_afmt_enable(encoder, true);
		dce_v11_0_afmt_setmode(encoder, adjusted_mode);
	}
}
3448
/*
 * dce_v11_0_encoder_prepare - encoder-helper prepare hook.
 *
 * Picks a DIG encoder/AFMT block for digital outputs, takes the atom
 * scratch-register lock (released in the commit hook), routes the
 * i2c router and powers eDP panels as needed, then programs the crtc
 * source and FMT blocks.
 */
static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder)
{
	struct amdgpu_device *adev = encoder->dev->dev_private;
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);

	if ((amdgpu_encoder->active_device &
	     (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
	    (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
	     ENCODER_OBJECT_ID_NONE)) {
		struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
		if (dig) {
			dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder);
			if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
				dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
		}
	}

	/* held until dce_v11_0_encoder_commit() releases it */
	amdgpu_atombios_scratch_regs_lock(adev, true);

	if (connector) {
		struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);

		/* select the clock/data port if it uses a router */
		if (amdgpu_connector->router.cd_valid)
			amdgpu_i2c_router_select_cd_port(amdgpu_connector);

		/* turn eDP panel on for mode set */
		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
			amdgpu_atombios_encoder_set_edp_panel_power(connector,
							     ATOM_TRANSMITTER_ACTION_POWER_ON);
	}

	/* this is needed for the pll/ss setup to work correctly in some cases */
	amdgpu_atombios_encoder_set_crtc_source(encoder);
	/* set up the FMT blocks */
	dce_v11_0_program_fmt(encoder);
}
3487
3488 static void dce_v11_0_encoder_commit(struct drm_encoder *encoder)
3489 {
3490         struct drm_device *dev = encoder->dev;
3491         struct amdgpu_device *adev = dev->dev_private;
3492
3493         /* need to call this here as we need the crtc set up */
3494         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3495         amdgpu_atombios_scratch_regs_lock(adev, false);
3496 }
3497
3498 static void dce_v11_0_encoder_disable(struct drm_encoder *encoder)
3499 {
3500         struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3501         struct amdgpu_encoder_atom_dig *dig;
3502
3503         amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3504
3505         if (amdgpu_atombios_encoder_is_digital(encoder)) {
3506                 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3507                         dce_v11_0_afmt_enable(encoder, false);
3508                 dig = amdgpu_encoder->enc_priv;
3509                 dig->dig_encoder = -1;
3510         }
3511         amdgpu_encoder->active_device = 0;
3512 }
3513
/* these are handled by the primary encoders */
static void dce_v11_0_ext_prepare(struct drm_encoder *encoder)
{
	/* intentionally empty: external encoder prepare is a no-op */
}
3519
static void dce_v11_0_ext_commit(struct drm_encoder *encoder)
{
	/* intentionally empty: external encoder commit is a no-op */
}
3524
static void
dce_v11_0_ext_mode_set(struct drm_encoder *encoder,
		      struct drm_display_mode *mode,
		      struct drm_display_mode *adjusted_mode)
{
	/* intentionally empty: external encoder mode_set is a no-op */
}
3532
static void dce_v11_0_ext_disable(struct drm_encoder *encoder)
{
	/* intentionally empty: external encoder disable is a no-op */
}
3537
static void
dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode)
{
	/* intentionally empty: external encoder dpms is a no-op */
}
3543
/* Helper table for external (bridge) encoders; all hooks are stubs
 * because the primary encoder drives the hardware. */
static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = {
	.dpms = dce_v11_0_ext_dpms,
	.prepare = dce_v11_0_ext_prepare,
	.mode_set = dce_v11_0_ext_mode_set,
	.commit = dce_v11_0_ext_commit,
	.disable = dce_v11_0_ext_disable,
	/* no detect for TMDS/LVDS yet */
};
3552
/* Helper table for DIG (digital: TMDS/LVDS/DP) encoders. */
static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.disable = dce_v11_0_encoder_disable,
	.detect = amdgpu_atombios_encoder_dig_detect,
};
3562
/* Helper table for DAC (analog) encoders; no disable hook needed. */
static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = {
	.dpms = amdgpu_atombios_encoder_dpms,
	.mode_fixup = amdgpu_atombios_encoder_mode_fixup,
	.prepare = dce_v11_0_encoder_prepare,
	.mode_set = dce_v11_0_encoder_mode_set,
	.commit = dce_v11_0_encoder_commit,
	.detect = amdgpu_atombios_encoder_dac_detect,
};
3571
/*
 * dce_v11_0_encoder_destroy - free an amdgpu encoder.
 *
 * Tears down backlight state for LCD encoders, frees the private DIG
 * data, unregisters from DRM, then frees the wrapper itself (in that
 * order; enc_priv must be freed before the container).
 */
static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder)
{
	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
	if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
		amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
	kfree(amdgpu_encoder->enc_priv);
	drm_encoder_cleanup(encoder);
	kfree(amdgpu_encoder);
}
3581
/* Core encoder funcs: only destroy is needed; helpers handle the rest. */
static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = {
	.destroy = dce_v11_0_encoder_destroy,
};
3585
3586 static void dce_v11_0_encoder_add(struct amdgpu_device *adev,
3587                                  uint32_t encoder_enum,
3588                                  uint32_t supported_device,
3589                                  u16 caps)
3590 {
3591         struct drm_device *dev = adev->ddev;
3592         struct drm_encoder *encoder;
3593         struct amdgpu_encoder *amdgpu_encoder;
3594
3595         /* see if we already added it */
3596         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3597                 amdgpu_encoder = to_amdgpu_encoder(encoder);
3598                 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3599                         amdgpu_encoder->devices |= supported_device;
3600                         return;
3601                 }
3602
3603         }
3604
3605         /* add a new one */
3606         amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3607         if (!amdgpu_encoder)
3608                 return;
3609
3610         encoder = &amdgpu_encoder->base;
3611         switch (adev->mode_info.num_crtc) {
3612         case 1:
3613                 encoder->possible_crtcs = 0x1;
3614                 break;
3615         case 2:
3616         default:
3617                 encoder->possible_crtcs = 0x3;
3618                 break;
3619         case 3:
3620                 encoder->possible_crtcs = 0x7;
3621                 break;
3622         case 4:
3623                 encoder->possible_crtcs = 0xf;
3624                 break;
3625         case 5:
3626                 encoder->possible_crtcs = 0x1f;
3627                 break;
3628         case 6:
3629                 encoder->possible_crtcs = 0x3f;
3630                 break;
3631         }
3632
3633         amdgpu_encoder->enc_priv = NULL;
3634
3635         amdgpu_encoder->encoder_enum = encoder_enum;
3636         amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3637         amdgpu_encoder->devices = supported_device;
3638         amdgpu_encoder->rmx_type = RMX_OFF;
3639         amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3640         amdgpu_encoder->is_ext_encoder = false;
3641         amdgpu_encoder->caps = caps;
3642
3643         switch (amdgpu_encoder->encoder_id) {
3644         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3645         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3646                 drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3647                                  DRM_MODE_ENCODER_DAC, NULL);
3648                 drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs);
3649                 break;
3650         case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3651         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3652         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3653         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3654         case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3655                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3656                         amdgpu_encoder->rmx_type = RMX_FULL;
3657                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3658                                          DRM_MODE_ENCODER_LVDS, NULL);
3659                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3660                 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3661                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3662                                          DRM_MODE_ENCODER_DAC, NULL);
3663                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3664                 } else {
3665                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3666                                          DRM_MODE_ENCODER_TMDS, NULL);
3667                         amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3668                 }
3669                 drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs);
3670                 break;
3671         case ENCODER_OBJECT_ID_SI170B:
3672         case ENCODER_OBJECT_ID_CH7303:
3673         case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3674         case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3675         case ENCODER_OBJECT_ID_TITFP513:
3676         case ENCODER_OBJECT_ID_VT1623:
3677         case ENCODER_OBJECT_ID_HDMI_SI1930:
3678         case ENCODER_OBJECT_ID_TRAVIS:
3679         case ENCODER_OBJECT_ID_NUTMEG:
3680                 /* these are handled by the primary encoders */
3681                 amdgpu_encoder->is_ext_encoder = true;
3682                 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3683                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3684                                          DRM_MODE_ENCODER_LVDS, NULL);
3685                 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3686                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3687                                          DRM_MODE_ENCODER_DAC, NULL);
3688                 else
3689                         drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs,
3690                                          DRM_MODE_ENCODER_TMDS, NULL);
3691                 drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs);
3692                 break;
3693         }
3694 }
3695
3696 static const struct amdgpu_display_funcs dce_v11_0_display_funcs = {
3697         .bandwidth_update = &dce_v11_0_bandwidth_update,
3698         .vblank_get_counter = &dce_v11_0_vblank_get_counter,
3699         .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3700         .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3701         .hpd_sense = &dce_v11_0_hpd_sense,
3702         .hpd_set_polarity = &dce_v11_0_hpd_set_polarity,
3703         .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg,
3704         .page_flip = &dce_v11_0_page_flip,
3705         .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos,
3706         .add_encoder = &dce_v11_0_encoder_add,
3707         .add_connector = &amdgpu_connector_add,
3708 };
3709
/* Install the DCE 11.x display callback table on this device. */
static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev)
{
	adev->mode_info.funcs = &dce_v11_0_display_funcs;
}
3714
/* CRTC (vblank/vline) interrupt source: state control and handler. */
static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = {
	.set = dce_v11_0_set_crtc_irq_state,
	.process = dce_v11_0_crtc_irq,
};
3719
/* Page-flip completion interrupt source: state control and handler. */
static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = {
	.set = dce_v11_0_set_pageflip_irq_state,
	.process = dce_v11_0_pageflip_irq,
};
3724
/* Hot-plug detect interrupt source: state control and handler. */
static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = {
	.set = dce_v11_0_set_hpd_irq_state,
	.process = dce_v11_0_hpd_irq,
};
3729
3730 static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev)
3731 {
3732         if (adev->mode_info.num_crtc > 0)
3733                 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3734         else
3735                 adev->crtc_irq.num_types = 0;
3736         adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs;
3737
3738         adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3739         adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs;
3740
3741         adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3742         adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs;
3743 }
3744
3745 const struct amdgpu_ip_block_version dce_v11_0_ip_block =
3746 {
3747         .type = AMD_IP_BLOCK_TYPE_DCE,
3748         .major = 11,
3749         .minor = 0,
3750         .rev = 0,
3751         .funcs = &dce_v11_0_ip_funcs,
3752 };
3753
3754 const struct amdgpu_ip_block_version dce_v11_2_ip_block =
3755 {
3756         .type = AMD_IP_BLOCK_TYPE_DCE,
3757         .major = 11,
3758         .minor = 2,
3759         .rev = 0,
3760         .funcs = &dce_v11_0_ip_funcs,
3761 };
This page took 0.258869 seconds and 4 git commands to generate.