2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <drm/drm_fourcc.h>
25 #include <drm/drm_modeset_helper.h>
26 #include <drm/drm_modeset_helper_vtables.h>
27 #include <drm/drm_vblank.h>
30 #include "amdgpu_pm.h"
31 #include "amdgpu_i2c.h"
34 #include "amdgpu_atombios.h"
35 #include "atombios_crtc.h"
36 #include "atombios_encoders.h"
37 #include "amdgpu_pll.h"
38 #include "amdgpu_connectors.h"
39 #include "amdgpu_display.h"
42 #include "dce/dce_8_0_d.h"
43 #include "dce/dce_8_0_sh_mask.h"
45 #include "gca/gfx_7_2_enum.h"
47 #include "gmc/gmc_7_1_d.h"
48 #include "gmc/gmc_7_1_sh_mask.h"
50 #include "oss/oss_2_0_d.h"
51 #include "oss/oss_2_0_sh_mask.h"
53 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev);
54 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev);
56 static const u32 crtc_offsets[6] = {
57 CRTC0_REGISTER_OFFSET,
58 CRTC1_REGISTER_OFFSET,
59 CRTC2_REGISTER_OFFSET,
60 CRTC3_REGISTER_OFFSET,
61 CRTC4_REGISTER_OFFSET,
65 static const u32 hpd_offsets[] = {
74 static const uint32_t dig_offsets[] = {
75 CRTC0_REGISTER_OFFSET,
76 CRTC1_REGISTER_OFFSET,
77 CRTC2_REGISTER_OFFSET,
78 CRTC3_REGISTER_OFFSET,
79 CRTC4_REGISTER_OFFSET,
80 CRTC5_REGISTER_OFFSET,
81 (0x13830 - 0x7030) >> 2,
90 } interrupt_status_offsets[6] = { {
91 .reg = mmDISP_INTERRUPT_STATUS,
92 .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK,
93 .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK,
94 .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK
96 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE,
97 .vblank = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK,
98 .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK,
99 .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK
101 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2,
102 .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK,
103 .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK,
104 .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK
106 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3,
107 .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK,
108 .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK,
109 .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK
111 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4,
112 .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK,
113 .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK,
114 .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK
116 .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5,
117 .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK,
118 .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK,
119 .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK
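/* One entry per display controller: maps each CRTC's vblank/vline and HPD
 * interrupt bits onto its DISP_INTERRUPT_STATUS(_CONTINUE*) register, so the
 * interrupt handling code can look up the right status register and masks for
 * a given crtc/hpd pin with a single table index.
 */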
122 static u32 dce_v8_0_audio_endpt_rreg(struct amdgpu_device *adev,
123 u32 block_offset, u32 reg)
128 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
129 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
130 r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset);
131 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
136 static void dce_v8_0_audio_endpt_wreg(struct amdgpu_device *adev,
137 u32 block_offset, u32 reg, u32 v)
141 spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags);
142 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
143 WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v);
144 spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags);
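/* The Azalia (audio) endpoint registers are indexed: the register number is
 * written to AZALIA_F0_CODEC_ENDPOINT_INDEX and the value is then read or
 * written through AZALIA_F0_CODEC_ENDPOINT_DATA. The audio_endpt_idx_lock
 * spinlock keeps the index/data sequence atomic against concurrent accessors.
 */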
147 static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
149 if (crtc >= adev->mode_info.num_crtc)
152 return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
155 static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
159 /* Enable pflip interrupts */
160 for (i = 0; i < adev->mode_info.num_crtc; i++)
161 amdgpu_irq_get(adev, &adev->pageflip_irq, i);
164 static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
168 /* Disable pflip interrupts */
169 for (i = 0; i < adev->mode_info.num_crtc; i++)
170 amdgpu_irq_put(adev, &adev->pageflip_irq, i);
174 * dce_v8_0_page_flip - pageflip callback.
176 * @adev: amdgpu_device pointer
177 * @crtc_id: crtc to clean up pageflip on
178 * @crtc_base: new address of the crtc (GPU MC address)
179 * @async: asynchronous flip
181 * Triggers the actual pageflip by updating the primary
182 * surface base address.
184 static void dce_v8_0_page_flip(struct amdgpu_device *adev,
185 int crtc_id, u64 crtc_base, bool async)
187 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
188 struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb;
190 /* flip at hsync for async, default is vsync */
191 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, async ?
192 GRPH_FLIP_CONTROL__GRPH_SURFACE_UPDATE_H_RETRACE_EN_MASK : 0);
194 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset,
195 fb->pitches[0] / fb->format->cpp[0]);
196 /* update the primary scanout addresses */
197 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
198 upper_32_bits(crtc_base));
199 /* writing to the low address triggers the update */
200 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
201 lower_32_bits(crtc_base));
203 RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset);
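/* Only the write to the low surface address register latches the new scanout
 * address, which is why the high half is programmed first. The trailing
 * register read is presumably there to post (flush) the write before
 * returning.
 */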
206 static int dce_v8_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
207 u32 *vbl, u32 *position)
209 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
212 *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]);
213 *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]);
219 * dce_v8_0_hpd_sense - hpd sense callback.
221 * @adev: amdgpu_device pointer
222 * @hpd: hpd (hotplug detect) pin
224 * Checks if a digital monitor is connected (evergreen+).
225 * Returns true if connected, false if not connected.
227 static bool dce_v8_0_hpd_sense(struct amdgpu_device *adev,
228 enum amdgpu_hpd_id hpd)
230 bool connected = false;
232 if (hpd >= adev->mode_info.num_hpd)
235 if (RREG32(mmDC_HPD1_INT_STATUS + hpd_offsets[hpd]) &
236 DC_HPD1_INT_STATUS__DC_HPD1_SENSE_MASK)
243 * dce_v8_0_hpd_set_polarity - hpd set polarity callback.
245 * @adev: amdgpu_device pointer
246 * @hpd: hpd (hotplug detect) pin
248 * Set the polarity of the hpd pin (evergreen+).
250 static void dce_v8_0_hpd_set_polarity(struct amdgpu_device *adev,
251 enum amdgpu_hpd_id hpd)
254 bool connected = dce_v8_0_hpd_sense(adev, hpd);
256 if (hpd >= adev->mode_info.num_hpd)
259 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
261 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
263 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_POLARITY_MASK;
264 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
268 * dce_v8_0_hpd_init - hpd setup callback.
270 * @adev: amdgpu_device pointer
272 * Setup the hpd pins used by the card (evergreen+).
273 * Enable the pin, set the polarity, and enable the hpd interrupts.
275 static void dce_v8_0_hpd_init(struct amdgpu_device *adev)
277 struct drm_device *dev = adev_to_drm(adev);
278 struct drm_connector *connector;
279 struct drm_connector_list_iter iter;
282 drm_connector_list_iter_begin(dev, &iter);
283 drm_for_each_connector_iter(connector, &iter) {
284 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
286 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
289 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
290 tmp |= DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
291 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
293 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
294 connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
295 /* don't try to enable hpd on eDP or LVDS to avoid breaking the
296 * aux dp channel on imac and to help (but not completely fix)
297 * https://bugzilla.redhat.com/show_bug.cgi?id=726143
298 * this also avoids interrupt storms during dpms.
300 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
301 tmp &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
302 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
306 dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd);
307 amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
309 drm_connector_list_iter_end(&iter);
313 * dce_v8_0_hpd_fini - hpd tear down callback.
315 * @adev: amdgpu_device pointer
317 * Tear down the hpd pins used by the card (evergreen+).
318 * Disable the hpd interrupts.
320 static void dce_v8_0_hpd_fini(struct amdgpu_device *adev)
322 struct drm_device *dev = adev_to_drm(adev);
323 struct drm_connector *connector;
324 struct drm_connector_list_iter iter;
327 drm_connector_list_iter_begin(dev, &iter);
328 drm_for_each_connector_iter(connector, &iter) {
329 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
331 if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd)
334 tmp = RREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]);
335 tmp &= ~DC_HPD1_CONTROL__DC_HPD1_EN_MASK;
336 WREG32(mmDC_HPD1_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp);
338 amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd);
340 drm_connector_list_iter_end(&iter);
343 static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev)
345 return mmDC_GPIO_HPD_A;
348 static bool dce_v8_0_is_display_hung(struct amdgpu_device *adev)
354 for (i = 0; i < adev->mode_info.num_crtc; i++) {
355 if (RREG32(mmCRTC_CONTROL + crtc_offsets[i]) & CRTC_CONTROL__CRTC_MASTER_EN_MASK) {
356 crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
357 crtc_hung |= (1 << i);
361 for (j = 0; j < 10; j++) {
362 for (i = 0; i < adev->mode_info.num_crtc; i++) {
363 if (crtc_hung & (1 << i)) {
364 tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]);
365 if (tmp != crtc_status[i])
366 crtc_hung &= ~(1 << i);
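/* Hang check logic: the HV counters of all enabled CRTCs are sampled once,
 * then re-read up to 10 times; a CRTC whose counter advances is cleared from
 * crtc_hung, so any bit still set afterwards marks a display controller whose
 * timing generator appears stuck.
 */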
377 static void dce_v8_0_set_vga_render_state(struct amdgpu_device *adev,
382 /* Lock out access through the VGA aperture */
383 tmp = RREG32(mmVGA_HDP_CONTROL);
385 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0);
387 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
388 WREG32(mmVGA_HDP_CONTROL, tmp);
390 /* disable VGA render */
391 tmp = RREG32(mmVGA_RENDER_CONTROL);
393 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1);
395 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
396 WREG32(mmVGA_RENDER_CONTROL, tmp);
399 static int dce_v8_0_get_num_crtc(struct amdgpu_device *adev)
403 switch (adev->asic_type) {
421 void dce_v8_0_disable_dce(struct amdgpu_device *adev)
423 /* Disable VGA render and any enabled CRTCs, if the ASIC has a DCE engine */
424 if (amdgpu_atombios_has_dce_engine_info(adev)) {
428 dce_v8_0_set_vga_render_state(adev, false);
431 for (i = 0; i < dce_v8_0_get_num_crtc(adev); i++) {
432 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
433 CRTC_CONTROL, CRTC_MASTER_EN);
435 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
436 tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]);
437 tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0);
438 WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp);
439 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
445 static void dce_v8_0_program_fmt(struct drm_encoder *encoder)
447 struct drm_device *dev = encoder->dev;
448 struct amdgpu_device *adev = drm_to_adev(dev);
449 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
450 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
451 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
454 enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE;
457 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
458 bpc = amdgpu_connector_get_monitor_bpc(connector);
459 dither = amdgpu_connector->dither;
462 /* LVDS/eDP FMT is set up by atom */
463 if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
466 /* not needed for analog */
467 if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) ||
468 (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2))
476 if (dither == AMDGPU_FMT_DITHER_ENABLE)
477 /* XXX sort out optimal dither settings */
478 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
479 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
480 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
481 (0 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
483 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
484 (0 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
487 if (dither == AMDGPU_FMT_DITHER_ENABLE)
488 /* XXX sort out optimal dither settings */
489 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
490 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
491 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
492 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
493 (1 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
495 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
496 (1 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
499 if (dither == AMDGPU_FMT_DITHER_ENABLE)
500 /* XXX sort out optimal dither settings */
501 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK |
502 FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK |
503 FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK |
504 FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK |
505 (2 << FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT));
507 tmp |= (FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK |
508 (2 << FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT));
515 WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp);
519 /* display watermark setup */
521 * dce_v8_0_line_buffer_adjust - Set up the line buffer
523 * @adev: amdgpu_device pointer
524 * @amdgpu_crtc: the selected display controller
525 * @mode: the current display mode on the selected display
528 * Set up the line buffer allocation for
529 * the selected display controller (CIK).
530 * Returns the line buffer size in pixels.
532 static u32 dce_v8_0_line_buffer_adjust(struct amdgpu_device *adev,
533 struct amdgpu_crtc *amdgpu_crtc,
534 struct drm_display_mode *mode)
536 u32 tmp, buffer_alloc, i;
537 u32 pipe_offset = amdgpu_crtc->crtc_id * 0x8;
540 * There are 6 line buffers, one for each display controller.
541 * There are 3 partitions per LB. Select the number of partitions
542 * to enable based on the display width. For display widths larger
543 * than 4096, you need to use 2 display controllers and combine
544 * them using the stereo blender.
546 if (amdgpu_crtc->base.enabled && mode) {
547 if (mode->crtc_hdisplay < 1920) {
550 } else if (mode->crtc_hdisplay < 2560) {
553 } else if (mode->crtc_hdisplay < 4096) {
555 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
557 DRM_DEBUG_KMS("Mode too big for LB!\n");
559 buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4;
566 WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset,
567 (tmp << LB_MEMORY_CTRL__LB_MEMORY_CONFIG__SHIFT) |
568 (0x6B0 << LB_MEMORY_CTRL__LB_MEMORY_SIZE__SHIFT));
570 WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
571 (buffer_alloc << PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATED__SHIFT));
572 for (i = 0; i < adev->usec_timeout; i++) {
573 if (RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
574 PIPE0_DMIF_BUFFER_CONTROL__DMIF_BUFFERS_ALLOCATION_COMPLETED_MASK)
579 if (amdgpu_crtc->base.enabled && mode) {
591 /* controller not enabled, so no lb used */
596 * cik_get_number_of_dram_channels - get the number of dram channels
598 * @adev: amdgpu_device pointer
600 * Look up the number of video ram channels (CIK).
601 * Used for display watermark bandwidth calculations
602 * Returns the number of dram channels
604 static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev)
606 u32 tmp = RREG32(mmMC_SHARED_CHMAP);
608 switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
631 struct dce8_wm_params {
632 u32 dram_channels; /* number of dram channels */
633 u32 yclk; /* bandwidth per dram data pin in kHz */
634 u32 sclk; /* engine clock in kHz */
635 u32 disp_clk; /* display clock in kHz */
636 u32 src_width; /* viewport width */
637 u32 active_time; /* active display time in ns */
638 u32 blank_time; /* blank time in ns */
639 bool interlaced; /* mode is interlaced */
640 fixed20_12 vsc; /* vertical scale ratio */
641 u32 num_heads; /* number of active crtcs */
642 u32 bytes_per_pixel; /* bytes per pixel display + overlay */
643 u32 lb_size; /* line buffer allocated to pipe */
644 u32 vtaps; /* vertical scaler taps */
648 * dce_v8_0_dram_bandwidth - get the dram bandwidth
650 * @wm: watermark calculation data
652 * Calculate the raw dram bandwidth (CIK).
653 * Used for display watermark bandwidth calculations
654 * Returns the dram bandwidth in MBytes/s
656 static u32 dce_v8_0_dram_bandwidth(struct dce8_wm_params *wm)
658 /* Calculate raw DRAM Bandwidth */
659 fixed20_12 dram_efficiency; /* 0.7 */
660 fixed20_12 yclk, dram_channels, bandwidth;
663 a.full = dfixed_const(1000);
664 yclk.full = dfixed_const(wm->yclk);
665 yclk.full = dfixed_div(yclk, a);
666 dram_channels.full = dfixed_const(wm->dram_channels * 4);
667 a.full = dfixed_const(10);
668 dram_efficiency.full = dfixed_const(7);
669 dram_efficiency.full = dfixed_div(dram_efficiency, a);
670 bandwidth.full = dfixed_mul(dram_channels, yclk);
671 bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
673 return dfixed_trunc(bandwidth);
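/* Worked example with illustrative numbers: yclk = 1,000,000 kHz (1 GHz per
 * data pin) and 4 dram channels gives (4 ch * 4 bytes) * 1000 MHz * 0.7
 * efficiency ~= 11200 MBytes/s of raw dram bandwidth.
 */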
677 * dce_v8_0_dram_bandwidth_for_display - get the dram bandwidth for display
679 * @wm: watermark calculation data
681 * Calculate the dram bandwidth used for display (CIK).
682 * Used for display watermark bandwidth calculations
683 * Returns the dram bandwidth for display in MBytes/s
685 static u32 dce_v8_0_dram_bandwidth_for_display(struct dce8_wm_params *wm)
687 /* Calculate DRAM Bandwidth and the part allocated to display. */
688 fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
689 fixed20_12 yclk, dram_channels, bandwidth;
692 a.full = dfixed_const(1000);
693 yclk.full = dfixed_const(wm->yclk);
694 yclk.full = dfixed_div(yclk, a);
695 dram_channels.full = dfixed_const(wm->dram_channels * 4);
696 a.full = dfixed_const(10);
697 disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
698 disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
699 bandwidth.full = dfixed_mul(dram_channels, yclk);
700 bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
702 return dfixed_trunc(bandwidth);
706 * dce_v8_0_data_return_bandwidth - get the data return bandwidth
708 * @wm: watermark calculation data
710 * Calculate the data return bandwidth used for display (CIK).
711 * Used for display watermark bandwidth calculations
712 * Returns the data return bandwidth in MBytes/s
714 static u32 dce_v8_0_data_return_bandwidth(struct dce8_wm_params *wm)
716 /* Calculate the display Data return Bandwidth */
717 fixed20_12 return_efficiency; /* 0.8 */
718 fixed20_12 sclk, bandwidth;
721 a.full = dfixed_const(1000);
722 sclk.full = dfixed_const(wm->sclk);
723 sclk.full = dfixed_div(sclk, a);
724 a.full = dfixed_const(10);
725 return_efficiency.full = dfixed_const(8);
726 return_efficiency.full = dfixed_div(return_efficiency, a);
727 a.full = dfixed_const(32);
728 bandwidth.full = dfixed_mul(a, sclk);
729 bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
731 return dfixed_trunc(bandwidth);
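/* E.g. (illustrative): sclk = 800,000 kHz (800 MHz) gives 32 bytes * 800 MHz
 * * 0.8 efficiency ~= 20480 MBytes/s of data return bandwidth.
 */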
735 * dce_v8_0_dmif_request_bandwidth - get the dmif bandwidth
737 * @wm: watermark calculation data
739 * Calculate the dmif bandwidth used for display (CIK).
740 * Used for display watermark bandwidth calculations
741 * Returns the dmif bandwidth in MBytes/s
743 static u32 dce_v8_0_dmif_request_bandwidth(struct dce8_wm_params *wm)
745 /* Calculate the DMIF Request Bandwidth */
746 fixed20_12 disp_clk_request_efficiency; /* 0.8 */
747 fixed20_12 disp_clk, bandwidth;
750 a.full = dfixed_const(1000);
751 disp_clk.full = dfixed_const(wm->disp_clk);
752 disp_clk.full = dfixed_div(disp_clk, a);
753 a.full = dfixed_const(32);
754 b.full = dfixed_mul(a, disp_clk);
756 a.full = dfixed_const(10);
757 disp_clk_request_efficiency.full = dfixed_const(8);
758 disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
760 bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency);
762 return dfixed_trunc(bandwidth);
766 * dce_v8_0_available_bandwidth - get the min available bandwidth
768 * @wm: watermark calculation data
770 * Calculate the min available bandwidth used for display (CIK).
771 * Used for display watermark bandwidth calculations
772 * Returns the min available bandwidth in MBytes/s
774 static u32 dce_v8_0_available_bandwidth(struct dce8_wm_params *wm)
776 /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
777 u32 dram_bandwidth = dce_v8_0_dram_bandwidth(wm);
778 u32 data_return_bandwidth = dce_v8_0_data_return_bandwidth(wm);
779 u32 dmif_req_bandwidth = dce_v8_0_dmif_request_bandwidth(wm);
781 return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
785 * dce_v8_0_average_bandwidth - get the average available bandwidth
787 * @wm: watermark calculation data
789 * Calculate the average available bandwidth used for display (CIK).
790 * Used for display watermark bandwidth calculations
791 * Returns the average available bandwidth in MBytes/s
793 static u32 dce_v8_0_average_bandwidth(struct dce8_wm_params *wm)
795 /* Calculate the display mode Average Bandwidth
796 * DisplayMode should contain the source and destination dimensions,
800 fixed20_12 line_time;
801 fixed20_12 src_width;
802 fixed20_12 bandwidth;
805 a.full = dfixed_const(1000);
806 line_time.full = dfixed_const(wm->active_time + wm->blank_time);
807 line_time.full = dfixed_div(line_time, a);
808 bpp.full = dfixed_const(wm->bytes_per_pixel);
809 src_width.full = dfixed_const(wm->src_width);
810 bandwidth.full = dfixed_mul(src_width, bpp);
811 bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
812 bandwidth.full = dfixed_div(bandwidth, line_time);
814 return dfixed_trunc(bandwidth);
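/* E.g. (illustrative): a 1920 pixel wide source at 4 bytes/pixel, vsc = 1.0
 * and a ~14.8 us line time (1080p@60, 148.5 MHz pixel clock, 2200 pixel
 * htotal) averages roughly 1920 * 4 / 14.8 ~= 519 MBytes/s.
 */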
818 * dce_v8_0_latency_watermark - get the latency watermark
820 * @wm: watermark calculation data
822 * Calculate the latency watermark (CIK).
823 * Used for display watermark bandwidth calculations
824 * Returns the latency watermark in ns
826 static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
828 /* First calculate the latency in ns */
829 u32 mc_latency = 2000; /* 2000 ns. */
830 u32 available_bandwidth = dce_v8_0_available_bandwidth(wm);
831 u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
832 u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
833 u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
834 u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
835 (wm->num_heads * cursor_line_pair_return_time);
836 u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
837 u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
838 u32 tmp, dmif_size = 12288;
841 if (wm->num_heads == 0)
844 a.full = dfixed_const(2);
845 b.full = dfixed_const(1);
846 if ((wm->vsc.full > a.full) ||
847 ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
849 ((wm->vsc.full >= a.full) && wm->interlaced))
850 max_src_lines_per_dst_line = 4;
852 max_src_lines_per_dst_line = 2;
854 a.full = dfixed_const(available_bandwidth);
855 b.full = dfixed_const(wm->num_heads);
856 a.full = dfixed_div(a, b);
857 tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
858 tmp = min(dfixed_trunc(a), tmp);
860 lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
862 a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
863 b.full = dfixed_const(1000);
864 c.full = dfixed_const(lb_fill_bw);
865 b.full = dfixed_div(c, b);
866 a.full = dfixed_div(a, b);
867 line_fill_time = dfixed_trunc(a);
869 if (line_fill_time < wm->active_time)
872 return latency + (line_fill_time - wm->active_time);
877 * dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display - check
878 * average and available dram bandwidth
880 * @wm: watermark calculation data
882 * Check if the display average bandwidth fits in the display
883 * dram bandwidth (CIK).
884 * Used for display watermark bandwidth calculations
885 * Returns true if the display fits, false if not.
887 static bool dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce8_wm_params *wm)
889 if (dce_v8_0_average_bandwidth(wm) <=
890 (dce_v8_0_dram_bandwidth_for_display(wm) / wm->num_heads))
897 * dce_v8_0_average_bandwidth_vs_available_bandwidth - check
898 * average and available bandwidth
900 * @wm: watermark calculation data
902 * Check if the display average bandwidth fits in the display
903 * available bandwidth (CIK).
904 * Used for display watermark bandwidth calculations
905 * Returns true if the display fits, false if not.
907 static bool dce_v8_0_average_bandwidth_vs_available_bandwidth(struct dce8_wm_params *wm)
909 if (dce_v8_0_average_bandwidth(wm) <=
910 (dce_v8_0_available_bandwidth(wm) / wm->num_heads))
917 * dce_v8_0_check_latency_hiding - check latency hiding
919 * @wm: watermark calculation data
921 * Check latency hiding (CIK).
922 * Used for display watermark bandwidth calculations
923 * Returns true if the display fits, false if not.
925 static bool dce_v8_0_check_latency_hiding(struct dce8_wm_params *wm)
927 u32 lb_partitions = wm->lb_size / wm->src_width;
928 u32 line_time = wm->active_time + wm->blank_time;
929 u32 latency_tolerant_lines;
933 a.full = dfixed_const(1);
934 if (wm->vsc.full > a.full)
935 latency_tolerant_lines = 1;
937 if (lb_partitions <= (wm->vtaps + 1))
938 latency_tolerant_lines = 1;
940 latency_tolerant_lines = 2;
943 latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
945 if (dce_v8_0_latency_watermark(wm) <= latency_hiding)
952 * dce_v8_0_program_watermarks - program display watermarks
954 * @adev: amdgpu_device pointer
955 * @amdgpu_crtc: the selected display controller
956 * @lb_size: line buffer size
957 * @num_heads: number of display controllers in use
959 * Calculate and program the display watermarks for the
960 * selected display controller (CIK).
962 static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
963 struct amdgpu_crtc *amdgpu_crtc,
964 u32 lb_size, u32 num_heads)
966 struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
967 struct dce8_wm_params wm_low, wm_high;
970 u32 latency_watermark_a = 0, latency_watermark_b = 0;
971 u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
973 if (amdgpu_crtc->base.enabled && num_heads && mode) {
974 active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
976 line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
978 line_time = min_t(u32, line_time, 65535);
980 /* watermark for high clocks */
981 if (adev->pm.dpm_enabled) {
983 amdgpu_dpm_get_mclk(adev, false) * 10;
985 amdgpu_dpm_get_sclk(adev, false) * 10;
987 wm_high.yclk = adev->pm.current_mclk * 10;
988 wm_high.sclk = adev->pm.current_sclk * 10;
991 wm_high.disp_clk = mode->clock;
992 wm_high.src_width = mode->crtc_hdisplay;
993 wm_high.active_time = active_time;
994 wm_high.blank_time = line_time - wm_high.active_time;
995 wm_high.interlaced = false;
996 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
997 wm_high.interlaced = true;
998 wm_high.vsc = amdgpu_crtc->vsc;
1000 if (amdgpu_crtc->rmx_type != RMX_OFF)
1002 wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
1003 wm_high.lb_size = lb_size;
1004 wm_high.dram_channels = cik_get_number_of_dram_channels(adev);
1005 wm_high.num_heads = num_heads;
1007 /* set for high clocks */
1008 latency_watermark_a = min_t(u32, dce_v8_0_latency_watermark(&wm_high), 65535);
1010 /* possibly force display priority to high */
1011 /* should really do this at mode validation time... */
1012 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
1013 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_high) ||
1014 !dce_v8_0_check_latency_hiding(&wm_high) ||
1015 (adev->mode_info.disp_priority == 2)) {
1016 DRM_DEBUG_KMS("force priority to high\n");
1019 /* watermark for low clocks */
1020 if (adev->pm.dpm_enabled) {
1022 amdgpu_dpm_get_mclk(adev, true) * 10;
1024 amdgpu_dpm_get_sclk(adev, true) * 10;
1026 wm_low.yclk = adev->pm.current_mclk * 10;
1027 wm_low.sclk = adev->pm.current_sclk * 10;
1030 wm_low.disp_clk = mode->clock;
1031 wm_low.src_width = mode->crtc_hdisplay;
1032 wm_low.active_time = active_time;
1033 wm_low.blank_time = line_time - wm_low.active_time;
1034 wm_low.interlaced = false;
1035 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1036 wm_low.interlaced = true;
1037 wm_low.vsc = amdgpu_crtc->vsc;
1039 if (amdgpu_crtc->rmx_type != RMX_OFF)
1041 wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
1042 wm_low.lb_size = lb_size;
1043 wm_low.dram_channels = cik_get_number_of_dram_channels(adev);
1044 wm_low.num_heads = num_heads;
1046 /* set for low clocks */
1047 latency_watermark_b = min_t(u32, dce_v8_0_latency_watermark(&wm_low), 65535);
1049 /* possibly force display priority to high */
1050 /* should really do this at mode validation time... */
1051 if (!dce_v8_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
1052 !dce_v8_0_average_bandwidth_vs_available_bandwidth(&wm_low) ||
1053 !dce_v8_0_check_latency_hiding(&wm_low) ||
1054 (adev->mode_info.disp_priority == 2)) {
1055 DRM_DEBUG_KMS("force priority to high\n");
1057 lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay);
1061 wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1063 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1064 tmp |= (1 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1065 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1066 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1067 ((latency_watermark_a << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1068 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1070 tmp = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset);
1071 tmp &= ~(3 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1072 tmp |= (2 << DPG_WATERMARK_MASK_CONTROL__URGENCY_WATERMARK_MASK__SHIFT);
1073 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp);
1074 WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset,
1075 ((latency_watermark_b << DPG_PIPE_URGENCY_CONTROL__URGENCY_LOW_WATERMARK__SHIFT) |
1076 (line_time << DPG_PIPE_URGENCY_CONTROL__URGENCY_HIGH_WATERMARK__SHIFT)));
1077 /* restore original selection */
1078 WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask);
1080 /* save values for DPM */
1081 amdgpu_crtc->line_time = line_time;
1082 amdgpu_crtc->wm_high = latency_watermark_a;
1083 amdgpu_crtc->wm_low = latency_watermark_b;
1084 /* Save number of lines the linebuffer leads before the scanout */
1085 amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines;
1089 * dce_v8_0_bandwidth_update - program display watermarks
1091 * @adev: amdgpu_device pointer
1093 * Calculate and program the display watermarks and line
1094 * buffer allocation (CIK).
1096 static void dce_v8_0_bandwidth_update(struct amdgpu_device *adev)
1098 struct drm_display_mode *mode = NULL;
1099 u32 num_heads = 0, lb_size;
1102 amdgpu_display_update_priority(adev);
1104 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1105 if (adev->mode_info.crtcs[i]->base.enabled)
1108 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1109 mode = &adev->mode_info.crtcs[i]->base.mode;
1110 lb_size = dce_v8_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode);
1111 dce_v8_0_program_watermarks(adev, adev->mode_info.crtcs[i],
1112 lb_size, num_heads);
1116 static void dce_v8_0_audio_get_connected_pins(struct amdgpu_device *adev)
1121 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1122 offset = adev->mode_info.audio.pin[i].offset;
1123 tmp = RREG32_AUDIO_ENDPT(offset,
1124 ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
1126 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >>
1127 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1)
1128 adev->mode_info.audio.pin[i].connected = false;
1130 adev->mode_info.audio.pin[i].connected = true;
1134 static struct amdgpu_audio_pin *dce_v8_0_audio_get_pin(struct amdgpu_device *adev)
1138 dce_v8_0_audio_get_connected_pins(adev);
1140 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1141 if (adev->mode_info.audio.pin[i].connected)
1142 return &adev->mode_info.audio.pin[i];
1144 DRM_ERROR("No connected audio pins found!\n");
1148 static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder)
1150 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
1151 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1152 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1155 if (!dig || !dig->afmt || !dig->afmt->pin)
1158 offset = dig->afmt->offset;
1160 WREG32(mmAFMT_AUDIO_SRC_CONTROL + offset,
1161 (dig->afmt->pin->id << AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT));
1164 static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder,
1165 struct drm_display_mode *mode)
1167 struct drm_device *dev = encoder->dev;
1168 struct amdgpu_device *adev = drm_to_adev(dev);
1169 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1170 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1171 struct drm_connector *connector;
1172 struct drm_connector_list_iter iter;
1173 struct amdgpu_connector *amdgpu_connector = NULL;
1174 u32 tmp = 0, offset;
1176 if (!dig || !dig->afmt || !dig->afmt->pin)
1179 offset = dig->afmt->pin->offset;
1181 drm_connector_list_iter_begin(dev, &iter);
1182 drm_for_each_connector_iter(connector, &iter) {
1183 if (connector->encoder == encoder) {
1184 amdgpu_connector = to_amdgpu_connector(connector);
1188 drm_connector_list_iter_end(&iter);
1190 if (!amdgpu_connector) {
1191 DRM_ERROR("Couldn't find encoder's connector\n");
1195 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
1196 if (connector->latency_present[1])
1198 (connector->video_latency[1] <<
1199 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1200 (connector->audio_latency[1] <<
1201 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1205 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1207 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1209 if (connector->latency_present[0])
1211 (connector->video_latency[0] <<
1212 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1213 (connector->audio_latency[0] <<
1214 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1218 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT) |
1220 AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT);
1223 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
1226 static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
1228 struct drm_device *dev = encoder->dev;
1229 struct amdgpu_device *adev = drm_to_adev(dev);
1230 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1231 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1232 struct drm_connector *connector;
1233 struct drm_connector_list_iter iter;
1234 struct amdgpu_connector *amdgpu_connector = NULL;
1239 if (!dig || !dig->afmt || !dig->afmt->pin)
1242 offset = dig->afmt->pin->offset;
1244 drm_connector_list_iter_begin(dev, &iter);
1245 drm_for_each_connector_iter(connector, &iter) {
1246 if (connector->encoder == encoder) {
1247 amdgpu_connector = to_amdgpu_connector(connector);
1251 drm_connector_list_iter_end(&iter);
1253 if (!amdgpu_connector) {
1254 DRM_ERROR("Couldn't find encoder's connector\n");
1258 sad_count = drm_edid_to_speaker_allocation(amdgpu_connector_edid(connector), &sadb);
1259 if (sad_count < 0) {
1260 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
1264 /* program the speaker allocation */
1265 tmp = RREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
1266 tmp &= ~(AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK |
1267 AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK);
1269 tmp |= AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK;
1271 tmp |= (sadb[0] << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT);
1273 tmp |= (5 << AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT); /* stereo */
1274 WREG32_AUDIO_ENDPT(offset, ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
1279 static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
1281 struct drm_device *dev = encoder->dev;
1282 struct amdgpu_device *adev = drm_to_adev(dev);
1283 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1284 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1286 struct drm_connector *connector;
1287 struct drm_connector_list_iter iter;
1288 struct amdgpu_connector *amdgpu_connector = NULL;
1289 struct cea_sad *sads;
1292 static const u16 eld_reg_to_type[][2] = {
1293 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
1294 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
1295 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
1296 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
1297 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
1298 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
1299 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
1300 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
1301 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
1302 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
1303 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
1304 { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
1307 if (!dig || !dig->afmt || !dig->afmt->pin)
1310 offset = dig->afmt->pin->offset;
1312 drm_connector_list_iter_begin(dev, &iter);
1313 drm_for_each_connector_iter(connector, &iter) {
1314 if (connector->encoder == encoder) {
1315 amdgpu_connector = to_amdgpu_connector(connector);
1319 drm_connector_list_iter_end(&iter);
1321 if (!amdgpu_connector) {
1322 DRM_ERROR("Couldn't find encoder's connector\n");
1326 sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads);
1328 DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
1333 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
1335 u8 stereo_freqs = 0;
1336 int max_channels = -1;
1339 for (j = 0; j < sad_count; j++) {
1340 struct cea_sad *sad = &sads[j];
1342 if (sad->format == eld_reg_to_type[i][1]) {
1343 if (sad->channels > max_channels) {
1344 value = (sad->channels <<
1345 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT) |
1347 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT) |
1349 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT);
1350 max_channels = sad->channels;
1353 if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
1354 stereo_freqs |= sad->freq;
1360 value |= (stereo_freqs <<
1361 AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT);
1363 WREG32_AUDIO_ENDPT(offset, eld_reg_to_type[i][0], value);
1369 static void dce_v8_0_audio_enable(struct amdgpu_device *adev,
1370 struct amdgpu_audio_pin *pin,
1376 WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
1377 enable ? AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0);
1380 static const u32 pin_offsets[7] = {
1390 static int dce_v8_0_audio_init(struct amdgpu_device *adev)
1397 adev->mode_info.audio.enabled = true;
1399 if (adev->asic_type == CHIP_KAVERI) /* KV: 4 streams, 7 endpoints */
1400 adev->mode_info.audio.num_pins = 7;
1401 else if ((adev->asic_type == CHIP_KABINI) ||
1402 (adev->asic_type == CHIP_MULLINS)) /* KB/ML: 2 streams, 3 endpoints */
1403 adev->mode_info.audio.num_pins = 3;
1404 else if ((adev->asic_type == CHIP_BONAIRE) ||
1405 (adev->asic_type == CHIP_HAWAII))/* BN/HW: 6 streams, 7 endpoints */
1406 adev->mode_info.audio.num_pins = 7;
1408 adev->mode_info.audio.num_pins = 3;
1410 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1411 adev->mode_info.audio.pin[i].channels = -1;
1412 adev->mode_info.audio.pin[i].rate = -1;
1413 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1414 adev->mode_info.audio.pin[i].status_bits = 0;
1415 adev->mode_info.audio.pin[i].category_code = 0;
1416 adev->mode_info.audio.pin[i].connected = false;
1417 adev->mode_info.audio.pin[i].offset = pin_offsets[i];
1418 adev->mode_info.audio.pin[i].id = i;
1419 /* disable audio. it will be set up later */
1420 /* XXX remove once we switch to ip funcs */
1421 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1427 static void dce_v8_0_audio_fini(struct amdgpu_device *adev)
1434 if (!adev->mode_info.audio.enabled)
1437 for (i = 0; i < adev->mode_info.audio.num_pins; i++)
1438 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
1440 adev->mode_info.audio.enabled = false;
1444 * update the N and CTS parameters for a given pixel clock rate
1446 static void dce_v8_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock)
1448 struct drm_device *dev = encoder->dev;
1449 struct amdgpu_device *adev = drm_to_adev(dev);
1450 struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock);
1451 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1452 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1453 uint32_t offset = dig->afmt->offset;
1455 WREG32(mmHDMI_ACR_32_0 + offset, (acr.cts_32khz << HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT));
1456 WREG32(mmHDMI_ACR_32_1 + offset, acr.n_32khz);
1458 WREG32(mmHDMI_ACR_44_0 + offset, (acr.cts_44_1khz << HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT));
1459 WREG32(mmHDMI_ACR_44_1 + offset, acr.n_44_1khz);
1461 WREG32(mmHDMI_ACR_48_0 + offset, (acr.cts_48khz << HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT));
1462 WREG32(mmHDMI_ACR_48_1 + offset, acr.n_48khz);
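/* N and CTS implement HDMI audio clock regeneration: the sink recovers the
 * audio sample clock from 128*fs = f_TMDS * N / CTS. amdgpu_afmt_acr() derives
 * the N/CTS pairs for 32, 44.1 and 48 kHz from the pixel clock; the *_0
 * registers take CTS and the *_1 registers take N.
 */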
1466 * build an HDMI Video Info Frame
1468 static void dce_v8_0_afmt_update_avi_infoframe(struct drm_encoder *encoder,
1469 void *buffer, size_t size)
1471 struct drm_device *dev = encoder->dev;
1472 struct amdgpu_device *adev = drm_to_adev(dev);
1473 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1474 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1475 uint32_t offset = dig->afmt->offset;
1476 uint8_t *frame = buffer + 3;
1477 uint8_t *header = buffer;
1479 WREG32(mmAFMT_AVI_INFO0 + offset,
1480 frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
1481 WREG32(mmAFMT_AVI_INFO1 + offset,
1482 frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
1483 WREG32(mmAFMT_AVI_INFO2 + offset,
1484 frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
1485 WREG32(mmAFMT_AVI_INFO3 + offset,
1486 frame[0xC] | (frame[0xD] << 8) | (header[1] << 24));
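/* The packed AVI infoframe is copied into the AFMT registers little-endian:
 * frame points past the 3-byte header, so frame[0x0..0xD] (checksum plus the
 * payload bytes) fill AFMT_AVI_INFO0..3, with the infoframe version byte
 * (header[1]) placed in the top byte of AFMT_AVI_INFO3.
 */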
1489 static void dce_v8_0_audio_set_dto(struct drm_encoder *encoder, u32 clock)
1491 struct drm_device *dev = encoder->dev;
1492 struct amdgpu_device *adev = drm_to_adev(dev);
1493 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1494 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1495 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1496 u32 dto_phase = 24 * 1000;
1497 u32 dto_modulo = clock;
1499 if (!dig || !dig->afmt)
1502 /* XXX two dtos; generally use dto0 for hdmi */
1503 /* Express [24MHz / target pixel clock] as an exact rational
1504 * number (a ratio of two integers). DCCG_AUDIO_DTOx_PHASE
1505 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
1507 WREG32(mmDCCG_AUDIO_DTO_SOURCE, (amdgpu_crtc->crtc_id << DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT));
1508 WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase);
1509 WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo);
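/* E.g. (illustrative): for a 148.5 MHz pixel clock (clock == 148500) the DTO
 * is programmed with phase 24000 and module 148500, i.e. the exact ratio
 * 24 MHz / 148.5 MHz used to derive the audio reference clock.
 */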
1513 * update the info frames with the data from the current display mode
1515 static void dce_v8_0_afmt_setmode(struct drm_encoder *encoder,
1516 struct drm_display_mode *mode)
1518 struct drm_device *dev = encoder->dev;
1519 struct amdgpu_device *adev = drm_to_adev(dev);
1520 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1521 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1522 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
1523 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
1524 struct hdmi_avi_infoframe frame;
1525 uint32_t offset, val;
1529 if (!dig || !dig->afmt)
1532 /* Silent, r600_hdmi_enable will raise WARN for us */
1533 if (!dig->afmt->enabled)
1536 offset = dig->afmt->offset;
1538 /* hdmi deep color mode general control packets setup, if bpc > 8 */
1539 if (encoder->crtc) {
1540 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc);
1541 bpc = amdgpu_crtc->bpc;
1544 /* disable audio prior to setting up hw */
1545 dig->afmt->pin = dce_v8_0_audio_get_pin(adev);
1546 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1548 dce_v8_0_audio_set_dto(encoder, mode->clock);
1550 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1551 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK); /* send null packets when required */
1553 WREG32(mmAFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
1555 val = RREG32(mmHDMI_CONTROL + offset);
1556 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1557 val &= ~HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK;
1565 DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n",
1566 connector->name, bpc);
1569 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1570 val |= 1 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1571 DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n",
1575 val |= HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK;
1576 val |= 2 << HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT;
1577 DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n",
1582 WREG32(mmHDMI_CONTROL + offset, val);
1584 WREG32(mmHDMI_VBI_PACKET_CONTROL + offset,
1585 HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK | /* send null packets when required */
1586 HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK | /* send general control packets */
1587 HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK); /* send general control packets every frame */
1589 WREG32(mmHDMI_INFOFRAME_CONTROL0 + offset,
1590 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK | /* enable audio info frames (frames won't be set until audio is enabled) */
1591 HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK); /* required for audio info values to be updated */
1593 WREG32(mmAFMT_INFOFRAME_CONTROL0 + offset,
1594 AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK); /* required for audio info values to be updated */
1596 WREG32(mmHDMI_INFOFRAME_CONTROL1 + offset,
1597 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT)); /* anything other than 0 */
1599 WREG32(mmHDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
1601 WREG32(mmHDMI_AUDIO_PACKET_CONTROL + offset,
1602 (1 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT) | /* set the default audio delay */
1603 (3 << HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT)); /* should be sufficient for all audio modes and small enough for all hblanks */
1605 WREG32(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1606 AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK); /* allow 60958 channel status fields to be updated */
1608 /* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
1611 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1612 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1614 WREG32(mmHDMI_ACR_PACKET_CONTROL + offset,
1615 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK | /* select SW CTS value */
1616 HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK); /* allow hw to send ACR packets when required */
1618 dce_v8_0_afmt_update_ACR(encoder, mode->clock);
1620 WREG32(mmAFMT_60958_0 + offset,
1621 (1 << AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT));
1623 WREG32(mmAFMT_60958_1 + offset,
1624 (2 << AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT));
1626 WREG32(mmAFMT_60958_2 + offset,
1627 (3 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT) |
1628 (4 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT) |
1629 (5 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT) |
1630 (6 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT) |
1631 (7 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT) |
1632 (8 << AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT));
1634 dce_v8_0_audio_write_speaker_allocation(encoder);
1637 WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + offset,
1638 (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT));
1640 dce_v8_0_afmt_audio_select_pin(encoder);
1641 dce_v8_0_audio_write_sad_regs(encoder);
1642 dce_v8_0_audio_write_latency_fields(encoder, mode);
1644 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode);
1646 DRM_ERROR("failed to setup AVI infoframe: %zd\n", err);
1650 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
1652 DRM_ERROR("failed to pack AVI infoframe: %zd\n", err);
1656 dce_v8_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer));
1658 WREG32_OR(mmHDMI_INFOFRAME_CONTROL0 + offset,
1659 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_SEND_MASK | /* enable AVI info frames */
1660 HDMI_INFOFRAME_CONTROL0__HDMI_AVI_INFO_CONT_MASK); /* required for AVI info values to be updated */
1662 WREG32_P(mmHDMI_INFOFRAME_CONTROL1 + offset,
1663 (2 << HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE__SHIFT), /* anything other than 0 */
1664 ~HDMI_INFOFRAME_CONTROL1__HDMI_AVI_INFO_LINE_MASK);
1666 WREG32_OR(mmAFMT_AUDIO_PACKET_CONTROL + offset,
1667 AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK); /* send audio packets */
1669 WREG32(mmAFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
1670 WREG32(mmAFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
1671 WREG32(mmAFMT_RAMP_CONTROL2 + offset, 0x00000001);
1672 WREG32(mmAFMT_RAMP_CONTROL3 + offset, 0x00000001);
1674 /* enable audio after setting up hw */
1675 dce_v8_0_audio_enable(adev, dig->afmt->pin, true);
1678 static void dce_v8_0_afmt_enable(struct drm_encoder *encoder, bool enable)
1680 struct drm_device *dev = encoder->dev;
1681 struct amdgpu_device *adev = drm_to_adev(dev);
1682 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
1683 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
1685 if (!dig || !dig->afmt)
1688 /* Silent, r600_hdmi_enable will raise WARN for us */
1689 if (enable && dig->afmt->enabled)
1691 if (!enable && !dig->afmt->enabled)
1694 if (!enable && dig->afmt->pin) {
1695 dce_v8_0_audio_enable(adev, dig->afmt->pin, false);
1696 dig->afmt->pin = NULL;
1699 dig->afmt->enabled = enable;
1701 DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n",
1702 enable ? "En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id);
1705 static int dce_v8_0_afmt_init(struct amdgpu_device *adev)
1709 for (i = 0; i < adev->mode_info.num_dig; i++)
1710 adev->mode_info.afmt[i] = NULL;
1712 /* DCE8 has audio blocks tied to DIG encoders */
1713 for (i = 0; i < adev->mode_info.num_dig; i++) {
1714 adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL);
1715 if (adev->mode_info.afmt[i]) {
1716 adev->mode_info.afmt[i]->offset = dig_offsets[i];
1717 adev->mode_info.afmt[i]->id = i;
1720 for (j = 0; j < i; j++) {
1721 kfree(adev->mode_info.afmt[j]);
1722 adev->mode_info.afmt[j] = NULL;
1730 static void dce_v8_0_afmt_fini(struct amdgpu_device *adev)
1734 for (i = 0; i < adev->mode_info.num_dig; i++) {
1735 kfree(adev->mode_info.afmt[i]);
1736 adev->mode_info.afmt[i] = NULL;
1740 static const u32 vga_control_regs[6] = {
1749 static void dce_v8_0_vga_enable(struct drm_crtc *crtc, bool enable)
1751 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1752 struct drm_device *dev = crtc->dev;
1753 struct amdgpu_device *adev = drm_to_adev(dev);
1756 vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1;
1758 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1);
1760 WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control);
1763 static void dce_v8_0_grph_enable(struct drm_crtc *crtc, bool enable)
1765 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1766 struct drm_device *dev = crtc->dev;
1767 struct amdgpu_device *adev = drm_to_adev(dev);
1770 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1);
1772 WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0);
1775 static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
1776 struct drm_framebuffer *fb,
1777 int x, int y, int atomic)
1779 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
1780 struct drm_device *dev = crtc->dev;
1781 struct amdgpu_device *adev = drm_to_adev(dev);
1782 struct drm_framebuffer *target_fb;
1783 struct drm_gem_object *obj;
1784 struct amdgpu_bo *abo;
1785 uint64_t fb_location, tiling_flags;
1786 uint32_t fb_format, fb_pitch_pixels;
1787 u32 fb_swap = (GRPH_ENDIAN_NONE << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1789 u32 viewport_w, viewport_h;
1791 bool bypass_lut = false;
1794 if (!atomic && !crtc->primary->fb) {
1795 DRM_DEBUG_KMS("No FB bound\n");
1802 target_fb = crtc->primary->fb;
1804 /* If atomic, assume fb object is pinned & idle & fenced and
1805 * just update base pointers
1807 obj = target_fb->obj[0];
1808 abo = gem_to_amdgpu_bo(obj);
1809 r = amdgpu_bo_reserve(abo, false);
1810 if (unlikely(r != 0))
1814 r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM);
1815 if (unlikely(r != 0)) {
1816 amdgpu_bo_unreserve(abo);
1820 fb_location = amdgpu_bo_gpu_offset(abo);
1822 amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
1823 amdgpu_bo_unreserve(abo);
1825 pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
1827 switch (target_fb->format->format) {
1829 fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1830 (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1832 case DRM_FORMAT_XRGB4444:
1833 case DRM_FORMAT_ARGB4444:
1834 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1835 (GRPH_FORMAT_ARGB4444 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1837 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1840 case DRM_FORMAT_XRGB1555:
1841 case DRM_FORMAT_ARGB1555:
1842 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1843 (GRPH_FORMAT_ARGB1555 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1845 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1848 case DRM_FORMAT_BGRX5551:
1849 case DRM_FORMAT_BGRA5551:
1850 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1851 (GRPH_FORMAT_BGRA5551 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1853 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1856 case DRM_FORMAT_RGB565:
1857 fb_format = ((GRPH_DEPTH_16BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1858 (GRPH_FORMAT_ARGB565 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1860 fb_swap = (GRPH_ENDIAN_8IN16 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1863 case DRM_FORMAT_XRGB8888:
1864 case DRM_FORMAT_ARGB8888:
1865 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1866 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1868 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1871 case DRM_FORMAT_XRGB2101010:
1872 case DRM_FORMAT_ARGB2101010:
1873 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1874 (GRPH_FORMAT_ARGB2101010 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1876 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1878 /* Greater than 8 bpc fb needs to bypass the hw LUT to retain precision */
1881 case DRM_FORMAT_BGRX1010102:
1882 case DRM_FORMAT_BGRA1010102:
1883 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1884 (GRPH_FORMAT_BGRA1010102 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1886 fb_swap = (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1888 /* Greater than 8 bpc fb needs to bypass the hw LUT to retain precision */
1891 case DRM_FORMAT_XBGR8888:
1892 case DRM_FORMAT_ABGR8888:
1893 fb_format = ((GRPH_DEPTH_32BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) |
1894 (GRPH_FORMAT_ARGB8888 << GRPH_CONTROL__GRPH_FORMAT__SHIFT));
1895 fb_swap = ((GRPH_RED_SEL_B << GRPH_SWAP_CNTL__GRPH_RED_CROSSBAR__SHIFT) |
1896 (GRPH_BLUE_SEL_R << GRPH_SWAP_CNTL__GRPH_BLUE_CROSSBAR__SHIFT));
1898 fb_swap |= (GRPH_ENDIAN_8IN32 << GRPH_SWAP_CNTL__GRPH_ENDIAN_SWAP__SHIFT);
1902 DRM_ERROR("Unsupported screen format %p4cc\n",
1903 &target_fb->format->format);
1907 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) {
1908 unsigned bankw, bankh, mtaspect, tile_split, num_banks;
1910 bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
1911 bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
1912 mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
1913 tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
1914 num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
1916 fb_format |= (num_banks << GRPH_CONTROL__GRPH_NUM_BANKS__SHIFT);
1917 fb_format |= (GRPH_ARRAY_2D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1918 fb_format |= (tile_split << GRPH_CONTROL__GRPH_TILE_SPLIT__SHIFT);
1919 fb_format |= (bankw << GRPH_CONTROL__GRPH_BANK_WIDTH__SHIFT);
1920 fb_format |= (bankh << GRPH_CONTROL__GRPH_BANK_HEIGHT__SHIFT);
1921 fb_format |= (mtaspect << GRPH_CONTROL__GRPH_MACRO_TILE_ASPECT__SHIFT);
1922 fb_format |= (DISPLAY_MICRO_TILING << GRPH_CONTROL__GRPH_MICRO_TILE_MODE__SHIFT);
1923 } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) {
1924 fb_format |= (GRPH_ARRAY_1D_TILED_THIN1 << GRPH_CONTROL__GRPH_ARRAY_MODE__SHIFT);
1927 fb_format |= (pipe_config << GRPH_CONTROL__GRPH_PIPE_CONFIG__SHIFT);
1929 dce_v8_0_vga_enable(crtc, false);
1931 /* Make sure surface address is updated at vertical blank rather than horizontal blank */
1934 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, 0);
1936 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1937 upper_32_bits(fb_location));
1938 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
1939 upper_32_bits(fb_location));
1940 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1941 (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK);
1942 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
1943 (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK);
1944 WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format);
1945 WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap);
1948 * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
1949 * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
1950 * retain the full precision throughout the pipeline.
1952 WREG32_P(mmGRPH_LUT_10BIT_BYPASS_CONTROL + amdgpu_crtc->crtc_offset,
1953 (bypass_lut ? LUT_10BIT_BYPASS_EN : 0),
1954 ~LUT_10BIT_BYPASS_EN);
1957 DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
1959 WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0);
1960 WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0);
1961 WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0);
1962 WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0);
1963 WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width);
1964 WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height);
1966 fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0];
1967 WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels);
1969 dce_v8_0_grph_enable(crtc, true);
1971 WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset,
1976 WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset,
1978 viewport_w = crtc->mode.hdisplay;
1979 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1980 WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset,
1981 (viewport_w << 16) | viewport_h);
1983 /* set pageflip to happen anywhere in vblank interval */
1984 WREG32(mmMASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0);
1986 if (!atomic && fb && fb != crtc->primary->fb) {
1987 abo = gem_to_amdgpu_bo(fb->obj[0]);
1988 r = amdgpu_bo_reserve(abo, true);
1989 if (unlikely(r != 0))
1991 amdgpu_bo_unpin(abo);
1992 amdgpu_bo_unreserve(abo);
1995 /* Bytes per pixel may have changed */
1996 dce_v8_0_bandwidth_update(adev);
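/*
 * Enable data interleaving in the line buffer for interlaced modes,
 * otherwise clear LB_DATA_FORMAT.
 */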
2001 static void dce_v8_0_set_interleave(struct drm_crtc *crtc,
2002 struct drm_display_mode *mode)
2004 struct drm_device *dev = crtc->dev;
2005 struct amdgpu_device *adev = drm_to_adev(dev);
2006 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2008 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
2009 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset,
2010 LB_DATA_FORMAT__INTERLEAVE_EN_MASK);
2012 WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, 0);
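/**
 * dce_v8_0_crtc_load_lut - program the legacy gamma LUT for a crtc
 *
 * @crtc: drm crtc
 *
 * Puts the input/output CSC, prescale, degamma, gamut remap and regamma
 * blocks into bypass and loads the 256-entry hardware LUT from the
 * crtc's gamma_store (10 bits per component).
 */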
2015 static void dce_v8_0_crtc_load_lut(struct drm_crtc *crtc)
2017 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2018 struct drm_device *dev = crtc->dev;
2019 struct amdgpu_device *adev = drm_to_adev(dev);
2023 DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id);
2025 WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2026 ((INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_GRPH_MODE__SHIFT) |
2027 (INPUT_CSC_BYPASS << INPUT_CSC_CONTROL__INPUT_CSC_OVL_MODE__SHIFT)));
2028 WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset,
2029 PRESCALE_GRPH_CONTROL__GRPH_PRESCALE_BYPASS_MASK);
2030 WREG32(mmPRESCALE_OVL_CONTROL + amdgpu_crtc->crtc_offset,
2031 PRESCALE_OVL_CONTROL__OVL_PRESCALE_BYPASS_MASK);
2032 WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2033 ((INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__GRPH_INPUT_GAMMA_MODE__SHIFT) |
2034 (INPUT_GAMMA_USE_LUT << INPUT_GAMMA_CONTROL__OVL_INPUT_GAMMA_MODE__SHIFT)));
2036 WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0);
2038 WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0);
2039 WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0);
2040 WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0);
2042 WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff);
2043 WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff);
2044 WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff);
2046 WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0);
2047 WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007);
2049 WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0);
2050 r = crtc->gamma_store;
2051 g = r + crtc->gamma_size;
2052 b = g + crtc->gamma_size;
2053 for (i = 0; i < 256; i++) {
2054 WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset,
2055 ((*r++ & 0xffc0) << 14) |
2056 ((*g++ & 0xffc0) << 4) |
2060 WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2061 ((DEGAMMA_BYPASS << DEGAMMA_CONTROL__GRPH_DEGAMMA_MODE__SHIFT) |
2062 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__OVL_DEGAMMA_MODE__SHIFT) |
2063 (DEGAMMA_BYPASS << DEGAMMA_CONTROL__CURSOR_DEGAMMA_MODE__SHIFT)));
2064 WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset,
2065 ((GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__GRPH_GAMUT_REMAP_MODE__SHIFT) |
2066 (GAMUT_REMAP_BYPASS << GAMUT_REMAP_CONTROL__OVL_GAMUT_REMAP_MODE__SHIFT)));
2067 WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset,
2068 ((REGAMMA_BYPASS << REGAMMA_CONTROL__GRPH_REGAMMA_MODE__SHIFT) |
2069 (REGAMMA_BYPASS << REGAMMA_CONTROL__OVL_REGAMMA_MODE__SHIFT)));
2070 WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset,
2071 ((OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_GRPH_MODE__SHIFT) |
2072 (OUTPUT_CSC_BYPASS << OUTPUT_CSC_CONTROL__OUTPUT_CSC_OVL_MODE__SHIFT)));
2073 /* XXX match this to the depth of the crtc fmt block, move to modeset? */
2074 WREG32(0x1a50 + amdgpu_crtc->crtc_offset, 0);
2075 /* XXX this only needs to be programmed once per crtc at startup,
2076 * not sure where the best place for it is
2078 WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset,
2079 ALPHA_CONTROL__CURSOR_ALPHA_BLND_ENA_MASK);
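/*
 * Select which DIG encoder block services this encoder, based on the
 * UNIPHY encoder object id (and its link).
 */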
2082 static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder)
2084 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
2085 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
2087 switch (amdgpu_encoder->encoder_id) {
2088 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
2093 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
2098 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
2103 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
2106 DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id);
2112 * dce_v8_0_pick_pll - Allocate a PPLL for use by the crtc.
2116 * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
2117 * a single PPLL can be used for all DP crtcs/encoders. For non-DP
2118 * monitors a dedicated PPLL must be used. If a particular board has
2119 * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
2120 * as there is no need to program the PLL itself. If we are not able to
2121 * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
2122 * avoid messing up an existing monitor.
2124 * Asic specific PLL information
2128 * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP)
2130 * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
2133 static u32 dce_v8_0_pick_pll(struct drm_crtc *crtc)
2135 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2136 struct drm_device *dev = crtc->dev;
2137 struct amdgpu_device *adev = drm_to_adev(dev);
2141 if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) {
2142 if (adev->clock.dp_extclk)
2143 /* skip PPLL programming if using ext clock */
2144 return ATOM_PPLL_INVALID;
2146 /* use the same PPLL for all DP monitors */
2147 pll = amdgpu_pll_get_shared_dp_ppll(crtc);
2148 if (pll != ATOM_PPLL_INVALID)
2152 /* use the same PPLL for all monitors with the same clock */
2153 pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
2154 if (pll != ATOM_PPLL_INVALID)
2157 /* otherwise, pick one of the plls */
2158 if ((adev->asic_type == CHIP_KABINI) ||
2159 (adev->asic_type == CHIP_MULLINS)) {
2160 /* KB/ML has PPLL1 and PPLL2 */
2161 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2162 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2164 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2166 DRM_ERROR("unable to allocate a PPLL\n");
2167 return ATOM_PPLL_INVALID;
2169 /* CI/KV has PPLL0, PPLL1, and PPLL2 */
2170 pll_in_use = amdgpu_pll_get_use_mask(crtc);
2171 if (!(pll_in_use & (1 << ATOM_PPLL2)))
2173 if (!(pll_in_use & (1 << ATOM_PPLL1)))
2175 if (!(pll_in_use & (1 << ATOM_PPLL0)))
2177 DRM_ERROR("unable to allocate a PPLL\n");
2178 return ATOM_PPLL_INVALID;
2180 return ATOM_PPLL_INVALID;
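/*
 * Hold cursor register updates (CUR_UPDATE) while the cursor is being
 * reprogrammed, so that address, position and size changes take effect
 * together.
 */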
2183 static void dce_v8_0_lock_cursor(struct drm_crtc *crtc, bool lock)
2185 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2186 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2189 cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset);
2191 cur_lock |= CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2193 cur_lock &= ~CUR_UPDATE__CURSOR_UPDATE_LOCK_MASK;
2194 WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock);
2197 static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
2199 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2200 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2202 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2203 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2204 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2207 static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
2209 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2210 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2212 WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
2213 upper_32_bits(amdgpu_crtc->cursor_addr));
2214 WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
2215 lower_32_bits(amdgpu_crtc->cursor_addr));
2217 WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
2218 CUR_CONTROL__CURSOR_EN_MASK |
2219 (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
2220 (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
2223 static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
2226 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2227 struct amdgpu_device *adev = drm_to_adev(crtc->dev);
2228 int xorigin = 0, yorigin = 0;
2230 amdgpu_crtc->cursor_x = x;
2231 amdgpu_crtc->cursor_y = y;
2233 /* avivo cursors are offset into the total surface */
2236 DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
2239 xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
2243 yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
2247 WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
2248 WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
2249 WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
2250 ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
2255 static int dce_v8_0_crtc_cursor_move(struct drm_crtc *crtc,
2260 dce_v8_0_lock_cursor(crtc, true);
2261 ret = dce_v8_0_cursor_move_locked(crtc, x, y);
2262 dce_v8_0_lock_cursor(crtc, false);
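/**
 * dce_v8_0_crtc_cursor_set2 - bind a new cursor buffer to a crtc
 *
 * @crtc: drm crtc
 * @file_priv: drm file owning the cursor GEM handle
 * @handle: GEM handle of the cursor buffer
 * @width: cursor width
 * @height: cursor height
 * @hot_x: cursor hotspot x
 * @hot_y: cursor hotspot y
 *
 * Looks up and pins the cursor BO in VRAM, reprograms the cursor
 * address, size and position (compensating for a changed hotspot),
 * shows the cursor, and finally unpins and releases the previously
 * bound cursor BO.
 */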
2267 static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
2268 struct drm_file *file_priv,
2275 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2276 struct drm_gem_object *obj;
2277 struct amdgpu_bo *aobj;
2281 /* turn off cursor */
2282 dce_v8_0_hide_cursor(crtc);
2287 if ((width > amdgpu_crtc->max_cursor_width) ||
2288 (height > amdgpu_crtc->max_cursor_height)) {
2289 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
2293 obj = drm_gem_object_lookup(file_priv, handle);
2295 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id);
2299 aobj = gem_to_amdgpu_bo(obj);
2300 ret = amdgpu_bo_reserve(aobj, false);
2302 drm_gem_object_put(obj);
2306 ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
2307 amdgpu_bo_unreserve(aobj);
2309 DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
2310 drm_gem_object_put(obj);
2313 amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
2315 dce_v8_0_lock_cursor(crtc, true);
2317 if (width != amdgpu_crtc->cursor_width ||
2318 height != amdgpu_crtc->cursor_height ||
2319 hot_x != amdgpu_crtc->cursor_hot_x ||
2320 hot_y != amdgpu_crtc->cursor_hot_y) {
2323 x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x;
2324 y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y;
2326 dce_v8_0_cursor_move_locked(crtc, x, y);
2328 amdgpu_crtc->cursor_width = width;
2329 amdgpu_crtc->cursor_height = height;
2330 amdgpu_crtc->cursor_hot_x = hot_x;
2331 amdgpu_crtc->cursor_hot_y = hot_y;
2334 dce_v8_0_show_cursor(crtc);
2335 dce_v8_0_lock_cursor(crtc, false);
2338 if (amdgpu_crtc->cursor_bo) {
2339 struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
2340 ret = amdgpu_bo_reserve(aobj, true);
2341 if (likely(ret == 0)) {
2342 amdgpu_bo_unpin(aobj);
2343 amdgpu_bo_unreserve(aobj);
2345 drm_gem_object_put(amdgpu_crtc->cursor_bo);
2348 amdgpu_crtc->cursor_bo = obj;
2352 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2354 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2356 if (amdgpu_crtc->cursor_bo) {
2357 dce_v8_0_lock_cursor(crtc, true);
2359 dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
2360 amdgpu_crtc->cursor_y);
2362 dce_v8_0_show_cursor(crtc);
2364 dce_v8_0_lock_cursor(crtc, false);
2368 static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2369 u16 *blue, uint32_t size,
2370 struct drm_modeset_acquire_ctx *ctx)
2372 dce_v8_0_crtc_load_lut(crtc);
2377 static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
2379 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2381 drm_crtc_cleanup(crtc);
2385 static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
2386 .cursor_set2 = dce_v8_0_crtc_cursor_set2,
2387 .cursor_move = dce_v8_0_crtc_cursor_move,
2388 .gamma_set = dce_v8_0_crtc_gamma_set,
2389 .set_config = amdgpu_display_crtc_set_config,
2390 .destroy = dce_v8_0_crtc_destroy,
2391 .page_flip_target = amdgpu_display_crtc_page_flip_target,
2392 .get_vblank_counter = amdgpu_get_vblank_counter_kms,
2393 .enable_vblank = amdgpu_enable_vblank_kms,
2394 .disable_vblank = amdgpu_disable_vblank_kms,
2395 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
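/*
 * Legacy DPMS for a crtc: the ON path enables the crtc, unblanks it,
 * re-arms the vblank and pageflip interrupts and reloads the LUT; the
 * STANDBY/SUSPEND/OFF paths blank and disable the crtc.  Power
 * management clocks are recomputed afterwards.
 */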
2398 static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2400 struct drm_device *dev = crtc->dev;
2401 struct amdgpu_device *adev = drm_to_adev(dev);
2402 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2406 case DRM_MODE_DPMS_ON:
2407 amdgpu_crtc->enabled = true;
2408 amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE);
2409 dce_v8_0_vga_enable(crtc, true);
2410 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2411 dce_v8_0_vga_enable(crtc, false);
2412 /* Make sure VBLANK and PFLIP interrupts are still enabled */
2413 type = amdgpu_display_crtc_idx_to_irq_type(adev,
2414 amdgpu_crtc->crtc_id);
2415 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2416 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2417 drm_crtc_vblank_on(crtc);
2418 dce_v8_0_crtc_load_lut(crtc);
2420 case DRM_MODE_DPMS_STANDBY:
2421 case DRM_MODE_DPMS_SUSPEND:
2422 case DRM_MODE_DPMS_OFF:
2423 drm_crtc_vblank_off(crtc);
2424 if (amdgpu_crtc->enabled) {
2425 dce_v8_0_vga_enable(crtc, true);
2426 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
2427 dce_v8_0_vga_enable(crtc, false);
2429 amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE);
2430 amdgpu_crtc->enabled = false;
2433 /* adjust pm to dpms */
2434 amdgpu_dpm_compute_clocks(adev);
2437 static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc)
2439 /* disable crtc pair power gating before programming */
2440 amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE);
2441 amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE);
2442 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2445 static void dce_v8_0_crtc_commit(struct drm_crtc *crtc)
2447 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
2448 amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE);
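/*
 * Fully disable a crtc: blank it via the DPMS off path, unpin the
 * scanout buffer, disable the GRPH surface, power gate the crtc and
 * release its PPLL unless another enabled crtc is still using it.
 */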
2451 static void dce_v8_0_crtc_disable(struct drm_crtc *crtc)
2453 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2454 struct drm_device *dev = crtc->dev;
2455 struct amdgpu_device *adev = drm_to_adev(dev);
2456 struct amdgpu_atom_ss ss;
2459 dce_v8_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
2460 if (crtc->primary->fb) {
2462 struct amdgpu_bo *abo;
2464 abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]);
2465 r = amdgpu_bo_reserve(abo, true);
2467 DRM_ERROR("failed to reserve abo before unpin\n");
2469 amdgpu_bo_unpin(abo);
2470 amdgpu_bo_unreserve(abo);
2473 /* disable the GRPH */
2474 dce_v8_0_grph_enable(crtc, false);
2476 amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE);
2478 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2479 if (adev->mode_info.crtcs[i] &&
2480 adev->mode_info.crtcs[i]->enabled &&
2481 i != amdgpu_crtc->crtc_id &&
2482 amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) {
2483 /* another crtc is still using this pll, don't turn it off */
2490 switch (amdgpu_crtc->pll_id) {
2493 /* disable the ppll */
2494 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2495 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2498 /* disable the ppll */
2499 if ((adev->asic_type == CHIP_KAVERI) ||
2500 (adev->asic_type == CHIP_BONAIRE) ||
2501 (adev->asic_type == CHIP_HAWAII))
2502 amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id,
2503 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
2509 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2510 amdgpu_crtc->adjusted_clock = 0;
2511 amdgpu_crtc->encoder = NULL;
2512 amdgpu_crtc->connector = NULL;
2515 static int dce_v8_0_crtc_mode_set(struct drm_crtc *crtc,
2516 struct drm_display_mode *mode,
2517 struct drm_display_mode *adjusted_mode,
2518 int x, int y, struct drm_framebuffer *old_fb)
2520 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2522 if (!amdgpu_crtc->adjusted_clock)
2525 amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode);
2526 amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode);
2527 dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2528 amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode);
2529 amdgpu_atombios_crtc_scaler_setup(crtc);
2530 dce_v8_0_cursor_reset(crtc);
2531 /* update the hw version for dpm */
2532 amdgpu_crtc->hw_mode = *adjusted_mode;
2537 static bool dce_v8_0_crtc_mode_fixup(struct drm_crtc *crtc,
2538 const struct drm_display_mode *mode,
2539 struct drm_display_mode *adjusted_mode)
2541 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2542 struct drm_device *dev = crtc->dev;
2543 struct drm_encoder *encoder;
2545 /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */
2546 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
2547 if (encoder->crtc == crtc) {
2548 amdgpu_crtc->encoder = encoder;
2549 amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder);
2553 if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) {
2554 amdgpu_crtc->encoder = NULL;
2555 amdgpu_crtc->connector = NULL;
2558 if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
2560 if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode))
2563 amdgpu_crtc->pll_id = dce_v8_0_pick_pll(crtc);
2564 /* if we can't get a PPLL for a non-DP encoder, fail */
2565 if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) &&
2566 !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder)))
2572 static int dce_v8_0_crtc_set_base(struct drm_crtc *crtc, int x, int y,
2573 struct drm_framebuffer *old_fb)
2575 return dce_v8_0_crtc_do_set_base(crtc, old_fb, x, y, 0);
2578 static int dce_v8_0_crtc_set_base_atomic(struct drm_crtc *crtc,
2579 struct drm_framebuffer *fb,
2580 int x, int y, enum mode_set_atomic state)
2582 return dce_v8_0_crtc_do_set_base(crtc, fb, x, y, 1);
2585 static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
2586 .dpms = dce_v8_0_crtc_dpms,
2587 .mode_fixup = dce_v8_0_crtc_mode_fixup,
2588 .mode_set = dce_v8_0_crtc_mode_set,
2589 .mode_set_base = dce_v8_0_crtc_set_base,
2590 .mode_set_base_atomic = dce_v8_0_crtc_set_base_atomic,
2591 .prepare = dce_v8_0_crtc_prepare,
2592 .commit = dce_v8_0_crtc_commit,
2593 .disable = dce_v8_0_crtc_disable,
2594 .get_scanout_position = amdgpu_crtc_get_scanout_position,
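/*
 * Allocate and register an amdgpu_crtc for the given crtc index: set up
 * the 256-entry gamma table, the CIK cursor size limits, the per-crtc
 * register offset and the crtc helper functions.
 */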
2597 static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
2599 struct amdgpu_crtc *amdgpu_crtc;
2601 amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
2602 (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
2603 if (amdgpu_crtc == NULL)
2606 drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v8_0_crtc_funcs);
2608 drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
2609 amdgpu_crtc->crtc_id = index;
2610 adev->mode_info.crtcs[index] = amdgpu_crtc;
2612 amdgpu_crtc->max_cursor_width = CIK_CURSOR_WIDTH;
2613 amdgpu_crtc->max_cursor_height = CIK_CURSOR_HEIGHT;
2614 adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width;
2615 adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height;
2617 amdgpu_crtc->crtc_offset = crtc_offsets[amdgpu_crtc->crtc_id];
2619 amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
2620 amdgpu_crtc->adjusted_clock = 0;
2621 amdgpu_crtc->encoder = NULL;
2622 amdgpu_crtc->connector = NULL;
2623 drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v8_0_crtc_helper_funcs);
2628 static int dce_v8_0_early_init(void *handle)
2630 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2632 adev->audio_endpt_rreg = &dce_v8_0_audio_endpt_rreg;
2633 adev->audio_endpt_wreg = &dce_v8_0_audio_endpt_wreg;
2635 dce_v8_0_set_display_funcs(adev);
2637 adev->mode_info.num_crtc = dce_v8_0_get_num_crtc(adev);
2639 switch (adev->asic_type) {
2642 adev->mode_info.num_hpd = 6;
2643 adev->mode_info.num_dig = 6;
2646 adev->mode_info.num_hpd = 6;
2647 adev->mode_info.num_dig = 7;
2651 adev->mode_info.num_hpd = 6;
2652 adev->mode_info.num_dig = 6; /* ? */
2655 /* FIXME: not supported yet */
2659 dce_v8_0_set_irq_funcs(adev);
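/*
 * sw_init registers the crtc, page-flip and HPD interrupt sources, sets
 * up the DRM mode_config limits and properties, creates the crtcs and
 * connectors from the BIOS object table, initializes AFMT and audio,
 * and brings up vblank handling and connector polling.
 */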
2664 static int dce_v8_0_sw_init(void *handle)
2667 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2669 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2670 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
2675 for (i = 8; i < 20; i += 2) {
2676 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq);
2682 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 42, &adev->hpd_irq);
2686 adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
2688 adev_to_drm(adev)->mode_config.async_page_flip = true;
2690 adev_to_drm(adev)->mode_config.max_width = 16384;
2691 adev_to_drm(adev)->mode_config.max_height = 16384;
2693 adev_to_drm(adev)->mode_config.preferred_depth = 24;
2694 if (adev->asic_type == CHIP_HAWAII)
2695 /* disable prefer shadow for now due to hibernation issues */
2696 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
2698 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
2700 adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
2702 r = amdgpu_display_modeset_create_props(adev);
2706 adev_to_drm(adev)->mode_config.max_width = 16384;
2707 adev_to_drm(adev)->mode_config.max_height = 16384;
2709 /* allocate crtcs */
2710 for (i = 0; i < adev->mode_info.num_crtc; i++) {
2711 r = dce_v8_0_crtc_init(adev, i);
2716 if (amdgpu_atombios_get_connector_info_from_object_table(adev))
2717 amdgpu_display_print_display_setup(adev_to_drm(adev));
2722 r = dce_v8_0_afmt_init(adev);
2726 r = dce_v8_0_audio_init(adev);
2730 /* Disable vblank IRQs aggressively for power-saving */
2731 /* XXX: can this be enabled for DC? */
2732 adev_to_drm(adev)->vblank_disable_immediate = true;
2734 r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
2739 INIT_DELAYED_WORK(&adev->hotplug_work,
2740 amdgpu_display_hotplug_work_func);
2742 drm_kms_helper_poll_init(adev_to_drm(adev));
2744 adev->mode_info.mode_config_initialized = true;
2748 static int dce_v8_0_sw_fini(void *handle)
2750 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2752 kfree(adev->mode_info.bios_hardcoded_edid);
2754 drm_kms_helper_poll_fini(adev_to_drm(adev));
2756 dce_v8_0_audio_fini(adev);
2758 dce_v8_0_afmt_fini(adev);
2760 drm_mode_config_cleanup(adev_to_drm(adev));
2761 adev->mode_info.mode_config_initialized = false;
2766 static int dce_v8_0_hw_init(void *handle)
2769 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2771 /* disable vga render */
2772 dce_v8_0_set_vga_render_state(adev, false);
2773 /* init dig PHYs, disp eng pll */
2774 amdgpu_atombios_encoder_init_dig(adev);
2775 amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
2777 /* initialize hpd */
2778 dce_v8_0_hpd_init(adev);
2780 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2781 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2784 dce_v8_0_pageflip_interrupt_init(adev);
2789 static int dce_v8_0_hw_fini(void *handle)
2792 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2794 dce_v8_0_hpd_fini(adev);
2796 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
2797 dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
2800 dce_v8_0_pageflip_interrupt_fini(adev);
2802 flush_delayed_work(&adev->hotplug_work);
2807 static int dce_v8_0_suspend(void *handle)
2809 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2812 r = amdgpu_display_suspend_helper(adev);
2816 adev->mode_info.bl_level =
2817 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2819 return dce_v8_0_hw_fini(handle);
2822 static int dce_v8_0_resume(void *handle)
2824 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2827 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2828 adev->mode_info.bl_level);
2830 ret = dce_v8_0_hw_init(handle);
2832 /* turn on the BL */
2833 if (adev->mode_info.bl_encoder) {
2834 u8 bl_level = amdgpu_display_backlight_get_level(adev,
2835 adev->mode_info.bl_encoder);
2836 amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
2842 return amdgpu_display_resume_helper(adev);
2845 static bool dce_v8_0_is_idle(void *handle)
2850 static int dce_v8_0_wait_for_idle(void *handle)
2855 static int dce_v8_0_soft_reset(void *handle)
2857 u32 srbm_soft_reset = 0, tmp;
2858 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2860 if (dce_v8_0_is_display_hung(adev))
2861 srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK;
2863 if (srbm_soft_reset) {
2864 tmp = RREG32(mmSRBM_SOFT_RESET);
2865 tmp |= srbm_soft_reset;
2866 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
2867 WREG32(mmSRBM_SOFT_RESET, tmp);
2868 tmp = RREG32(mmSRBM_SOFT_RESET);
2872 tmp &= ~srbm_soft_reset;
2873 WREG32(mmSRBM_SOFT_RESET, tmp);
2874 tmp = RREG32(mmSRBM_SOFT_RESET);
2876 /* Wait a little for things to settle down */
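/*
 * Enable or disable the vblank interrupt for a crtc by updating the
 * VBLANK bit in LB_INTERRUPT_MASK of that crtc's register block.
 */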
2882 static void dce_v8_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
2884 enum amdgpu_interrupt_state state)
2886 u32 reg_block, lb_interrupt_mask;
2888 if (crtc >= adev->mode_info.num_crtc) {
2889 DRM_DEBUG("invalid crtc %d\n", crtc);
2895 reg_block = CRTC0_REGISTER_OFFSET;
2898 reg_block = CRTC1_REGISTER_OFFSET;
2901 reg_block = CRTC2_REGISTER_OFFSET;
2904 reg_block = CRTC3_REGISTER_OFFSET;
2907 reg_block = CRTC4_REGISTER_OFFSET;
2910 reg_block = CRTC5_REGISTER_OFFSET;
2913 DRM_DEBUG("invalid crtc %d\n", crtc);
2918 case AMDGPU_IRQ_STATE_DISABLE:
2919 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2920 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2921 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2923 case AMDGPU_IRQ_STATE_ENABLE:
2924 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2925 lb_interrupt_mask |= LB_INTERRUPT_MASK__VBLANK_INTERRUPT_MASK_MASK;
2926 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2933 static void dce_v8_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev,
2935 enum amdgpu_interrupt_state state)
2937 u32 reg_block, lb_interrupt_mask;
2939 if (crtc >= adev->mode_info.num_crtc) {
2940 DRM_DEBUG("invalid crtc %d\n", crtc);
2946 reg_block = CRTC0_REGISTER_OFFSET;
2949 reg_block = CRTC1_REGISTER_OFFSET;
2952 reg_block = CRTC2_REGISTER_OFFSET;
2955 reg_block = CRTC3_REGISTER_OFFSET;
2958 reg_block = CRTC4_REGISTER_OFFSET;
2961 reg_block = CRTC5_REGISTER_OFFSET;
2964 DRM_DEBUG("invalid crtc %d\n", crtc);
2969 case AMDGPU_IRQ_STATE_DISABLE:
2970 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2971 lb_interrupt_mask &= ~LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2972 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2974 case AMDGPU_IRQ_STATE_ENABLE:
2975 lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + reg_block);
2976 lb_interrupt_mask |= LB_INTERRUPT_MASK__VLINE_INTERRUPT_MASK_MASK;
2977 WREG32(mmLB_INTERRUPT_MASK + reg_block, lb_interrupt_mask);
2984 static int dce_v8_0_set_hpd_interrupt_state(struct amdgpu_device *adev,
2985 struct amdgpu_irq_src *src,
2987 enum amdgpu_interrupt_state state)
2989 u32 dc_hpd_int_cntl;
2991 if (type >= adev->mode_info.num_hpd) {
2992 DRM_DEBUG("invalid hpd %d\n", type);
2997 case AMDGPU_IRQ_STATE_DISABLE:
2998 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
2999 dc_hpd_int_cntl &= ~DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3000 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3002 case AMDGPU_IRQ_STATE_ENABLE:
3003 dc_hpd_int_cntl = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type]);
3004 dc_hpd_int_cntl |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_EN_MASK;
3005 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[type], dc_hpd_int_cntl);
3014 static int dce_v8_0_set_crtc_interrupt_state(struct amdgpu_device *adev,
3015 struct amdgpu_irq_src *src,
3017 enum amdgpu_interrupt_state state)
3020 case AMDGPU_CRTC_IRQ_VBLANK1:
3021 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 0, state);
3023 case AMDGPU_CRTC_IRQ_VBLANK2:
3024 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 1, state);
3026 case AMDGPU_CRTC_IRQ_VBLANK3:
3027 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 2, state);
3029 case AMDGPU_CRTC_IRQ_VBLANK4:
3030 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 3, state);
3032 case AMDGPU_CRTC_IRQ_VBLANK5:
3033 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 4, state);
3035 case AMDGPU_CRTC_IRQ_VBLANK6:
3036 dce_v8_0_set_crtc_vblank_interrupt_state(adev, 5, state);
3038 case AMDGPU_CRTC_IRQ_VLINE1:
3039 dce_v8_0_set_crtc_vline_interrupt_state(adev, 0, state);
3041 case AMDGPU_CRTC_IRQ_VLINE2:
3042 dce_v8_0_set_crtc_vline_interrupt_state(adev, 1, state);
3044 case AMDGPU_CRTC_IRQ_VLINE3:
3045 dce_v8_0_set_crtc_vline_interrupt_state(adev, 2, state);
3047 case AMDGPU_CRTC_IRQ_VLINE4:
3048 dce_v8_0_set_crtc_vline_interrupt_state(adev, 3, state);
3050 case AMDGPU_CRTC_IRQ_VLINE5:
3051 dce_v8_0_set_crtc_vline_interrupt_state(adev, 4, state);
3053 case AMDGPU_CRTC_IRQ_VLINE6:
3054 dce_v8_0_set_crtc_vline_interrupt_state(adev, 5, state);
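/*
 * Top-level crtc interrupt handler: acknowledges the vblank/vline
 * status bits for the signalled crtc and forwards vblank events to DRM
 * when the corresponding interrupt source is enabled.
 */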
3062 static int dce_v8_0_crtc_irq(struct amdgpu_device *adev,
3063 struct amdgpu_irq_src *source,
3064 struct amdgpu_iv_entry *entry)
3066 unsigned crtc = entry->src_id - 1;
3067 uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg);
3068 unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
3071 switch (entry->src_data[0]) {
3072 case 0: /* vblank */
3073 if (disp_int & interrupt_status_offsets[crtc].vblank)
3074 WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], LB_VBLANK_STATUS__VBLANK_ACK_MASK);
3076 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3078 if (amdgpu_irq_enabled(adev, source, irq_type)) {
3079 drm_handle_vblank(adev_to_drm(adev), crtc);
3081 DRM_DEBUG("IH: D%d vblank\n", crtc + 1);
3084 if (disp_int & interrupt_status_offsets[crtc].vline)
3085 WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], LB_VLINE_STATUS__VLINE_ACK_MASK);
3087 DRM_DEBUG("IH: IH event w/o asserted irq bit?\n");
3089 DRM_DEBUG("IH: D%d vline\n", crtc + 1);
3092 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3099 static int dce_v8_0_set_pageflip_interrupt_state(struct amdgpu_device *adev,
3100 struct amdgpu_irq_src *src,
3102 enum amdgpu_interrupt_state state)
3106 if (type >= adev->mode_info.num_crtc) {
3107 DRM_ERROR("invalid pageflip crtc %d\n", type);
3111 reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]);
3112 if (state == AMDGPU_IRQ_STATE_DISABLE)
3113 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3114 reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
3116 WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type],
3117 reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK);
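/*
 * Page-flip completion handler: clears the GRPH pflip interrupt, marks
 * the pending flip as done, delivers the queued vblank event to
 * userspace and schedules the work that unpins the old buffer.
 */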
3122 static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3123 struct amdgpu_irq_src *source,
3124 struct amdgpu_iv_entry *entry)
3126 unsigned long flags;
3128 struct amdgpu_crtc *amdgpu_crtc;
3129 struct amdgpu_flip_work *works;
3131 crtc_id = (entry->src_id - 8) >> 1;
3132 amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
3134 if (crtc_id >= adev->mode_info.num_crtc) {
3135 DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
3139 if (RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) &
3140 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK)
3141 WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id],
3142 GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK);
3144 /* an IRQ can occur during the initial stage, before the crtc structures are set up */
3145 if (amdgpu_crtc == NULL)
3148 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
3149 works = amdgpu_crtc->pflip_works;
3150 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
3151 DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
3152 "AMDGPU_FLIP_SUBMITTED(%d)\n",
3153 amdgpu_crtc->pflip_status,
3154 AMDGPU_FLIP_SUBMITTED);
3155 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3159 /* page flip completed. clean up */
3160 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
3161 amdgpu_crtc->pflip_works = NULL;
3163 /* wake up userspace */
3165 drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
3167 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
3169 drm_crtc_vblank_put(&amdgpu_crtc->base);
3170 schedule_work(&works->unpin_work);
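/*
 * Hot-plug detect interrupt handler: acknowledges the HPD interrupt for
 * the signalled pad and schedules the hotplug work to re-probe the
 * connectors.
 */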
3175 static int dce_v8_0_hpd_irq(struct amdgpu_device *adev,
3176 struct amdgpu_irq_src *source,
3177 struct amdgpu_iv_entry *entry)
3179 uint32_t disp_int, mask, tmp;
3182 if (entry->src_data[0] >= adev->mode_info.num_hpd) {
3183 DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]);
3187 hpd = entry->src_data[0];
3188 disp_int = RREG32(interrupt_status_offsets[hpd].reg);
3189 mask = interrupt_status_offsets[hpd].hpd;
3191 if (disp_int & mask) {
3192 tmp = RREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd]);
3193 tmp |= DC_HPD1_INT_CONTROL__DC_HPD1_INT_ACK_MASK;
3194 WREG32(mmDC_HPD1_INT_CONTROL + hpd_offsets[hpd], tmp);
3195 schedule_delayed_work(&adev->hotplug_work, 0);
3196 DRM_DEBUG("IH: HPD%d\n", hpd + 1);
3203 static int dce_v8_0_set_clockgating_state(void *handle,
3204 enum amd_clockgating_state state)
3209 static int dce_v8_0_set_powergating_state(void *handle,
3210 enum amd_powergating_state state)
3215 static const struct amd_ip_funcs dce_v8_0_ip_funcs = {
3217 .early_init = dce_v8_0_early_init,
3219 .sw_init = dce_v8_0_sw_init,
3220 .sw_fini = dce_v8_0_sw_fini,
3221 .hw_init = dce_v8_0_hw_init,
3222 .hw_fini = dce_v8_0_hw_fini,
3223 .suspend = dce_v8_0_suspend,
3224 .resume = dce_v8_0_resume,
3225 .is_idle = dce_v8_0_is_idle,
3226 .wait_for_idle = dce_v8_0_wait_for_idle,
3227 .soft_reset = dce_v8_0_soft_reset,
3228 .set_clockgating_state = dce_v8_0_set_clockgating_state,
3229 .set_powergating_state = dce_v8_0_set_powergating_state,
3233 dce_v8_0_encoder_mode_set(struct drm_encoder *encoder,
3234 struct drm_display_mode *mode,
3235 struct drm_display_mode *adjusted_mode)
3237 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3239 amdgpu_encoder->pixel_clock = adjusted_mode->clock;
3241 /* need to call this here rather than in prepare() since we need some crtc info */
3242 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3244 /* set scaler clears this on some chips */
3245 dce_v8_0_set_interleave(encoder->crtc, mode);
3247 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
3248 dce_v8_0_afmt_enable(encoder, true);
3249 dce_v8_0_afmt_setmode(encoder, adjusted_mode);
3253 static void dce_v8_0_encoder_prepare(struct drm_encoder *encoder)
3255 struct amdgpu_device *adev = drm_to_adev(encoder->dev);
3256 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3257 struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder);
3259 if ((amdgpu_encoder->active_device &
3260 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
3261 (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) !=
3262 ENCODER_OBJECT_ID_NONE)) {
3263 struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
3265 dig->dig_encoder = dce_v8_0_pick_dig_encoder(encoder);
3266 if (amdgpu_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT)
3267 dig->afmt = adev->mode_info.afmt[dig->dig_encoder];
3271 amdgpu_atombios_scratch_regs_lock(adev, true);
3274 struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
3276 /* select the clock/data port if it uses a router */
3277 if (amdgpu_connector->router.cd_valid)
3278 amdgpu_i2c_router_select_cd_port(amdgpu_connector);
3280 /* turn eDP panel on for mode set */
3281 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
3282 amdgpu_atombios_encoder_set_edp_panel_power(connector,
3283 ATOM_TRANSMITTER_ACTION_POWER_ON);
3286 /* this is needed for the pll/ss setup to work correctly in some cases */
3287 amdgpu_atombios_encoder_set_crtc_source(encoder);
3288 /* set up the FMT blocks */
3289 dce_v8_0_program_fmt(encoder);
3292 static void dce_v8_0_encoder_commit(struct drm_encoder *encoder)
3294 struct drm_device *dev = encoder->dev;
3295 struct amdgpu_device *adev = drm_to_adev(dev);
3297 /* need to call this here as we need the crtc set up */
3298 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
3299 amdgpu_atombios_scratch_regs_lock(adev, false);
3302 static void dce_v8_0_encoder_disable(struct drm_encoder *encoder)
3304 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3305 struct amdgpu_encoder_atom_dig *dig;
3307 amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
3309 if (amdgpu_atombios_encoder_is_digital(encoder)) {
3310 if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
3311 dce_v8_0_afmt_enable(encoder, false);
3312 dig = amdgpu_encoder->enc_priv;
3313 dig->dig_encoder = -1;
3315 amdgpu_encoder->active_device = 0;
3318 /* these are handled by the primary encoders */
3319 static void dce_v8_0_ext_prepare(struct drm_encoder *encoder)
3324 static void dce_v8_0_ext_commit(struct drm_encoder *encoder)
3330 dce_v8_0_ext_mode_set(struct drm_encoder *encoder,
3331 struct drm_display_mode *mode,
3332 struct drm_display_mode *adjusted_mode)
3337 static void dce_v8_0_ext_disable(struct drm_encoder *encoder)
3343 dce_v8_0_ext_dpms(struct drm_encoder *encoder, int mode)
3348 static const struct drm_encoder_helper_funcs dce_v8_0_ext_helper_funcs = {
3349 .dpms = dce_v8_0_ext_dpms,
3350 .prepare = dce_v8_0_ext_prepare,
3351 .mode_set = dce_v8_0_ext_mode_set,
3352 .commit = dce_v8_0_ext_commit,
3353 .disable = dce_v8_0_ext_disable,
3354 /* no detect for TMDS/LVDS yet */
3357 static const struct drm_encoder_helper_funcs dce_v8_0_dig_helper_funcs = {
3358 .dpms = amdgpu_atombios_encoder_dpms,
3359 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3360 .prepare = dce_v8_0_encoder_prepare,
3361 .mode_set = dce_v8_0_encoder_mode_set,
3362 .commit = dce_v8_0_encoder_commit,
3363 .disable = dce_v8_0_encoder_disable,
3364 .detect = amdgpu_atombios_encoder_dig_detect,
3367 static const struct drm_encoder_helper_funcs dce_v8_0_dac_helper_funcs = {
3368 .dpms = amdgpu_atombios_encoder_dpms,
3369 .mode_fixup = amdgpu_atombios_encoder_mode_fixup,
3370 .prepare = dce_v8_0_encoder_prepare,
3371 .mode_set = dce_v8_0_encoder_mode_set,
3372 .commit = dce_v8_0_encoder_commit,
3373 .detect = amdgpu_atombios_encoder_dac_detect,
3376 static void dce_v8_0_encoder_destroy(struct drm_encoder *encoder)
3378 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
3379 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3380 amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder);
3381 kfree(amdgpu_encoder->enc_priv);
3382 drm_encoder_cleanup(encoder);
3383 kfree(amdgpu_encoder);
3386 static const struct drm_encoder_funcs dce_v8_0_encoder_funcs = {
3387 .destroy = dce_v8_0_encoder_destroy,
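/*
 * Create (or update) a drm_encoder for an ATOM BIOS encoder object: if
 * an encoder with the same encoder_enum already exists only its
 * supported device mask is extended, otherwise a new encoder is
 * allocated and the DRM encoder type and helper funcs are chosen from
 * the encoder object id and supported devices.
 */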
3390 static void dce_v8_0_encoder_add(struct amdgpu_device *adev,
3391 uint32_t encoder_enum,
3392 uint32_t supported_device,
3395 struct drm_device *dev = adev_to_drm(adev);
3396 struct drm_encoder *encoder;
3397 struct amdgpu_encoder *amdgpu_encoder;
3399 /* see if we already added it */
3400 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3401 amdgpu_encoder = to_amdgpu_encoder(encoder);
3402 if (amdgpu_encoder->encoder_enum == encoder_enum) {
3403 amdgpu_encoder->devices |= supported_device;
3410 amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL);
3411 if (!amdgpu_encoder)
3414 encoder = &amdgpu_encoder->base;
3415 switch (adev->mode_info.num_crtc) {
3417 encoder->possible_crtcs = 0x1;
3421 encoder->possible_crtcs = 0x3;
3424 encoder->possible_crtcs = 0xf;
3427 encoder->possible_crtcs = 0x3f;
3431 amdgpu_encoder->enc_priv = NULL;
3433 amdgpu_encoder->encoder_enum = encoder_enum;
3434 amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
3435 amdgpu_encoder->devices = supported_device;
3436 amdgpu_encoder->rmx_type = RMX_OFF;
3437 amdgpu_encoder->underscan_type = UNDERSCAN_OFF;
3438 amdgpu_encoder->is_ext_encoder = false;
3439 amdgpu_encoder->caps = caps;
3441 switch (amdgpu_encoder->encoder_id) {
3442 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
3443 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
3444 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3445 DRM_MODE_ENCODER_DAC, NULL);
3446 drm_encoder_helper_add(encoder, &dce_v8_0_dac_helper_funcs);
3448 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
3449 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
3450 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
3451 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
3452 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
3453 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
3454 amdgpu_encoder->rmx_type = RMX_FULL;
3455 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3456 DRM_MODE_ENCODER_LVDS, NULL);
3457 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder);
3458 } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
3459 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3460 DRM_MODE_ENCODER_DAC, NULL);
3461 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3463 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3464 DRM_MODE_ENCODER_TMDS, NULL);
3465 amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder);
3467 drm_encoder_helper_add(encoder, &dce_v8_0_dig_helper_funcs);
3469 case ENCODER_OBJECT_ID_SI170B:
3470 case ENCODER_OBJECT_ID_CH7303:
3471 case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
3472 case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
3473 case ENCODER_OBJECT_ID_TITFP513:
3474 case ENCODER_OBJECT_ID_VT1623:
3475 case ENCODER_OBJECT_ID_HDMI_SI1930:
3476 case ENCODER_OBJECT_ID_TRAVIS:
3477 case ENCODER_OBJECT_ID_NUTMEG:
3478 /* these are handled by the primary encoders */
3479 amdgpu_encoder->is_ext_encoder = true;
3480 if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
3481 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3482 DRM_MODE_ENCODER_LVDS, NULL);
3483 else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
3484 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3485 DRM_MODE_ENCODER_DAC, NULL);
3487 drm_encoder_init(dev, encoder, &dce_v8_0_encoder_funcs,
3488 DRM_MODE_ENCODER_TMDS, NULL);
3489 drm_encoder_helper_add(encoder, &dce_v8_0_ext_helper_funcs);
3494 static const struct amdgpu_display_funcs dce_v8_0_display_funcs = {
3495 .bandwidth_update = &dce_v8_0_bandwidth_update,
3496 .vblank_get_counter = &dce_v8_0_vblank_get_counter,
3497 .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level,
3498 .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level,
3499 .hpd_sense = &dce_v8_0_hpd_sense,
3500 .hpd_set_polarity = &dce_v8_0_hpd_set_polarity,
3501 .hpd_get_gpio_reg = &dce_v8_0_hpd_get_gpio_reg,
3502 .page_flip = &dce_v8_0_page_flip,
3503 .page_flip_get_scanoutpos = &dce_v8_0_crtc_get_scanoutpos,
3504 .add_encoder = &dce_v8_0_encoder_add,
3505 .add_connector = &amdgpu_connector_add,
3508 static void dce_v8_0_set_display_funcs(struct amdgpu_device *adev)
3510 adev->mode_info.funcs = &dce_v8_0_display_funcs;
3513 static const struct amdgpu_irq_src_funcs dce_v8_0_crtc_irq_funcs = {
3514 .set = dce_v8_0_set_crtc_interrupt_state,
3515 .process = dce_v8_0_crtc_irq,
3518 static const struct amdgpu_irq_src_funcs dce_v8_0_pageflip_irq_funcs = {
3519 .set = dce_v8_0_set_pageflip_interrupt_state,
3520 .process = dce_v8_0_pageflip_irq,
3523 static const struct amdgpu_irq_src_funcs dce_v8_0_hpd_irq_funcs = {
3524 .set = dce_v8_0_set_hpd_interrupt_state,
3525 .process = dce_v8_0_hpd_irq,
3528 static void dce_v8_0_set_irq_funcs(struct amdgpu_device *adev)
3530 if (adev->mode_info.num_crtc > 0)
3531 adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc;
3533 adev->crtc_irq.num_types = 0;
3534 adev->crtc_irq.funcs = &dce_v8_0_crtc_irq_funcs;
3536 adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
3537 adev->pageflip_irq.funcs = &dce_v8_0_pageflip_irq_funcs;
3539 adev->hpd_irq.num_types = adev->mode_info.num_hpd;
3540 adev->hpd_irq.funcs = &dce_v8_0_hpd_irq_funcs;
3543 const struct amdgpu_ip_block_version dce_v8_0_ip_block = {
3544 .type = AMD_IP_BLOCK_TYPE_DCE,
3548 .funcs = &dce_v8_0_ip_funcs,
3551 const struct amdgpu_ip_block_version dce_v8_1_ip_block = {
3552 .type = AMD_IP_BLOCK_TYPE_DCE,
3556 .funcs = &dce_v8_0_ip_funcs,
3559 const struct amdgpu_ip_block_version dce_v8_2_ip_block = {
3560 .type = AMD_IP_BLOCK_TYPE_DCE,
3564 .funcs = &dce_v8_0_ip_funcs,
3567 const struct amdgpu_ip_block_version dce_v8_3_ip_block = {
3568 .type = AMD_IP_BLOCK_TYPE_DCE,
3572 .funcs = &dce_v8_0_ip_funcs,
3575 const struct amdgpu_ip_block_version dce_v8_5_ip_block = {
3576 .type = AMD_IP_BLOCK_TYPE_DCE,
3580 .funcs = &dce_v8_0_ip_funcs,