/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_display.h"
#include "soc15_common.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include <asm/div64.h>

#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_vblank.h>

/**
 * amdgpu_display_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt.  It walks through the connector table
 * and calls the hotplug handler for each connector.  After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
void amdgpu_display_hotplug_work_func(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
						  hotplug_work.work);
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	mutex_lock(&mode_config->mutex);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		amdgpu_connector_hotplug(connector);
	drm_connector_list_iter_end(&iter);
	mutex_unlock(&mode_config->mutex);
	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj);

static void amdgpu_display_flip_callback(struct dma_fence *f,
					 struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

static bool amdgpu_display_flip_handle_fence(struct amdgpu_flip_work *work,
					     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb,
				    amdgpu_display_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

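/*
 * amdgpu_display_flip_work_func - deferred page flip handler
 *
 * Arms a callback on each remaining shared fence, then keeps
 * re-scheduling itself until the CRTC has left the vblank period
 * preceding the target vblank, and finally programs the flip via the
 * mmio page_flip hook.
 */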
static void amdgpu_display_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned int i;
	int vpos, hpos;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_display_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_display_get_crtc_scanoutpos(adev_to_drm(adev), work->crtc_id, 0,
						&vpos, &hpos, NULL, NULL,
						&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(crtc)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	drm_dbg_vbl(adev_to_drm(adev),
		    "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
		    amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_display_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, true);
	if (likely(r == 0)) {
		amdgpu_bo_unpin(work->old_abo);
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

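/*
 * Legacy (non-atomic) page flip entry point: pins the new buffer,
 * collects the write fences it depends on and queues the flip work;
 * the actual flip is submitted by amdgpu_display_flip_work_func()
 * near the target vblank.
 */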
int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags, uint32_t target,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	int i, r;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_display_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_display_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	obj = crtc->primary->fb->obj[0];

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	obj = fb->obj[0];
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	if (!adev->enable_virtual_display) {
		r = amdgpu_bo_pin(new_abo,
				  amdgpu_display_supported_domains(adev, new_abo->flags));
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to pin new abo buffer before flip\n");
			goto unreserve;
		}
	}

	r = amdgpu_ttm_alloc_gart(&new_abo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", new_abo);
		goto unpin;
	}

	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	if (!adev->enable_virtual_display)
		work->base = amdgpu_bo_gpu_offset(new_abo);
	work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(crtc);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}

	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	amdgpu_display_flip_work_func(&work->flip_work.work);
	return 0;

pflip_cleanup:
	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		goto cleanup;
	}
unpin:
	if (!adev->enable_virtual_display)
		amdgpu_bo_unpin(new_abo);

unreserve:
	amdgpu_bo_unreserve(new_abo);

cleanup:
	amdgpu_bo_unref(&work->old_abo);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);

	return r;
}

int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
				   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = drm_to_adev(dev);
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then
	 * drop the power ref we got before
	 */
	if (!active && adev->have_disp_power_ref)
		adev->have_disp_power_ref = false;
out:
	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

void amdgpu_display_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector_list_iter iter;
	uint32_t devices;
	int i = 0;

	drm_connector_list_iter_begin(dev, &iter);
	DRM_INFO("AMDGPU Display Connectors\n");
	drm_for_each_connector_iter(connector, &iter) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to xorg-driver-ati@lists.x.org\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
	drm_connector_list_iter_end(&iter);
}

bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
			      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux)
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	else
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

static int amdgpu_dirtyfb(struct drm_framebuffer *fb, struct drm_file *file,
			  unsigned int flags, unsigned int color,
			  struct drm_clip_rect *clips, unsigned int num_clips)
{
	if (file)
		return -ENOSYS;

	return drm_atomic_helper_dirtyfb(fb, file, flags, color, clips,
					 num_clips);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

static const struct drm_framebuffer_funcs amdgpu_fb_funcs_atomic = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
	.dirty = amdgpu_dirtyfb
};

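/*
 * Returns the set of memory domains (VRAM, and GTT where USWC mappings
 * are usable) that a scanout BO created with @bo_flags may be placed in.
 */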
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
					  uint64_t bo_flags)
{
	uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;

#if defined(CONFIG_DRM_AMD_DC)
	/*
	 * If amdgpu_bo_support_uswc returns false it means that USWC mappings
	 * are not supported for this board.  But this mapping is required
	 * to avoid hangs caused by placement of scanout BO in GTT on certain
	 * APUs.  So force the BO placement to VRAM in case this architecture
	 * will not allow USWC mappings.
	 * Also, don't allow GTT domain if the BO doesn't have USWC flag set.
	 */
	if ((bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) &&
	    amdgpu_bo_support_uswc(bo_flags) &&
	    adev->dc_enabled &&
	    adev->mode_info.gpu_vm_support)
		domain |= AMDGPU_GEM_DOMAIN_GTT;
#endif

	return domain;
}

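/*
 * Variants of the core RGB formats with one extra plane implied by an
 * AMD DCC modifier: plane 1 carries the DCC metadata.
 */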
static const struct drm_format_info dcc_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 2,
	  .cpp = { 4, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 2,
	  .cpp = { 2, 0, }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

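/*
 * As above, but for DCC_RETILE modifiers: plane 1 is the displayable DCC
 * surface and plane 2 the pipe-aligned DCC metadata.
 */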
static const struct drm_format_info dcc_retile_formats[] = {
	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
	{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 3,
	  .cpp = { 4, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1,
	  .has_alpha = true, },
	{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 3,
	  .cpp = { 2, 0, 0 }, .block_w = {1, 1, 1}, .block_h = {1, 1, 1}, .hsub = 1, .vsub = 1, },
};

static const struct drm_format_info *
lookup_format_info(const struct drm_format_info formats[],
		   int num_formats, u32 format)
{
	int i;

	for (i = 0; i < num_formats; i++) {
		if (formats[i].format == format)
			return &formats[i];
	}

	return NULL;
}

const struct drm_format_info *
amdgpu_lookup_format_info(u32 format, uint64_t modifier)
{
	if (!IS_AMD_FMT_MOD(modifier))
		return NULL;

	if (AMD_FMT_MOD_GET(DCC_RETILE, modifier))
		return lookup_format_info(dcc_retile_formats,
					  ARRAY_SIZE(dcc_retile_formats),
					  format);

	if (AMD_FMT_MOD_GET(DCC, modifier))
		return lookup_format_info(dcc_formats, ARRAY_SIZE(dcc_formats),
					  format);

	/* returning NULL will cause the default format structs to be used. */
	return NULL;
}

/*
 * Tries to extract the renderable DCC offset from the opaque metadata attached
 * to the buffer.
 */
static int
extract_render_dcc_offset(struct amdgpu_device *adev,
			  struct drm_gem_object *obj,
			  uint64_t *offset)
{
	struct amdgpu_bo *rbo;
	int r = 0;
	uint32_t metadata[10]; /* Something that fits a descriptor + header. */
	uint32_t size;

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	r = amdgpu_bo_get_metadata(rbo, metadata, sizeof(metadata), &size, NULL);
	amdgpu_bo_unreserve(rbo);

	if (r)
		return r;

	/*
	 * The first word is the metadata version, and we need space for at least
	 * the version + pci vendor+device id + 8 words for a descriptor.
	 */
	if (size < 40 || metadata[0] != 1)
		return -EINVAL;

	if (adev->family >= AMDGPU_FAMILY_NV) {
		/* resource word 6/7 META_DATA_ADDRESS{_LO} */
		*offset = ((u64)metadata[9] << 16u) |
			  ((metadata[8] & 0xFF000000u) >> 16);
	} else {
		/* resource word 5/7 META_DATA_ADDRESS */
		*offset = ((u64)metadata[9] << 8u) |
			  ((u64)(metadata[7] & 0x1FE0000u) << 23);
	}

	return 0;
}

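/*
 * On GFX12 the tiling flags only carry a swizzle mode; build the
 * corresponding modifier without any DCC state.
 */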
static int convert_tiling_flags_to_modifier_gfx12(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	const struct drm_format_info *format_info;
	u64 modifier = 0;
	int tile = 0;
	int swizzle = 0;

	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
		tile = AMD_FMT_MOD_TILE_VER_GFX12;
		swizzle = AMDGPU_TILING_GET(afb->tiling_flags, GFX12_SWIZZLE_MODE);
	}

	modifier = AMD_FMT_MOD |
		   AMD_FMT_MOD_SET(TILE, swizzle) |
		   AMD_FMT_MOD_SET(TILE_VERSION, tile) |
		   AMD_FMT_MOD_SET(DCC, 0) |
		   AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, 0);

	format_info = amdgpu_lookup_format_info(afb->base.format->format,
						modifier);
	if (format_info)
		afb->base.format = format_info;

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
{
	struct amdgpu_device *adev = drm_to_adev(afb->base.dev);
	uint64_t modifier = 0;
	int num_pipes = 0;
	int num_pkrs = 0;

	num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
	num_pipes = adev->gfx.config.gb_addr_config_fields.num_pipes;

	if (!afb->tiling_flags || !AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) {
		modifier = DRM_FORMAT_MOD_LINEAR;
	} else {
		int swizzle = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE);
		bool has_xor = swizzle >= 16;
		int block_size_bits;
		int version;
		int pipe_xor_bits = 0;
		int bank_xor_bits = 0;
		int packers = 0;
		int rb = 0;
		int pipes = ilog2(num_pipes);
		uint32_t dcc_offset = AMDGPU_TILING_GET(afb->tiling_flags, DCC_OFFSET_256B);

		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0)) {
			convert_tiling_flags_to_modifier_gfx12(afb);
			return 0;
		}

		switch (swizzle >> 2) {
		case 0: /* 256B */
			block_size_bits = 8;
			break;
		case 1: /* 4KiB */
		case 5: /* 4KiB _X */
			block_size_bits = 12;
			break;
		case 2: /* 64KiB */
		case 4: /* 64 KiB _T */
		case 6: /* 64 KiB _X */
			block_size_bits = 16;
			break;
		case 7: /* 256 KiB */
			block_size_bits = 18;
			break;
		default:
			/* RESERVED or VAR */
			return -EINVAL;
		}

		if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX11;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 3, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
		else if (amdgpu_ip_version(adev, GC_HWIP, 0) >=
			 IP_VERSION(10, 0, 0))
			version = AMD_FMT_MOD_TILE_VER_GFX10;
		else
			version = AMD_FMT_MOD_TILE_VER_GFX9;

		switch (swizzle & 3) {
		case 0: /* Z microtiling */
			return -EINVAL;
		case 1: /* S microtiling */
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 2:
			if (amdgpu_ip_version(adev, GC_HWIP, 0) <
			    IP_VERSION(11, 0, 0)) {
				if (!has_xor && afb->base.format->cpp[0] != 4)
					version = AMD_FMT_MOD_TILE_VER_GFX9;
			}
			break;
		case 3:
			return -EINVAL;
		}

		if (has_xor) {
			if (num_pipes == num_pkrs && num_pkrs == 0) {
				DRM_ERROR("invalid number of pipes and packers\n");
				return -EINVAL;
			}

			switch (version) {
			case AMD_FMT_MOD_TILE_VER_GFX11:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				packers = min(block_size_bits - 8 - pipe_xor_bits,
					      ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs));
				break;
			case AMD_FMT_MOD_TILE_VER_GFX10:
				pipe_xor_bits = min(block_size_bits - 8, pipes);
				break;
			case AMD_FMT_MOD_TILE_VER_GFX9:
				rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
				     ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
				pipe_xor_bits = min(block_size_bits - 8, pipes +
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
				bank_xor_bits = min(block_size_bits - 8 - pipe_xor_bits,
						    ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
				break;
			}
		}

		modifier = AMD_FMT_MOD |
			   AMD_FMT_MOD_SET(TILE, AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE)) |
			   AMD_FMT_MOD_SET(TILE_VERSION, version) |
			   AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
			   AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
			   AMD_FMT_MOD_SET(PACKERS, packers);

		if (dcc_offset != 0) {
			bool dcc_i64b = AMDGPU_TILING_GET(afb->tiling_flags, DCC_INDEPENDENT_64B) != 0;
			bool dcc_i128b = version >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS;
			const struct drm_format_info *format_info;
			u64 render_dcc_offset;

			/* Enable constant encode on RAVEN2 and later. */
			bool dcc_constant_encode =
				(adev->asic_type > CHIP_RAVEN ||
				 (adev->asic_type == CHIP_RAVEN &&
				  adev->external_rev_id >= 0x81)) &&
				amdgpu_ip_version(adev, GC_HWIP, 0) <
					IP_VERSION(11, 0, 0);

			int max_cblock_size = dcc_i64b ? AMD_FMT_MOD_DCC_BLOCK_64B :
					      dcc_i128b ? AMD_FMT_MOD_DCC_BLOCK_128B :
					      AMD_FMT_MOD_DCC_BLOCK_256B;

			modifier |= AMD_FMT_MOD_SET(DCC, 1) |
				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, dcc_constant_encode) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, dcc_i64b) |
				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, dcc_i128b) |
				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, max_cblock_size);

			afb->base.offsets[1] = dcc_offset * 256 + afb->base.offsets[0];
			afb->base.pitches[1] =
				AMDGPU_TILING_GET(afb->tiling_flags, DCC_PITCH_MAX) + 1;

			/*
			 * If the userspace driver uses retiling the tiling flags do not contain
			 * info on the renderable DCC buffer. Luckily the opaque metadata contains
			 * the info so we can try to extract it. The kernel does not use this info
			 * but we should convert it to a modifier plane for getfb2, so the
			 * userspace driver that gets it doesn't have to juggle around another DCC
			 * plane internally.
			 */
			if (extract_render_dcc_offset(adev, afb->base.obj[0],
						      &render_dcc_offset) == 0 &&
			    render_dcc_offset != 0 &&
			    render_dcc_offset != afb->base.offsets[1] &&
			    render_dcc_offset < UINT_MAX) {
				uint32_t dcc_block_bits; /* of base surface data */

				modifier |= AMD_FMT_MOD_SET(DCC_RETILE, 1);
				afb->base.offsets[2] = render_dcc_offset;

				if (adev->family >= AMDGPU_FAMILY_NV) {
					int extra_pipe = 0;

					if ((amdgpu_ip_version(adev, GC_HWIP,
							       0) >=
					     IP_VERSION(10, 3, 0)) &&
					    pipes == packers && pipes > 1)
						extra_pipe = 1;

					dcc_block_bits = max(20, 16 + pipes + extra_pipe);
				} else {
					modifier |= AMD_FMT_MOD_SET(RB, rb) |
						    AMD_FMT_MOD_SET(PIPE, pipes);
					dcc_block_bits = max(20, 18 + rb);
				}

				dcc_block_bits -= ilog2(afb->base.format->cpp[0]);
				afb->base.pitches[2] = ALIGN(afb->base.width,
							     1u << ((dcc_block_bits + 1) / 2));
			}

			format_info = amdgpu_lookup_format_info(afb->base.format->format,
								modifier);
			if (!format_info)
				return -EINVAL;

			afb->base.format = format_info;
		}
	}

	afb->base.modifier = modifier;
	afb->base.flags |= DRM_MODE_FB_MODIFIERS;
	return 0;
}

/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
{
	u64 micro_tile_mode;

	/* Zero swizzle mode means linear */
	if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
		return 0;

	micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
	switch (micro_tile_mode) {
	case 0: /* DISPLAY */
	case 3: /* RENDER */
		return 0;
	default:
		drm_dbg_kms(afb->base.dev,
			    "Micro tile mode %llu not supported for scanout\n",
			    micro_tile_mode);
		return -EINVAL;
	}
}

static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
				 unsigned int *width, unsigned int *height)
{
	unsigned int cpp_log2 = ilog2(cpp);
	unsigned int pixel_log2 = block_log2 - cpp_log2;
	unsigned int width_log2 = (pixel_log2 + 1) / 2;
	unsigned int height_log2 = pixel_log2 - width_log2;

	*width = 1 << width_log2;
	*height = 1 << height_log2;
}

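/*
 * Returns log2 of the DCC block size in bytes; one DCC byte covers 256
 * bytes of the main surface, hence the "+ 8" when callers derive pixel
 * dimensions from this value.
 */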
static unsigned int get_dcc_block_size(uint64_t modifier, bool rb_aligned,
				       bool pipe_aligned)
{
	unsigned int ver = AMD_FMT_MOD_GET(TILE_VERSION, modifier);

	switch (ver) {
	case AMD_FMT_MOD_TILE_VER_GFX9: {
		/*
		 * TODO: for pipe aligned we may need to check the alignment of the
		 * total size of the surface, which may need to be bigger than the
		 * natural alignment due to some HW workarounds
		 */
		return max(10 + (rb_aligned ? (int)AMD_FMT_MOD_GET(RB, modifier) : 0), 12);
	}
	case AMD_FMT_MOD_TILE_VER_GFX10:
	case AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS:
	case AMD_FMT_MOD_TILE_VER_GFX11: {
		int pipes_log2 = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);

		if (ver >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && pipes_log2 > 1 &&
		    AMD_FMT_MOD_GET(PACKERS, modifier) == pipes_log2)
			++pipes_log2;

		return max(8 + (pipe_aligned ? pipes_log2 : 0), 12);
	}
	default:
		return 0;
	}
}

static int amdgpu_display_verify_plane(struct amdgpu_framebuffer *rfb, int plane,
				       const struct drm_format_info *format,
				       unsigned int block_width, unsigned int block_height,
				       unsigned int block_size_log2)
{
	unsigned int width = rfb->base.width /
		((plane && plane < format->num_planes) ? format->hsub : 1);
	unsigned int height = rfb->base.height /
		((plane && plane < format->num_planes) ? format->vsub : 1);
	unsigned int cpp = plane < format->num_planes ? format->cpp[plane] : 1;
	unsigned int block_pitch = block_width * cpp;
	unsigned int min_pitch = ALIGN(width * cpp, block_pitch);
	unsigned int block_size = 1 << block_size_log2;
	uint64_t size;

	if (rfb->base.pitches[plane] % block_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is not a multiple of block pitch %d\n",
			    rfb->base.pitches[plane], plane, block_pitch);
		return -EINVAL;
	}
	if (rfb->base.pitches[plane] < min_pitch) {
		drm_dbg_kms(rfb->base.dev,
			    "pitch %d for plane %d is less than minimum pitch %d\n",
			    rfb->base.pitches[plane], plane, min_pitch);
		return -EINVAL;
	}

	/* Force at least natural alignment. */
	if (rfb->base.offsets[plane] % block_size) {
		drm_dbg_kms(rfb->base.dev,
			    "offset 0x%x for plane %d is not a multiple of block size 0x%x\n",
			    rfb->base.offsets[plane], plane, block_size);
		return -EINVAL;
	}

	size = rfb->base.offsets[plane] +
		(uint64_t)rfb->base.pitches[plane] / block_pitch *
		block_size * DIV_ROUND_UP(height, block_height);

	if (rfb->base.obj[0]->size < size) {
		drm_dbg_kms(rfb->base.dev,
			    "BO size 0x%zx is less than 0x%llx required for plane %d\n",
			    rfb->base.obj[0]->size, size, plane);
		return -EINVAL;
	}

	return 0;
}

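/*
 * Validates pitch, offset alignment and BO size for every plane implied
 * by the format and modifier, including any extra DCC planes.
 */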
static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb)
{
	const struct drm_format_info *format_info = drm_format_info(rfb->base.format->format);
	uint64_t modifier = rfb->base.modifier;
	int ret;
	unsigned int i, block_width, block_height, block_size_log2;

	if (rfb->base.dev->mode_config.fb_modifiers_not_supported)
		return 0;

	for (i = 0; i < format_info->num_planes; ++i) {
		if (modifier == DRM_FORMAT_MOD_LINEAR) {
			block_width = 256 / format_info->cpp[i];
			block_height = 1;
			block_size_log2 = 8;
		} else {
			int swizzle = AMD_FMT_MOD_GET(TILE, modifier);

			switch ((swizzle & ~3) + 1) {
			case DC_SW_256B_S:
				block_size_log2 = 8;
				break;
			case DC_SW_4KB_S:
			case DC_SW_4KB_S_X:
				block_size_log2 = 12;
				break;
			case DC_SW_64KB_S:
			case DC_SW_64KB_S_T:
			case DC_SW_64KB_S_X:
				block_size_log2 = 16;
				break;
			case DC_SW_256KB_S_X:
				block_size_log2 = 18;
				break;
			default:
				drm_dbg_kms(rfb->base.dev,
					    "Swizzle mode with unknown block size: %d\n", swizzle);
				return -EINVAL;
			}

			get_block_dimensions(block_size_log2, format_info->cpp[i],
					     &block_width, &block_height);
		}

		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	if (AMD_FMT_MOD_GET(DCC, modifier)) {
		if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
			block_size_log2 = get_dcc_block_size(modifier, false, false);
			get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
					     &block_width, &block_height);
			ret = amdgpu_display_verify_plane(rfb, i, format_info,
							  block_width, block_height,
							  block_size_log2);
			if (ret)
				return ret;

			++i;
			block_size_log2 = get_dcc_block_size(modifier, true, true);
		} else {
			bool pipe_aligned = AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);

			block_size_log2 = get_dcc_block_size(modifier, true, pipe_aligned);
		}
		get_block_dimensions(block_size_log2 + 8, format_info->cpp[0],
				     &block_width, &block_height);
		ret = amdgpu_display_verify_plane(rfb, i, format_info,
						  block_width, block_height, block_size_log2);
		if (ret)
			return ret;
	}

	return 0;
}

static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
				      uint64_t *tiling_flags, bool *tmz_surface)
{
	struct amdgpu_bo *rbo;
	int r;

	if (!amdgpu_fb) {
		*tiling_flags = 0;
		*tmz_surface = false;
		return 0;
	}

	rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
	r = amdgpu_bo_reserve(rbo, false);

	if (unlikely(r)) {
		/* Don't show error message when returning -ERESTARTSYS */
		if (r != -ERESTARTSYS)
			DRM_ERROR("Unable to reserve buffer: %d\n", r);
		return r;
	}

	if (tiling_flags)
		amdgpu_bo_get_tiling_flags(rbo, tiling_flags);

	if (tmz_surface)
		*tmz_surface = amdgpu_bo_encrypted(rbo);

	amdgpu_bo_unreserve(rbo);

	return r;
}

static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev,
						 struct amdgpu_framebuffer *rfb,
						 struct drm_file *file_priv,
						 const struct drm_mode_fb_cmd2 *mode_cmd,
						 struct drm_gem_object *obj)
{
	int ret;

	rfb->base.obj[0] = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	/* Verify that the modifier is supported. */
	if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format,
				      mode_cmd->modifier[0])) {
		drm_dbg_kms(dev,
			    "unsupported pixel format %p4cc / modifier 0x%llx\n",
			    &mode_cmd->pixel_format, mode_cmd->modifier[0]);

		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
	if (ret)
		goto err;

	if (drm_drv_uses_atomic_modeset(dev))
		ret = drm_framebuffer_init(dev, &rfb->base,
					   &amdgpu_fb_funcs_atomic);
	else
		ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);

	if (ret)
		goto err;

	return 0;
err:
	drm_dbg_kms(dev, "Failed to verify and init gem fb: %d\n", ret);
	rfb->base.obj[0] = NULL;
	return ret;
}

static int amdgpu_display_framebuffer_init(struct drm_device *dev,
					   struct amdgpu_framebuffer *rfb,
					   const struct drm_mode_fb_cmd2 *mode_cmd,
					   struct drm_gem_object *obj)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int ret, i;

	/*
	 * This needs to happen before modifier conversion as that might change
	 * the number of planes.
	 */
	for (i = 1; i < rfb->base.format->num_planes; ++i) {
		if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
			drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
				    i, mode_cmd->handles[0], mode_cmd->handles[i]);
			ret = -EINVAL;
			return ret;
		}
	}

	ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
	if (ret)
		return ret;

	if (dev->mode_config.fb_modifiers_not_supported && !adev->enable_virtual_display) {
		drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
			      "GFX9+ requires FB check based on format modifier\n");
		ret = check_tiling_flags_gfx6(rfb);
		if (ret)
			return ret;
	}

	if (!dev->mode_config.fb_modifiers_not_supported &&
	    !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
		ret = convert_tiling_flags_to_modifier(rfb);
		if (ret) {
			drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
				    rfb->tiling_flags);
			return ret;
		}
	}

	ret = amdgpu_display_verify_sizes(rfb);
	if (ret)
		return ret;

	for (i = 0; i < rfb->base.format->num_planes; ++i) {
		drm_gem_object_get(rfb->base.obj[0]);
		rfb->base.obj[i] = rfb->base.obj[0];
	}

	return 0;
}

struct drm_framebuffer *
amdgpu_display_user_framebuffer_create(struct drm_device *dev,
				       struct drm_file *file_priv,
				       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct amdgpu_framebuffer *amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint32_t domains;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		drm_dbg_kms(dev,
			    "No GEM object associated to handle 0x%08X, can't create framebuffer\n",
			    mode_cmd->handles[0]);

		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	bo = gem_to_amdgpu_bo(obj);
	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
		drm_gem_object_put(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_put(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
						    mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_put(obj);
		return ERR_PTR(ret);
	}

	drm_gem_object_put(obj);
	return &amdgpu_fb->base;
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] = {
	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] = {
	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] = {
	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev_to_drm(adev), 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev_to_drm(adev), 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev_to_drm(adev));

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev_to_drm(adev), 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev_to_drm(adev), 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_display_update_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool amdgpu_display_is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

bool amdgpu_display_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
					    const struct drm_display_mode *mode,
					    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      connector->display_info.is_hdmi &&
		      amdgpu_display_is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;

		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when the query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_INVBL = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
			unsigned int pipe, unsigned int flags, int *vpos,
			int *hpos, ktime_t *stime, ktime_t *etime,
			const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = drm_to_adev(dev);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;

		/* With variable refresh rate displays the vpos can exceed
		 * the vtotal value. Clamp to 0 to return -vbl_end instead
		 * of guessing the remaining number of lines until scanout.
		 */
		*vpos = (*vpos < vtotal) ? (*vpos - vtotal) : 0;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}

bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
				      bool in_vblank_irq, int *vpos,
				      int *hpos, ktime_t *stime, ktime_t *etime,
				      const struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
						  stime, etime, mode);
}

static bool
amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_fb_helper *fb_helper = dev->fb_helper;

	if (!fb_helper || !fb_helper->buffer)
		return false;

	if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
		return false;

	return true;
}

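/*
 * Unpin scanout framebuffers and cursors so their BOs can be evicted
 * around suspend; the fbdev buffer (see amdgpu_display_robj_is_fb())
 * is left pinned.
 */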
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	int r;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	drm_modeset_lock_all(dev);
	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_OFF);
	drm_connector_list_iter_end(&iter);
	drm_modeset_unlock_all(dev);
	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct drm_framebuffer *fb = crtc->primary->fb;
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (!fb || !fb->obj[0])
			continue;

		robj = gem_to_amdgpu_bo(fb->obj[0]);
		if (!amdgpu_display_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	return 0;
}

int amdgpu_display_resume_helper(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_crtc *crtc;
	int r;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);

			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
				if (r != 0)
					dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
				amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}

	drm_helper_resume_force_mode(dev);

	/* turn on display hw */
	drm_modeset_lock_all(dev);

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		drm_helper_connector_dpms(connector,
					  DRM_MODE_DPMS_ON);
	drm_connector_list_iter_end(&iter);

	drm_modeset_unlock_all(dev);

	drm_kms_helper_poll_enable(dev);

	return 0;
}