/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include <asm/div64.h>

#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>

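/* Fence callback for a pending flip: release the fence and kick the flip
 * work so it can process any remaining fences.
 */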
static void amdgpu_flip_callback(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct amdgpu_flip_work *work =
		container_of(cb, struct amdgpu_flip_work, cb);

	dma_fence_put(f);
	schedule_work(&work->flip_work.work);
}

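/* Defer the flip on an unsignaled fence by installing amdgpu_flip_callback
 * on it; returns true if the caller must wait, false once the fence has
 * been consumed.
 */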
static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
				     struct dma_fence **f)
{
	struct dma_fence *fence = *f;

	if (fence == NULL)
		return false;

	*f = NULL;

	if (!dma_fence_add_callback(fence, &work->cb, amdgpu_flip_callback))
		return true;

	dma_fence_put(fence);
	return false;
}

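/* Deferred flip handler: once all fences have signaled, wait until we are
 * out of the vblank before the target one, then program the flip via mmio.
 */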
static void amdgpu_flip_work_func(struct work_struct *__work)
{
	struct delayed_work *delayed_work =
		container_of(__work, struct delayed_work, work);
	struct amdgpu_flip_work *work =
		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
	struct amdgpu_device *adev = work->adev;
	struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[work->crtc_id];

	struct drm_crtc *crtc = &amdgpu_crtc->base;
	unsigned long flags;
	unsigned i;
	int vpos, hpos;

	if (amdgpu_flip_handle_fence(work, &work->excl))
		return;

	for (i = 0; i < work->shared_count; ++i)
		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
			return;

	/* Wait until we're out of the vertical blank period before the one
	 * targeted by the flip
	 */
	if (amdgpu_crtc->enabled &&
	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
					&vpos, &hpos, NULL, NULL,
					&crtc->hwmode)
	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
	    (int)(work->target_vblank -
		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
		return;
	}

	/* We borrow the event spin lock for protecting flip_status */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);

	/* Do the flip (mmio) */
	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);

	/* Set the flip status */
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_SUBMITTED, work: %p,\n",
			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
}

/*
 * Handle unpin events outside the interrupt handler proper.
 */
static void amdgpu_unpin_work_func(struct work_struct *__work)
{
	struct amdgpu_flip_work *work =
		container_of(__work, struct amdgpu_flip_work, unpin_work);
	int r;

	/* unpin of the old buffer */
	r = amdgpu_bo_reserve(work->old_abo, false);
	if (likely(r == 0)) {
		r = amdgpu_bo_unpin(work->old_abo);
		if (unlikely(r != 0)) {
			DRM_ERROR("failed to unpin buffer after flip\n");
		}
		amdgpu_bo_unreserve(work->old_abo);
	} else
		DRM_ERROR("failed to reserve buffer after flip\n");

	amdgpu_bo_unref(&work->old_abo);
	kfree(work->shared);
	kfree(work);
}

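/* Error path helpers: unwind a partially constructed flip in the reverse
 * order of what amdgpu_crtc_prepare_flip() set up.
 */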
static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
{
	int i;

	amdgpu_bo_unref(&work->old_abo);
	dma_fence_put(work->excl);
	for (i = 0; i < work->shared_count; ++i)
		dma_fence_put(work->shared[i]);
	kfree(work->shared);
	kfree(work);
}

static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
					  struct amdgpu_bo *new_abo)
{
	amdgpu_bo_unreserve(new_abo);
	amdgpu_flip_work_cleanup(work);
}

static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
				      struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
		DRM_ERROR("failed to unpin new abo in error path\n");
	amdgpu_flip_cleanup_unreserve(work, new_abo);
}

void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
				  struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_reserve(new_abo, true) != 0)) {
		DRM_ERROR("failed to reserve new abo in error path\n");
		amdgpu_flip_work_cleanup(work);
		return;
	}
	amdgpu_flip_cleanup_unpin(work, new_abo);
}

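/* Build a flip work item: reference the old buffer, pin the new one in
 * VRAM, collect its reservation fences and compute the target vblank.
 * On success the caller must pass *work_p to amdgpu_crtc_submit_flip().
 */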
int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct drm_pending_vblank_event *event,
			     uint32_t page_flip_flags,
			     uint32_t target,
			     struct amdgpu_flip_work **work_p,
			     struct amdgpu_bo **new_abo_p)
{
	struct drm_device *dev = crtc->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_framebuffer *old_amdgpu_fb;
	struct amdgpu_framebuffer *new_amdgpu_fb;
	struct drm_gem_object *obj;
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	unsigned long flags;
	u64 tiling_flags;
	u64 base;
	int r;

	work = kzalloc(sizeof *work, GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);

	work->event = event;
	work->adev = adev;
	work->crtc_id = amdgpu_crtc->crtc_id;
	work->async = (page_flip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;

	/* schedule unpin of the old buffer */
	old_amdgpu_fb = to_amdgpu_framebuffer(crtc->primary->fb);
	obj = old_amdgpu_fb->obj;

	/* take a reference to the old object */
	work->old_abo = gem_to_amdgpu_bo(obj);
	amdgpu_bo_ref(work->old_abo);

	new_amdgpu_fb = to_amdgpu_framebuffer(fb);
	obj = new_amdgpu_fb->obj;
	new_abo = gem_to_amdgpu_bo(obj);

	/* pin the new buffer */
	r = amdgpu_bo_reserve(new_abo, false);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to reserve new abo buffer before flip\n");
		goto cleanup;
	}

	r = amdgpu_bo_pin(new_abo, AMDGPU_GEM_DOMAIN_VRAM, &base);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to pin new abo buffer before flip\n");
		goto unreserve;
	}

	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
					      &work->shared_count,
					      &work->shared);
	if (unlikely(r != 0)) {
		DRM_ERROR("failed to get fences for buffer\n");
		goto unpin;
	}

	amdgpu_bo_get_tiling_flags(new_abo, &tiling_flags);
	amdgpu_bo_unreserve(new_abo);

	work->base = base;
	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);

	/* we borrow the event spin lock for protecting flip_work */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
		r = -EBUSY;
		goto pflip_cleanup;
	}
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	*work_p = work;
	*new_abo_p = new_abo;

	return 0;

pflip_cleanup:
	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
	return r;

unpin:
	amdgpu_flip_cleanup_unpin(work, new_abo);
	return r;

unreserve:
	amdgpu_flip_cleanup_unreserve(work, new_abo);
	return r;

cleanup:
	amdgpu_flip_work_cleanup(work);
	return r;
}

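/* Publish a prepared flip: mark it pending under the event lock, switch
 * the crtc to the new framebuffer and run the flip work once.
 */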
void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
			     struct drm_framebuffer *fb,
			     struct amdgpu_flip_work *work,
			     struct amdgpu_bo *new_abo)
{
	unsigned long flags;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
	amdgpu_crtc->pflip_works = work;

	/* update crtc fb */
	crtc->primary->fb = fb;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

	DRM_DEBUG_DRIVER(
		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
		amdgpu_crtc->crtc_id, amdgpu_crtc, work);

	amdgpu_flip_work_func(&work->flip_work.work);
}

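/* DRM page flip entry point: prepare the flip context, then submit it. */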
int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_pending_vblank_event *event,
				 uint32_t page_flip_flags,
				 uint32_t target,
				 struct drm_modeset_acquire_ctx *ctx)
{
	struct amdgpu_bo *new_abo;
	struct amdgpu_flip_work *work;
	int r;

	r = amdgpu_crtc_prepare_flip(crtc,
				     fb,
				     event,
				     page_flip_flags,
				     target,
				     &work,
				     &new_abo);
	if (r)
		return r;

	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);

	return 0;
}

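/* drm_crtc_helper_set_config() wrapper that keeps a runtime PM reference
 * held for as long as any crtc is enabled, dropping it when the last
 * display turns off.
 */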
int amdgpu_crtc_set_config(struct drm_mode_set *set,
			   struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev;
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	bool active = false;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	dev = set->crtc->dev;

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		return ret;

	ret = drm_crtc_helper_set_config(set, ctx);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		if (crtc->enabled)
			active = true;

	pm_runtime_mark_last_busy(dev->dev);

	adev = dev->dev_private;
	/* if we have active crtcs and we don't have a power ref,
	 * take the current one
	 */
	if (active && !adev->have_disp_power_ref) {
		adev->have_disp_power_ref = true;
		return ret;
	}
	/* if we have no active crtcs, then drop the power ref
	 * we got before
	 */
	if (!active && adev->have_disp_power_ref) {
		pm_runtime_put_autosuspend(dev->dev);
		adev->have_disp_power_ref = false;
	}

	/* drop the power reference we got coming in here */
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const char *encoder_names[41] = {
	"NONE",
	"INTERNAL_LVDS",
	"INTERNAL_TMDS1",
	"INTERNAL_TMDS2",
	"INTERNAL_DAC1",
	"INTERNAL_DAC2",
	"INTERNAL_SDVOA",
	"INTERNAL_SDVOB",
	"SI170B",
	"CH7303",
	"CH7301",
	"INTERNAL_DVO1",
	"EXTERNAL_SDVOA",
	"EXTERNAL_SDVOB",
	"TITFP513",
	"INTERNAL_LVTM1",
	"VT1623",
	"HDMI_SI1930",
	"HDMI_INTERNAL",
	"INTERNAL_KLDSCP_TMDS1",
	"INTERNAL_KLDSCP_DVO1",
	"INTERNAL_KLDSCP_DAC1",
	"INTERNAL_KLDSCP_DAC2",
	"SI178",
	"MVPU_FPGA",
	"INTERNAL_DDI",
	"VT1625",
	"HDMI_SI1932",
	"DP_AN9801",
	"DP_DP501",
	"INTERNAL_UNIPHY",
	"INTERNAL_KLDSCP_LVTMA",
	"INTERNAL_UNIPHY1",
	"INTERNAL_UNIPHY2",
	"NUTMEG",
	"TRAVIS",
	"INTERNAL_VCE",
	"INTERNAL_UNIPHY3",
	"HDMI_ANX9805",
	"INTERNAL_AMCLK",
	"VIRTUAL",
};

static const char *hpd_names[6] = {
	"HPD1",
	"HPD2",
	"HPD3",
	"HPD4",
	"HPD5",
	"HPD6",
};

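/* Dump the connector, DDC and encoder setup to the kernel log. */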
void amdgpu_print_display_setup(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	struct drm_encoder *encoder;
	struct amdgpu_encoder *amdgpu_encoder;
	uint32_t devices;
	int i = 0;

	DRM_INFO("AMDGPU Display Connectors\n");
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		amdgpu_connector = to_amdgpu_connector(connector);
		DRM_INFO("Connector %d:\n", i);
		DRM_INFO("  %s\n", connector->name);
		if (amdgpu_connector->hpd.hpd != AMDGPU_HPD_NONE)
			DRM_INFO("  %s\n", hpd_names[amdgpu_connector->hpd.hpd]);
		if (amdgpu_connector->ddc_bus) {
			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
				 amdgpu_connector->ddc_bus->rec.mask_clk_reg,
				 amdgpu_connector->ddc_bus->rec.mask_data_reg,
				 amdgpu_connector->ddc_bus->rec.a_clk_reg,
				 amdgpu_connector->ddc_bus->rec.a_data_reg,
				 amdgpu_connector->ddc_bus->rec.en_clk_reg,
				 amdgpu_connector->ddc_bus->rec.en_data_reg,
				 amdgpu_connector->ddc_bus->rec.y_clk_reg,
				 amdgpu_connector->ddc_bus->rec.y_data_reg);
			if (amdgpu_connector->router.ddc_valid)
				DRM_INFO("  DDC Router 0x%x/0x%x\n",
					 amdgpu_connector->router.ddc_mux_control_pin,
					 amdgpu_connector->router.ddc_mux_state);
			if (amdgpu_connector->router.cd_valid)
				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
					 amdgpu_connector->router.cd_mux_control_pin,
					 amdgpu_connector->router.cd_mux_state);
		} else {
			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
			    connector->connector_type == DRM_MODE_CONNECTOR_DVIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
			    connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
				DRM_INFO("  DDC: no ddc bus - possible BIOS bug - please report to [email protected]\n");
		}
		DRM_INFO("  Encoders:\n");
		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
			amdgpu_encoder = to_amdgpu_encoder(encoder);
			devices = amdgpu_encoder->devices & amdgpu_connector->devices;
			if (devices) {
				if (devices & ATOM_DEVICE_CRT1_SUPPORT)
					DRM_INFO("    CRT1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CRT2_SUPPORT)
					DRM_INFO("    CRT2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_LCD1_SUPPORT)
					DRM_INFO("    LCD1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP1_SUPPORT)
					DRM_INFO("    DFP1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP2_SUPPORT)
					DRM_INFO("    DFP2: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP3_SUPPORT)
					DRM_INFO("    DFP3: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP4_SUPPORT)
					DRM_INFO("    DFP4: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP5_SUPPORT)
					DRM_INFO("    DFP5: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_DFP6_SUPPORT)
					DRM_INFO("    DFP6: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_TV1_SUPPORT)
					DRM_INFO("    TV1: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
				if (devices & ATOM_DEVICE_CV_SUPPORT)
					DRM_INFO("    CV: %s\n", encoder_names[amdgpu_encoder->encoder_id]);
			}
		}
		i++;
	}
}

/**
 * amdgpu_ddc_probe
 *
 * Probe for a connected monitor by writing the EDID offset and reading
 * back the first 8 bytes over the connector's DDC bus (or DP aux channel
 * when use_aux is set), then sanity checking for a valid EDID header.
 */
bool amdgpu_ddc_probe(struct amdgpu_connector *amdgpu_connector,
		      bool use_aux)
{
	u8 out = 0x0;
	u8 buf[8];
	int ret;
	struct i2c_msg msgs[] = {
		{
			.addr = DDC_ADDR,
			.flags = 0,
			.len = 1,
			.buf = &out,
		},
		{
			.addr = DDC_ADDR,
			.flags = I2C_M_RD,
			.len = 8,
			.buf = buf,
		}
	};

	/* on hw with routers, select right port */
	if (amdgpu_connector->router.ddc_valid)
		amdgpu_i2c_router_select_ddc_port(amdgpu_connector);

	if (use_aux) {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->aux.ddc, msgs, 2);
	} else {
		ret = i2c_transfer(&amdgpu_connector->ddc_bus->adapter, msgs, 2);
	}

	if (ret != 2)
		/* Couldn't find an accessible DDC on this connector */
		return false;
	/* Probe also for valid EDID header
	 * EDID header starts with:
	 * 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
	 * Only the first 6 bytes must be valid as
	 * drm_edid_block_valid() can fix the last 2 bytes
	 */
	if (drm_edid_header_is_valid(buf) < 6) {
		/* Couldn't find an accessible EDID on this
		 * connector
		 */
		return false;
	}
	return true;
}

static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
	drm_framebuffer_cleanup(fb);
	kfree(amdgpu_fb);
}

static int amdgpu_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						 struct drm_file *file_priv,
						 unsigned int *handle)
{
	struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);

	return drm_gem_handle_create(file_priv, amdgpu_fb->obj, handle);
}

static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
	.destroy = amdgpu_user_framebuffer_destroy,
	.create_handle = amdgpu_user_framebuffer_create_handle,
};

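/* Fill an amdgpu_framebuffer from the mode command and GEM object, then
 * register it with the DRM core.
 */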
int
amdgpu_framebuffer_init(struct drm_device *dev,
			struct amdgpu_framebuffer *rfb,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *obj)
{
	int ret;
	rfb->obj = obj;
	drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
	ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
	if (ret) {
		rfb->obj = NULL;
		return ret;
	}
	return 0;
}

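/* .fb_create handler: resolve the userspace GEM handle and wrap the
 * object in an amdgpu_framebuffer; imported dma-bufs are rejected as
 * they cannot be migrated to VRAM for scanout.
 */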
static struct drm_framebuffer *
amdgpu_user_framebuffer_create(struct drm_device *dev,
			       struct drm_file *file_priv,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_gem_object *obj;
	struct amdgpu_framebuffer *amdgpu_fb;
	int ret;

	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (obj == NULL) {
		dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
			"can't create framebuffer\n", mode_cmd->handles[0]);
		return ERR_PTR(-ENOENT);
	}

	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
	if (obj->import_attach) {
		DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
		/* drop the reference taken by the lookup above */
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-EINVAL);
	}

	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
	if (amdgpu_fb == NULL) {
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(-ENOMEM);
	}

	ret = amdgpu_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
	if (ret) {
		kfree(amdgpu_fb);
		drm_gem_object_unreference_unlocked(obj);
		return ERR_PTR(ret);
	}

	return &amdgpu_fb->base;
}

static void amdgpu_output_poll_changed(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;
	amdgpu_fb_output_poll_changed(adev);
}

const struct drm_mode_config_funcs amdgpu_mode_funcs = {
	.fb_create = amdgpu_user_framebuffer_create,
	.output_poll_changed = amdgpu_output_poll_changed
};

static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
{	{ UNDERSCAN_OFF, "off" },
	{ UNDERSCAN_ON, "on" },
	{ UNDERSCAN_AUTO, "auto" },
};

static const struct drm_prop_enum_list amdgpu_audio_enum_list[] =
{	{ AMDGPU_AUDIO_DISABLE, "off" },
	{ AMDGPU_AUDIO_ENABLE, "on" },
	{ AMDGPU_AUDIO_AUTO, "auto" },
};

/* XXX support different dither options? spatial, temporal, both, etc. */
static const struct drm_prop_enum_list amdgpu_dither_enum_list[] =
{	{ AMDGPU_FMT_DITHER_DISABLE, "off" },
	{ AMDGPU_FMT_DITHER_ENABLE, "on" },
};

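/* Create the driver private KMS properties: coherent mode, load detection,
 * scaling mode, underscan borders, audio and dither.
 */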
int amdgpu_modeset_create_props(struct amdgpu_device *adev)
{
	int sz;

	adev->mode_info.coherent_mode_property =
		drm_property_create_range(adev->ddev, 0, "coherent", 0, 1);
	if (!adev->mode_info.coherent_mode_property)
		return -ENOMEM;

	adev->mode_info.load_detect_property =
		drm_property_create_range(adev->ddev, 0, "load detection", 0, 1);
	if (!adev->mode_info.load_detect_property)
		return -ENOMEM;

	drm_mode_create_scaling_mode_property(adev->ddev);

	sz = ARRAY_SIZE(amdgpu_underscan_enum_list);
	adev->mode_info.underscan_property =
		drm_property_create_enum(adev->ddev, 0,
					 "underscan",
					 amdgpu_underscan_enum_list, sz);

	adev->mode_info.underscan_hborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan hborder", 0, 128);
	if (!adev->mode_info.underscan_hborder_property)
		return -ENOMEM;

	adev->mode_info.underscan_vborder_property =
		drm_property_create_range(adev->ddev, 0,
					  "underscan vborder", 0, 128);
	if (!adev->mode_info.underscan_vborder_property)
		return -ENOMEM;

	sz = ARRAY_SIZE(amdgpu_audio_enum_list);
	adev->mode_info.audio_property =
		drm_property_create_enum(adev->ddev, 0,
					 "audio",
					 amdgpu_audio_enum_list, sz);

	sz = ARRAY_SIZE(amdgpu_dither_enum_list);
	adev->mode_info.dither_property =
		drm_property_create_enum(adev->ddev, 0,
					 "dither",
					 amdgpu_dither_enum_list, sz);

	return 0;
}

void amdgpu_update_display_priority(struct amdgpu_device *adev)
{
	/* adjustment options for the display watermarks */
	if ((amdgpu_disp_priority == 0) || (amdgpu_disp_priority > 2))
		adev->mode_info.disp_priority = 0;
	else
		adev->mode_info.disp_priority = amdgpu_disp_priority;
}

static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
	/* try and guess if this is a tv or a monitor */
	if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
	    (mode->vdisplay == 576) || /* 576p */
	    (mode->vdisplay == 720) || /* 720p */
	    (mode->vdisplay == 1080)) /* 1080p */
		return true;
	else
		return false;
}

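/* Called during mode fixup: choose the RMX scaler type for the crtc,
 * apply underscan borders for HDMI TVs and precompute the h/v scaling
 * ratios as fixed-point fractions.
 */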
bool amdgpu_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct amdgpu_encoder *amdgpu_encoder;
	struct drm_connector *connector;
	struct amdgpu_connector *amdgpu_connector;
	u32 src_v = 1, dst_v = 1;
	u32 src_h = 1, dst_h = 1;

	amdgpu_crtc->h_border = 0;
	amdgpu_crtc->v_border = 0;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		amdgpu_encoder = to_amdgpu_encoder(encoder);
		connector = amdgpu_get_connector_for_encoder(encoder);
		amdgpu_connector = to_amdgpu_connector(connector);

		/* set scaling */
		if (amdgpu_encoder->rmx_type == RMX_OFF)
			amdgpu_crtc->rmx_type = RMX_OFF;
		else if (mode->hdisplay < amdgpu_encoder->native_mode.hdisplay ||
			 mode->vdisplay < amdgpu_encoder->native_mode.vdisplay)
			amdgpu_crtc->rmx_type = amdgpu_encoder->rmx_type;
		else
			amdgpu_crtc->rmx_type = RMX_OFF;
		/* copy native mode */
		memcpy(&amdgpu_crtc->native_mode,
		       &amdgpu_encoder->native_mode,
		       sizeof(struct drm_display_mode));
		src_v = crtc->mode.vdisplay;
		dst_v = amdgpu_crtc->native_mode.vdisplay;
		src_h = crtc->mode.hdisplay;
		dst_h = amdgpu_crtc->native_mode.hdisplay;

		/* fix up for overscan on hdmi */
		if ((!(mode->flags & DRM_MODE_FLAG_INTERLACE)) &&
		    ((amdgpu_encoder->underscan_type == UNDERSCAN_ON) ||
		     ((amdgpu_encoder->underscan_type == UNDERSCAN_AUTO) &&
		      drm_detect_hdmi_monitor(amdgpu_connector_edid(connector)) &&
		      is_hdtv_mode(mode)))) {
			if (amdgpu_encoder->underscan_hborder != 0)
				amdgpu_crtc->h_border = amdgpu_encoder->underscan_hborder;
			else
				amdgpu_crtc->h_border = (mode->hdisplay >> 5) + 16;
			if (amdgpu_encoder->underscan_vborder != 0)
				amdgpu_crtc->v_border = amdgpu_encoder->underscan_vborder;
			else
				amdgpu_crtc->v_border = (mode->vdisplay >> 5) + 16;
			amdgpu_crtc->rmx_type = RMX_FULL;
			src_v = crtc->mode.vdisplay;
			dst_v = crtc->mode.vdisplay - (amdgpu_crtc->v_border * 2);
			src_h = crtc->mode.hdisplay;
			dst_h = crtc->mode.hdisplay - (amdgpu_crtc->h_border * 2);
		}
	}
	if (amdgpu_crtc->rmx_type != RMX_OFF) {
		fixed20_12 a, b;
		a.full = dfixed_const(src_v);
		b.full = dfixed_const(dst_v);
		amdgpu_crtc->vsc.full = dfixed_div(a, b);
		a.full = dfixed_const(src_h);
		b.full = dfixed_const(dst_h);
		amdgpu_crtc->hsc.full = dfixed_div(a, b);
	} else {
		amdgpu_crtc->vsc.full = dfixed_const(1);
		amdgpu_crtc->hsc.full = dfixed_const(1);
	}
	return true;
}

/*
 * Retrieve current video scanout position of crtc on a given gpu, and
 * an optional accurate timestamp of when query happened.
 *
 * \param dev Device to query.
 * \param pipe Crtc to query.
 * \param flags Flags from caller (DRM_CALLED_FROM_VBLIRQ or 0).
 *              For driver internal use only also supports these flags:
 *
 *              USE_REAL_VBLANKSTART to use the real start of vblank instead
 *              of a fudged earlier start of vblank.
 *
 *              GET_DISTANCE_TO_VBLANKSTART to return distance to the
 *              fudged earlier start of vblank in *vpos and the distance
 *              to true start of vblank in *hpos.
 *
 * \param *vpos Location where vertical scanout position should be stored.
 * \param *hpos Location where horizontal scanout position should go.
 * \param *stime Target location for timestamp taken immediately before
 *               scanout position query. Can be NULL to skip timestamp.
 * \param *etime Target location for timestamp taken immediately after
 *               scanout position query. Can be NULL to skip timestamp.
 *
 * Returns vpos as a positive number while in active scanout area.
 * Returns vpos as a negative number inside vblank, counting the number
 * of scanlines to go until end of vblank, e.g., -1 means "one scanline
 * until start of active scanout / end of vblank."
 *
 * \return Flags, or'ed together as follows:
 *
 * DRM_SCANOUTPOS_VALID = Query successful.
 * DRM_SCANOUTPOS_IN_VBLANK = Inside vblank.
 * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
 * this flag means that returned position may be offset by a constant but
 * unknown small number of scanlines wrt. real scanout position.
 *
 */
int amdgpu_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
			       unsigned int flags, int *vpos, int *hpos,
			       ktime_t *stime, ktime_t *etime,
			       const struct drm_display_mode *mode)
{
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, vtotal, ret = 0;
	bool in_vbl = true;

	struct amdgpu_device *adev = dev->dev_private;

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (amdgpu_display_page_flip_get_scanoutpos(adev, pipe, &vbl, &position) == 0)
		ret |= DRM_SCANOUTPOS_VALID;

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	/* Decode into vertical and horizontal scanout position. */
	*vpos = position & 0x1fff;
	*hpos = (position >> 16) & 0x1fff;

	/* Valid vblank area boundaries from gpu retrieved? */
	if (vbl > 0) {
		/* Yes: Decode. */
		ret |= DRM_SCANOUTPOS_ACCURATE;
		vbl_start = vbl & 0x1fff;
		vbl_end = (vbl >> 16) & 0x1fff;
	} else {
		/* No: Fake something reasonable which gives at least ok results. */
		vbl_start = mode->crtc_vdisplay;
		vbl_end = 0;
	}

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from real vbl_start in *hpos */
		*hpos = *vpos - vbl_start;
	}

	/* Fudge vblank to start a few scanlines earlier to handle the
	 * problem that vblank irqs fire a few scanlines before start
	 * of vblank. Some driver internal callers need the true vblank
	 * start to be used and signal this via the USE_REAL_VBLANKSTART flag.
	 *
	 * The cause of the "early" vblank irq is that the irq is triggered
	 * by the line buffer logic when the line buffer read position enters
	 * the vblank, whereas our crtc scanout position naturally lags the
	 * line buffer read position.
	 */
	if (!(flags & USE_REAL_VBLANKSTART))
		vbl_start -= adev->mode_info.crtcs[pipe]->lb_vblank_lead_lines;

	/* Test scanout position against vblank region. */
	if ((*vpos < vbl_start) && (*vpos >= vbl_end))
		in_vbl = false;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	/* Called from driver internal vblank counter query code? */
	if (flags & GET_DISTANCE_TO_VBLANKSTART) {
		/* Caller wants distance from fudged earlier vbl_start */
		*vpos -= vbl_start;
		return ret;
	}

	/* Check if inside vblank area and apply corrective offsets:
	 * vpos will then be >= 0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */

	/* Inside "upper part" of vblank area? Apply corrective offset if so: */
	if (in_vbl && (*vpos >= vbl_start)) {
		vtotal = mode->crtc_vtotal;
		*vpos = *vpos - vtotal;
	}

	/* Correct for shifted end of vbl at vbl_end. */
	*vpos = *vpos - vbl_end;

	return ret;
}

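/* Map a crtc index to the matching vblank interrupt source. */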
int amdgpu_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
{
	if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
		return AMDGPU_CRTC_IRQ_NONE;

	switch (crtc) {
	case 0:
		return AMDGPU_CRTC_IRQ_VBLANK1;
	case 1:
		return AMDGPU_CRTC_IRQ_VBLANK2;
	case 2:
		return AMDGPU_CRTC_IRQ_VBLANK3;
	case 3:
		return AMDGPU_CRTC_IRQ_VBLANK4;
	case 4:
		return AMDGPU_CRTC_IRQ_VBLANK5;
	case 5:
		return AMDGPU_CRTC_IRQ_VBLANK6;
	default:
		return AMDGPU_CRTC_IRQ_NONE;
	}
}