/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drv.h"
#include "radeon_kms.h"
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_has_atpx(void);
#else
static inline bool radeon_has_atpx(void) { return false; }
#endif
/**
 * radeon_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * It calls radeon_modeset_fini() to tear down the
 * displays, and radeon_device_fini() to tear down
 * the rest of the device (CP, writeback, etc.).
 */
void radeon_driver_unload_kms(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev == NULL)
		return;

	if (rdev->rmmio == NULL)
		goto done_free;
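	/*
	 * On PX (hybrid graphics) systems, take a runtime PM reference and
	 * forbid runtime suspend so the GPU stays powered while it is being
	 * torn down.
	 */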
	if (radeon_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	radeon_acpi_fini(rdev);

	radeon_modeset_fini(rdev);
	radeon_device_fini(rdev);

	if (rdev->agp)
		arch_phys_wc_del(rdev->agp->agp_mtrr);
	kfree(rdev->agp);
	rdev->agp = NULL;

done_free:
	kfree(rdev);
	dev->dev_private = NULL;
}
/**
 * radeon_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * It calls radeon_device_init() to set up the non-display
 * parts of the chip (asic init, CP, writeback, etc.), and
 * radeon_modeset_init() to set up the display parts
 * (crtcs, encoders, hotplug detect, etc.).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct radeon_device *rdev = dev->dev_private;
	int r, acpi_status;

#ifdef __alpha__
	rdev->hose = pdev->sysdata;
#endif
	if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
		rdev->agp = radeon_agp_head_init(dev);
	if (rdev->agp) {
		rdev->agp->agp_mtrr = arch_phys_wc_add(
			rdev->agp->agp_info.aper_base,
			rdev->agp->agp_info.aper_size *
			1024 * 1024);
	}

	/* update BUS flag */
	if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
		flags |= RADEON_IS_AGP;
	} else if (pci_is_pcie(pdev)) {
		flags |= RADEON_IS_PCIE;
	} else {
		flags |= RADEON_IS_PCI;
	}
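	/*
	 * Flag PowerXpress (PX) hybrid-graphics parts: runtime PM must be
	 * enabled, the platform must expose the ATPX ACPI methods, the GPU
	 * must be discrete (not an IGP), and it must not sit behind
	 * Thunderbolt.
	 */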
	if ((radeon_runtime_pm != 0) &&
	    radeon_has_atpx() &&
	    ((flags & RADEON_IS_IGP) == 0) &&
	    !pci_is_thunderbolt_attached(pdev))
		flags |= RADEON_IS_PX;

	/* radeon_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = radeon_device_init(rdev, dev, pdev, flags);
	if (r) {
		dev_err(dev->dev, "Fatal error during GPU init\n");
		goto out;
	}
	/* Again modeset_init should fail only on fatal error
	 * otherwise it should provide enough functionalities
	 * for shadowfb to run
	 */
	r = radeon_modeset_init(rdev);
	if (r)
		dev_err(dev->dev, "Fatal error during modeset init\n");

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */

	acpi_status = radeon_acpi_init(rdev);
	if (acpi_status)
		dev_dbg(dev->dev, "Error during ACPI methods call\n");
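	/*
	 * On PX parts, enable runtime PM with a 5 second autosuspend delay so
	 * the discrete GPU can power down when it is idle.
	 */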
	if (radeon_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		radeon_driver_unload_kms(dev);

	return r;
}
/**
 * radeon_set_filp_rights - Set filp rights.
 *
 * @dev: drm dev pointer
 * @owner: drm file currently owning the right
 * @applier: drm file requesting or releasing the right
 * @value: 1 to request the right, 0 to release it
 *
 * Sets the filp rights for the device (all asics).
 */
static void radeon_set_filp_rights(struct drm_device *dev,
				   struct drm_file **owner,
				   struct drm_file *applier,
				   uint32_t *value)
{
	struct radeon_device *rdev = dev->dev_private;

	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
			*owner = applier;
	} else if (*value == 0) {
		/* revokes rights */
		if (*owner == applier)
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
	mutex_unlock(&rdev->gem.mutex);
}
/*
 * Userspace get information ioctl
 */
/**
 * radeon_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_info *info = data;
	struct radeon_mode_info *minfo = &rdev->mode_info;
	uint32_t *value, value_tmp, *value_ptr, value_size;
	struct ttm_resource_manager *man;
	uint64_t value64;
	struct drm_crtc *crtc;
	int i, found;

	value_ptr = (uint32_t *)((unsigned long)info->value);
	value = &value_tmp;
	value_size = sizeof(uint32_t);
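	/*
	 * Most requests stage a single 32-bit result in value_tmp; requests
	 * that return 64 bits or an array repoint value and adjust value_size
	 * so the copy_to_user at the end writes the right amount.
	 */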
	switch (info->request) {
	case RADEON_INFO_DEVICE_ID:
		*value = to_pci_dev(dev->dev)->device;
		break;
	case RADEON_INFO_NUM_GB_PIPES:
		*value = rdev->num_gb_pipes;
		break;
	case RADEON_INFO_NUM_Z_PIPES:
		*value = rdev->num_z_pipes;
		break;
	case RADEON_INFO_ACCEL_WORKING:
		/* xf86-video-ati 6.13.0 relies on this being false for evergreen */
		if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK))
			*value = false;
		else
			*value = rdev->accel_working;
		break;
	case RADEON_INFO_CRTC_FROM_ID:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		for (i = 0, found = 0; i < rdev->num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == *value) {
				struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
				*value = radeon_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", *value);
			return -EINVAL;
		}
		break;
	case RADEON_INFO_ACCEL_WORKING2:
		if (rdev->family == CHIP_HAWAII) {
			if (rdev->accel_working) {
				if (rdev->new_fw)
					*value = 3;
				else
					*value = 2;
			} else {
				*value = 0;
			}
		} else {
			*value = rdev->accel_working;
		}
		break;
	case RADEON_INFO_TILING_CONFIG:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.tile_config;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.tile_config;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.tile_config;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.tile_config;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.tile_config;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.tile_config;
		else {
			DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_WANT_HYPERZ:
		/* The "value" here is both an input and output parameter.
		 * If the input value is 1, filp requests hyper-z access.
		 * If the input value is 0, filp revokes its hyper-z access.
		 *
		 * When returning, the value is 1 if filp owns hyper-z access,
		 * 0 otherwise. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->hyperz_filp, filp, value);
		break;
	case RADEON_INFO_WANT_CMASK:
		/* The same logic as Hyper-Z. */
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		if (*value >= 2) {
			DRM_DEBUG_KMS("WANT_CMASK: invalid value %d\n", *value);
			return -EINVAL;
		}
		radeon_set_filp_rights(dev, &rdev->cmask_filp, filp, value);
		break;
	case RADEON_INFO_CLOCK_CRYSTAL_FREQ:
		/* return clock value in KHz */
		if (rdev->asic->get_xclk)
			*value = radeon_get_xclk(rdev) * 10;
		else
			*value = rdev->clock.spll.reference_freq * 10;
		break;
	case RADEON_INFO_NUM_BACKENDS:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_backends_per_se *
				rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_backends_per_se *
				rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_backends_per_se *
				rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_backends;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_backends;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_backends;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_NUM_TILE_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_tile_pipes;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_tile_pipes;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_tile_pipes;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_tile_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_tile_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_tile_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_FUSION_GART_WORKING:
		*value = 1;
		break;
	case RADEON_INFO_BACKEND_MAP:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.backend_map;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.backend_map;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.backend_map;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.backend_map;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.backend_map;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.backend_map;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_VA_START:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_VA_RESERVED_SIZE;
		break;
	case RADEON_INFO_IB_VM_MAX_SIZE:
		/* this is where we report if vm is supported or not */
		if (rdev->family < CHIP_CAYMAN)
			return -EINVAL;
		*value = RADEON_IB_VM_MAX_SIZE;
		break;
	case RADEON_INFO_MAX_PIPES:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_cu_per_sh;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_cu_per_sh;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_pipes_per_simd;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.max_pipes;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.max_pipes;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.max_pipes;
		else {
			return -EINVAL;
		}
		break;
	case RADEON_INFO_TIMESTAMP:
		if (rdev->family < CHIP_R600) {
			DRM_DEBUG_KMS("timestamp is r6xx+ only!\n");
			return -EINVAL;
		}
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = radeon_get_gpu_clock_counter(rdev);
		break;
	case RADEON_INFO_MAX_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_shader_engines;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_shader_engines;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.max_shader_engines;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.num_ses;
		else
			*value = 1;
		break;
	case RADEON_INFO_MAX_SH_PER_SE:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.max_sh_per_se;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.max_sh_per_se;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_FASTFB_WORKING:
		*value = rdev->fastfb_working;
		break;
	case RADEON_INFO_RING_WORKING:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
		switch (*value) {
		case RADEON_CS_RING_GFX:
		case RADEON_CS_RING_COMPUTE:
			*value = rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready;
			break;
		case RADEON_CS_RING_DMA:
			*value = rdev->ring[R600_RING_TYPE_DMA_INDEX].ready;
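			/* also OR in the second DMA engine on asics that have one */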
			*value |= rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready;
			break;
		case RADEON_CS_RING_UVD:
			*value = rdev->ring[R600_RING_TYPE_UVD_INDEX].ready;
			break;
		case RADEON_CS_RING_VCE:
			*value = rdev->ring[TN_RING_TYPE_VCE1_INDEX].ready;
			break;
		default:
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_TILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else if (rdev->family >= CHIP_TAHITI) {
			value = rdev->config.si.tile_mode_array;
			value_size = sizeof(uint32_t)*32;
		} else {
			DRM_DEBUG_KMS("tile mode array is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_CIK_MACROTILE_MODE_ARRAY:
		if (rdev->family >= CHIP_BONAIRE) {
			value = rdev->config.cik.macrotile_mode_array;
			value_size = sizeof(uint32_t)*16;
		} else {
			DRM_DEBUG_KMS("macrotile mode array is cik+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_SI_CP_DMA_COMPUTE:
		*value = 1;
		break;
	case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
		if (rdev->family >= CHIP_BONAIRE) {
			*value = rdev->config.cik.backend_enable_mask;
		} else if (rdev->family >= CHIP_TAHITI) {
			*value = rdev->config.si.backend_enable_mask;
		} else {
			DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
			return -EINVAL;
		}
		break;
	case RADEON_INFO_MAX_SCLK:
		if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
		    rdev->pm.dpm_enabled)
			*value = rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
		else
			*value = rdev->pm.default_sclk * 10;
		break;
	case RADEON_INFO_VCE_FW_VERSION:
		*value = rdev->vce.fw_version;
		break;
	case RADEON_INFO_VCE_FB_VERSION:
		*value = rdev->vce.fb_version;
		break;
	case RADEON_INFO_NUM_BYTES_MOVED:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		value64 = atomic64_read(&rdev->num_bytes_moved);
		break;
	case RADEON_INFO_VRAM_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_GTT_USAGE:
		value = (uint32_t *)&value64;
		value_size = sizeof(uint64_t);
		man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_TT);
		value64 = ttm_resource_manager_usage(man);
		break;
	case RADEON_INFO_ACTIVE_CU_COUNT:
		if (rdev->family >= CHIP_BONAIRE)
			*value = rdev->config.cik.active_cus;
		else if (rdev->family >= CHIP_TAHITI)
			*value = rdev->config.si.active_cus;
		else if (rdev->family >= CHIP_CAYMAN)
			*value = rdev->config.cayman.active_simds;
		else if (rdev->family >= CHIP_CEDAR)
			*value = rdev->config.evergreen.active_simds;
		else if (rdev->family >= CHIP_RV770)
			*value = rdev->config.rv770.active_simds;
		else if (rdev->family >= CHIP_R600)
			*value = rdev->config.r600.active_simds;
		else
			return -EINVAL;
		break;
	case RADEON_INFO_CURRENT_GPU_TEMP:
		/* get temperature in millidegrees C */
		if (rdev->asic->pm.get_temperature)
			*value = radeon_get_temperature(rdev);
		else
			*value = 0;
		break;
	case RADEON_INFO_CURRENT_GPU_SCLK:
		/* get sclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_sclk(rdev) / 100;
		else
			*value = rdev->pm.current_sclk / 100;
		break;
	case RADEON_INFO_CURRENT_GPU_MCLK:
		/* get mclk in Mhz */
		if (rdev->pm.dpm_enabled)
			*value = radeon_dpm_get_current_mclk(rdev) / 100;
		else
			*value = rdev->pm.current_mclk / 100;
		break;
	case RADEON_INFO_READ_REG:
		if (copy_from_user(value, value_ptr, sizeof(uint32_t))) {
			DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__);
			return -EFAULT;
		}
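		/* only registers on the driver's allow-list may be read this way */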
		if (radeon_get_allowed_info_register(rdev, *value, value))
			return -EINVAL;
		break;
	case RADEON_INFO_VA_UNMAP_WORKING:
		*value = true;
		break;
	case RADEON_INFO_GPU_RESET_COUNTER:
		*value = atomic_read(&rdev->gpu_reset_counter);
		break;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->request);
		return -EINVAL;
	}
	if (copy_to_user(value_ptr, (char *)value, value_size)) {
		DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__);
		return -EFAULT;
	}
	return 0;
}
/**
 * radeon_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv;
	struct radeon_vm *vm;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(dev->dev);
		return r;
	}
	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN) {

		fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
		if (unlikely(!fpriv)) {
			r = -ENOMEM;
			goto err_suspend;
		}

		if (rdev->accel_working) {
			vm = &fpriv->vm;
			r = radeon_vm_init(rdev, vm);
			if (r)
				goto err_fpriv;

			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (r)
				goto err_vm_fini;

			/* map the ib pool buffer read only into
			 * virtual address space */
			vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
							rdev->ring_tmp_bo.bo);
			if (!vm->ib_bo_va) {
				r = -ENOMEM;
				goto err_vm_fini;
			}

			r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
						  RADEON_VA_IB_OFFSET,
						  RADEON_VM_PAGE_READABLE |
						  RADEON_VM_PAGE_SNOOPED);
			if (r)
				goto err_vm_fini;
		}
		file_priv->driver_priv = fpriv;
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

err_vm_fini:
	radeon_vm_fini(rdev, vm);
err_fpriv:
	kfree(fpriv);

err_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return r;
}
/**
 * radeon_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device close, tear down hyperz and cmask filps on r1xx-r5xx
 * (all asics). And tear down vm on cayman+ (all asics).
 */
void radeon_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;

	pm_runtime_get_sync(dev->dev);
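	/* release any hyper-z / cmask ownership this file still holds */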
	mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
	mutex_unlock(&rdev->gem.mutex);

	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);

	/* new gpu have virtual address space support */
	if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
		struct radeon_fpriv *fpriv = file_priv->driver_priv;
		struct radeon_vm *vm = &fpriv->vm;
		int r;

		if (rdev->accel_working) {
			r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
			if (!r) {
				if (vm->ib_bo_va)
					radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
				radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
			}
			radeon_vm_fini(rdev, vm);
		}

		kfree(fpriv);
		file_priv->driver_priv = NULL;
	}
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/*
 * VBlank related functions.
 */
/**
 * radeon_get_vblank_counter_kms - get frame count
 *
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	int vpos, hpos, stat;
	u32 count;
	struct radeon_device *rdev = dev->dev_private;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}
	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (rdev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = radeon_get_vblank_counter(rdev, pipe);
			/* Ask radeon_get_crtc_scanoutpos to return vpos as
			 * distance to start of vblank, instead of regular
			 * vertical scanout pos.
			 */
			stat = radeon_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&rdev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != radeon_get_vblank_counter(rdev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %u: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = radeon_get_vblank_counter(rdev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
/**
 * radeon_enable_vblank_kms - enable vblank interrupt
 *
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;
	int r;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = true;
	r = radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
	return r;
}
/**
 * radeon_disable_vblank_kms - disable vblank interrupt
 *
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct radeon_device *rdev = dev->dev_private;
	unsigned long irqflags;

	if (pipe >= rdev->num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return;
	}

	spin_lock_irqsave(&rdev->irq.lock, irqflags);
	rdev->irq.crtc_vblank_int[pipe] = false;
	radeon_irq_set(rdev);
	spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}