1 // SPDX-License-Identifier: GPL-2.0-only
2 /**************************************************************************
3 * Copyright (c) 2007, Intel Corporation.
6 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
9 **************************************************************************/
11 #include <drm/drm_drv.h>
12 #include <drm/drm_vblank.h>
16 #include "psb_intel_reg.h"
/* Map a pipe index to its PIPExSTAT register offset (body elided in this listing). */
24 static inline u32 gma_pipestat(int pipe)
/* Map a pipe index to its PIPExCONF register offset (body elided in this listing). */
35 static inline u32 gma_pipeconf(int pipe)
/*
 * Enable the pipe-status interrupt source(s) in @mask for @pipe and mirror
 * the enabled set in dev_priv->pipestat[pipe].  Callers in this file hold
 * dev_priv->irqmask_lock around this (see gma_irq_postinstall() and
 * gma_crtc_enable_vblank()).  No-op if all bits in @mask are already enabled.
 */
46 void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
48 if ((dev_priv->pipestat[pipe] & mask) != mask) {
49 u32 reg = gma_pipestat(pipe);
50 dev_priv->pipestat[pipe] |= mask;
51 /* Enable the interrupt, clear any pending status */
/* The display island may be powered down; only touch the register if up. */
52 if (gma_power_begin(&dev_priv->dev, false)) {
53 u32 writeVal = PSB_RVDC32(reg);
/*
 * Set the enable bit(s) and also write 1 to the corresponding
 * status bit(s) (mask >> 16) to clear any stale pending event
 * for this source (status bits sit 16 below their enable bits;
 * cf. the >> 16 usage in gma_pipe_event_handler()).
 */
54 writeVal |= (mask | (mask >> 16));
55 PSB_WVDC32(writeVal, reg);
56 (void) PSB_RVDC32(reg); /* posting read to flush the write */
57 gma_power_end(&dev_priv->dev);
/*
 * Disable the pipe-status interrupt source(s) in @mask for @pipe and update
 * the cached enable set.  Mirror image of gma_enable_pipestat(); no-op if
 * none of the bits in @mask are currently enabled.
 */
62 void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
64 if ((dev_priv->pipestat[pipe] & mask) != 0) {
65 u32 reg = gma_pipestat(pipe);
66 dev_priv->pipestat[pipe] &= ~mask;
67 if (gma_power_begin(&dev_priv->dev, false)) {
68 u32 writeVal = PSB_RVDC32(reg);
/*
 * NOTE(review): original line 69 is elided from this listing;
 * presumably it clears @mask from writeVal before the write-back
 * — confirm against the full source.
 */
70 PSB_WVDC32(writeVal, reg);
71 (void) PSB_RVDC32(reg); /* posting read to flush the write */
72 gma_power_end(&dev_priv->dev);
78 * Display controller interrupt handler for pipe event.
/*
 * Reads PIPExSTAT for @pipe, masks it down to the events that are both
 * enabled and asserted, clears the (sticky) status bits, and forwards a
 * vblank event — completing any pending page flip on that CRTC.
 * Called from gma_vdc_interrupt() in hard-IRQ context (plain spin_lock
 * is used on irqmask_lock for that reason).
 */
80 static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
82 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
84 uint32_t pipe_stat_val = 0;
85 uint32_t pipe_stat_reg = gma_pipestat(pipe);
/* Cached enable bits (high half) and their matching status bits (low half). */
86 uint32_t pipe_enable = dev_priv->pipestat[pipe];
87 uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
91 spin_lock(&dev_priv->irqmask_lock);
93 pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
94 pipe_stat_val &= pipe_enable | pipe_status;
/* Keep only status bits whose corresponding enable bit is also set. */
95 pipe_stat_val &= pipe_stat_val >> 16;
97 spin_unlock(&dev_priv->irqmask_lock);
99 /* Clear the 2nd level interrupt status bits
100 * Sometimes the bits are very sticky so we repeat until they unstick */
/* NOTE(review): 'i' and 'pipe_clear' declarations are elided from this listing. */
101 for (i = 0; i < 0xffff; i++) {
/* Write back what is pending: status bits are write-1-to-clear. */
102 PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
103 pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;
/*
 * NOTE(review): the loop-exit check and the dev_err() wrapper around
 * the message below (original lines 104-110) are elided here;
 * presumably the loop breaks once pipe_clear == 0 and the error is
 * logged only when the bits never unstick.
 */
111 "%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
112 __func__, pipe, PSB_RVDC32(pipe_stat_reg));
114 if (pipe_stat_val & PIPE_VBLANK_STATUS) {
115 struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
116 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
/* Notify DRM core so vblank waiters/timestamps make progress. */
119 drm_handle_vblank(dev, pipe);
/* Complete a pending page flip, if any, under the event lock. */
121 spin_lock_irqsave(&dev->event_lock, flags);
122 if (gma_crtc->page_flip_event) {
123 drm_crtc_send_vblank_event(crtc,
124 gma_crtc->page_flip_event);
125 gma_crtc->page_flip_event = NULL;
/* Drop the vblank reference taken when the flip was queued. */
126 drm_crtc_vblank_put(crtc);
128 spin_unlock_irqrestore(&dev->event_lock, flags);
133 * Display controller interrupt handler.
/*
 * Dispatch the display-controller portion of an interrupt: OpRegion ASLE
 * events plus per-pipe (vsync) events for pipe A and pipe B, based on the
 * already-masked @vdc_stat bits from gma_irq_handler().
 */
135 static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
137 if (vdc_stat & _PSB_IRQ_ASLE)
138 psb_intel_opregion_asle_intr(dev);
140 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
141 gma_pipe_event_handler(dev, 0);
143 if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
144 gma_pipe_event_handler(dev, 1);
148 * SGX interrupt handler
/*
 * Handle SGX (3D core) events: acknowledge 2D-blit completion and decode
 * BIF (bus interface / MMU) faults, logging which requestor unit faulted
 * and the failing address, then clear both event-status registers.
 * NOTE(review): declarations of 'val' and 'addr' are elided from this listing.
 */
150 static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
152 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* Reading the blit status acknowledges the 2D-complete event. */
155 if (stat_1 & _PSB_CE_TWOD_COMPLETE)
156 val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);
158 if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
159 val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
160 addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
/* Fault type: page fault vs. read/write protection fault. */
162 if (val & _PSB_CBI_STAT_PF_N_RW)
163 DRM_ERROR("SGX MMU page fault:");
/* NOTE(review): the 'else' for the line below is elided from this listing. */
165 DRM_ERROR("SGX MMU read / write protection fault:");
/* Identify which SGX requestor unit raised the fault. */
167 if (val & _PSB_CBI_STAT_FAULT_CACHE)
168 DRM_ERROR("\tCache requestor");
169 if (val & _PSB_CBI_STAT_FAULT_TA)
170 DRM_ERROR("\tTA requestor");
171 if (val & _PSB_CBI_STAT_FAULT_VDM)
172 DRM_ERROR("\tVDM requestor");
173 if (val & _PSB_CBI_STAT_FAULT_2D)
174 DRM_ERROR("\t2D requestor");
175 if (val & _PSB_CBI_STAT_FAULT_PBE)
176 DRM_ERROR("\tPBE requestor");
177 if (val & _PSB_CBI_STAT_FAULT_TSP)
178 DRM_ERROR("\tTSP requestor");
179 if (val & _PSB_CBI_STAT_FAULT_ISP)
180 DRM_ERROR("\tISP requestor");
181 if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
182 DRM_ERROR("\tUSSEPDS requestor");
183 if (val & _PSB_CBI_STAT_FAULT_HOST)
184 DRM_ERROR("\tHost requestor");
186 DRM_ERROR("\tMMU failing address is 0x%08x.\n",
/* Acknowledge the handled events in both host-clear registers. */
192 PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
193 PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
194 PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2); /* posting read to flush the clears */
/*
 * Top-level (shared) interrupt handler: read PSB_INT_IDENTITY_R, sort the
 * pending sources into display / SGX / hotplug categories, dispatch each,
 * then acknowledge the handled bits.
 * NOTE(review): declarations of 'handled' and the sgx_stat locals' guards,
 * plus the final IRQ_HANDLED/IRQ_NONE return path, are elided from this listing.
 */
197 static irqreturn_t gma_irq_handler(int irq, void *arg)
199 struct drm_device *dev = arg;
200 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
201 uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
202 u32 sgx_stat_1, sgx_stat_2;
/* Hard-IRQ context: plain spin_lock on irqmask_lock is sufficient here. */
205 spin_lock(&dev_priv->irqmask_lock);
207 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
/* Categorize pending sources (assignments to dsp/sgx/hotplug_int elided). */
209 if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
212 if (vdc_stat & _PSB_IRQ_SGX_FLAG)
214 if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
/* Only handle (and later ack) sources we actually enabled. */
217 vdc_stat &= dev_priv->vdc_irq_mask;
218 spin_unlock(&dev_priv->irqmask_lock);
221 gma_vdc_interrupt(dev, vdc_stat);
226 sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
227 sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
228 gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
232 /* Note: this bit has other meanings on some devices, so we will
233 need to address that later if it ever matters */
234 if (hotplug_int && dev_priv->ops->hotplug) {
235 handled = dev_priv->ops->hotplug(dev);
/* Hotplug status is write-1-to-clear: write back what we read. */
236 REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
/* Acknowledge the handled identity bits; posting read flushes the ack. */
239 PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
240 (void) PSB_RVDC32(PSB_INT_IDENTITY_R);
/*
 * Pre-install step: mask/disable everything at the hardware, then compute
 * dev_priv->vdc_irq_mask (vblank pipes currently enabled, optional hotplug,
 * ASLE and SGX) and program the VDC mask register.  Interrupts are not
 * enabled here — that happens in gma_irq_postinstall().
 */
249 void gma_irq_preinstall(struct drm_device *dev)
251 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
252 unsigned long irqflags;
254 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* Quiesce: mask HWSTAM, clear VDC mask/enable and the SGX host-enable. */
256 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
257 PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
258 PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
259 PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
260 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* posting read */
/* Re-arm vsync flags for any pipes whose vblank was already enabled. */
262 if (dev->vblank[0].enabled)
263 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
264 if (dev->vblank[1].enabled)
265 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
267 /* Revisit this area - want per device masks ? */
268 if (dev_priv->ops->hotplug)
269 dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
270 dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;
272 /* This register is safe even if display island is off */
/* Mask register is active-high mask: ~vdc_irq_mask unmasks our sources. */
273 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
274 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
/*
 * Post-install step: enable SGX 2D/MMU-fault events, enable the VDC sources
 * selected in vdc_irq_mask, sync per-pipe vblank pipestat bits with the DRM
 * core's vblank state, and turn hotplug detection on where supported.
 * NOTE(review): the declaration of loop variable 'i' is elided from this listing.
 */
277 void gma_irq_postinstall(struct drm_device *dev)
279 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
280 unsigned long irqflags;
283 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
285 /* Enable 2D and MMU fault interrupts */
286 PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
287 PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
288 PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */
290 /* This register is safe even if display island is off */
291 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
292 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
/* Bring each pipe's vblank pipestat bit in line with DRM's vblank state. */
294 for (i = 0; i < dev->num_crtcs; ++i) {
295 if (dev->vblank[i].enabled)
296 gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
/* NOTE(review): the 'else' for the line below is elided from this listing. */
298 gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
301 if (dev_priv->ops->hotplug_enable)
302 dev_priv->ops->hotplug_enable(dev, true);
304 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
/*
 * Request the device interrupt: try MSI (falling back to legacy on failure),
 * bail out for disconnected IRQs, then preinstall -> request_irq -> postinstall.
 * Returns 0 on success or a negative errno (error-return lines are elided
 * from this listing).
 */
307 int gma_irq_install(struct drm_device *dev)
309 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
310 struct pci_dev *pdev = to_pci_dev(dev->dev);
/* MSI failure is non-fatal: fall back to legacy line interrupts. */
313 if (dev_priv->use_msi && pci_enable_msi(pdev)) {
314 dev_warn(dev->dev, "Enabling MSI failed!\n");
315 dev_priv->use_msi = false;
/* NOTE(review): the error return for the check below is elided from this listing. */
318 if (pdev->irq == IRQ_NOTCONNECTED)
321 gma_irq_preinstall(dev);
323 /* PCI devices require shared interrupts. */
324 ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
328 gma_irq_postinstall(dev);
/*
 * Tear down interrupt handling: disable hotplug, mask HWSTAM, turn off every
 * enabled vblank pipestat, restrict vdc_irq_mask to the always-on engine
 * flags, ack anything still pending, then free the IRQ and disable MSI.
 * NOTE(review): the declaration of loop variable 'i' is elided from this listing.
 */
333 void gma_irq_uninstall(struct drm_device *dev)
335 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
336 struct pci_dev *pdev = to_pci_dev(dev->dev);
337 unsigned long irqflags;
340 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
342 if (dev_priv->ops->hotplug_enable)
343 dev_priv->ops->hotplug_enable(dev, false);
345 PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
347 for (i = 0; i < dev->num_crtcs; ++i) {
348 if (dev->vblank[i].enabled)
349 gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
/* Keep only the engine flags; continuation of this mask is elided here. */
352 dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
353 _PSB_IRQ_MSVDX_FLAG |
356 /* These two registers are safe even if display island is off */
357 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
358 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
362 /* This register is safe even if display island is off */
/* Identity bits are write-1-to-clear: write back whatever is pending. */
363 PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
364 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
/* Must be outside the spinlock: free_irq() can sleep. */
366 free_irq(pdev->irq, dev);
367 if (dev_priv->use_msi)
368 pci_disable_msi(pdev);
/*
 * DRM vblank-enable hook: refuse if the pipe is not running, then add the
 * pipe's vsync flag to vdc_irq_mask, reprogram the VDC mask/enable registers
 * and enable the vblank pipestat bit.  Returns 0 on success (error-return
 * lines are elided from this listing).
 */
371 int gma_crtc_enable_vblank(struct drm_crtc *crtc)
373 struct drm_device *dev = crtc->dev;
374 unsigned int pipe = crtc->index;
375 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
376 unsigned long irqflags;
377 uint32_t reg_val = 0;
378 uint32_t pipeconf_reg = gma_pipeconf(pipe);
/* Only read the pipe configuration if the display island is powered. */
380 if (gma_power_begin(dev, false)) {
381 reg_val = REG_READ(pipeconf_reg);
/* Can't enable vblank on a pipe that isn't running. */
385 if (!(reg_val & PIPEACONF_ENABLE))
388 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* NOTE(review): the pipe-selecting 'if'/'else' lines are elided from this listing. */
391 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
393 dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;
395 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
396 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
397 gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
399 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
/*
 * DRM vblank-disable hook: drop the pipe's vsync flag from vdc_irq_mask,
 * reprogram the VDC mask/enable registers and clear the vblank pipestat bit.
 * Mirror image of gma_crtc_enable_vblank().
 */
404 void gma_crtc_disable_vblank(struct drm_crtc *crtc)
406 struct drm_device *dev = crtc->dev;
407 unsigned int pipe = crtc->index;
408 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
409 unsigned long irqflags;
411 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* NOTE(review): the pipe-selecting 'if'/'else' lines are elided from this listing. */
414 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
416 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
418 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
419 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
420 gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
422 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
425 /* Called from drm generic code, passed a 'crtc', which
426 * we use as a pipe index
428 u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
430 struct drm_device *dev = crtc->dev;
431 unsigned int pipe = crtc->index;
432 uint32_t high_frame = PIPEAFRAMEHIGH;
433 uint32_t low_frame = PIPEAFRAMEPIXEL;
434 uint32_t pipeconf_reg = PIPEACONF;
435 uint32_t reg_val = 0;
436 uint32_t high1 = 0, high2 = 0, low = 0, count = 0;
442 high_frame = PIPEBFRAMEHIGH;
443 low_frame = PIPEBFRAMEPIXEL;
444 pipeconf_reg = PIPEBCONF;
447 high_frame = PIPECFRAMEHIGH;
448 low_frame = PIPECFRAMEPIXEL;
449 pipeconf_reg = PIPECCONF;
452 dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
456 if (!gma_power_begin(dev, false))
459 reg_val = REG_READ(pipeconf_reg);
461 if (!(reg_val & PIPEACONF_ENABLE)) {
462 dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
464 goto err_gma_power_end;
468 * High & low register fields aren't synchronized, so make sure
469 * we get a low value that's stable across two reads of the high
473 high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
474 PIPE_FRAME_HIGH_SHIFT);
475 low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
476 PIPE_FRAME_LOW_SHIFT);
477 high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
478 PIPE_FRAME_HIGH_SHIFT);
479 } while (high1 != high2);
481 count = (high1 << 8) | low;