<title>Device Registration</title>
<para>
A number of functions are provided to help with device registration.
- The functions deal with PCI, USB and platform devices, respectively.
+ The functions deal with PCI and platform devices, respectively.
</para>
!Edrivers/gpu/drm/drm_pci.c
- !Edrivers/gpu/drm/drm_usb.c
!Edrivers/gpu/drm/drm_platform.c
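<para>
  As a hedged sketch (the foo_* driver names are hypothetical), a PCI
  driver wires these helpers up from its module init function:
</para>
<programlisting>
static struct drm_driver foo_driver = { /* ... */ };
static struct pci_driver foo_pci_driver = { /* ... */ };

static int __init foo_init(void)
{
        return drm_pci_init(&amp;foo_driver, &amp;foo_pci_driver);
}
</programlisting>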
<para>
New drivers that no longer rely on the services provided by the
<function>drm_*_init()</function> midlayer should instead unregister and
destroy the device instance with a call to
<function>drm_dev_unregister()</function> followed by a call to
<function>drm_dev_unref()</function>.
</para>
- !Edrivers/gpu/drm/drm_stub.c
+ !Edrivers/gpu/drm/drm_drv.c
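<para>
  A minimal sketch of that lifecycle (error handling elided, the
  foo_driver structure hypothetical):
</para>
<programlisting>
ddev = drm_dev_alloc(&amp;foo_driver, parent);
ret = drm_dev_register(ddev, 0);
/* ... device is live ... */
drm_dev_unregister(ddev);
drm_dev_unref(ddev);
</programlisting>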
</sect2>
<sect2>
<title>Driver Load</title>
by scheduling a timer. The delay is accessible through the vblankoffdelay
module parameter or the <varname>drm_vblank_offdelay</varname> global
variable and expressed in milliseconds. Its default value is 5000 ms.
+ Zero means never disable, and a negative value means disable immediately.
+ Drivers may override the behaviour by setting the
+ <structname>drm_device</structname>
+ <structfield>vblank_disable_immediate</structfield> flag, which when set
+ causes vblank interrupts to be disabled immediately regardless of the
+ drm_vblank_offdelay value. The flag should only be set if there's a
+ properly working hardware vblank counter present.
</para>
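<para>
  A minimal sketch of a driver opting in, assuming a reliable hardware
  counter (the function name is hypothetical):
</para>
<programlisting>
static int foo_vblank_setup(struct drm_device *dev)
{
        int ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
        if (ret)
                return ret;

        /* Only safe with a properly working hardware vblank counter. */
        dev->vblank_disable_immediate = true;
        return 0;
}
</programlisting>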
<para>
When a vertical blanking interrupt occurs drivers only need to call the
<function>drm_handle_vblank</function> function to account for the
interrupt.
<sect2>
<title>Vertical Blanking and Interrupt Handling Functions Reference</title>
!Edrivers/gpu/drm/drm_irq.c
+ !Iinclude/drm/drmP.h drm_crtc_vblank_waitqueue
</sect2>
</sect1>
!Pdrivers/gpu/drm/i915/i915_cmd_parser.c batch buffer command parser
!Idrivers/gpu/drm/i915/i915_cmd_parser.c
</sect2>
+ <sect2>
+ <title>Logical Rings, Logical Ring Contexts and Execlists</title>
+ !Pdrivers/gpu/drm/i915/intel_lrc.c Logical Rings, Logical Ring Contexts and Execlists
+ !Idrivers/gpu/drm/i915/intel_lrc.c
+ </sect2>
</sect1>
</chapter>
</part>
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
-unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
+int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
-MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
+MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs] (0: never disable, <0: disable immediately)");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
static void drm_master_destroy(struct kref *kref)
{
struct drm_master *master = container_of(kref, struct drm_master, refcount);
- struct drm_magic_entry *pt, *next;
struct drm_device *dev = master->minor->dev;
struct drm_map_list *r_list, *list_temp;
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) {
if (r_list->master == master) {
- drm_rmmap_locked(dev, r_list->map);
+ drm_legacy_rmmap_locked(dev, r_list->map);
r_list = NULL;
}
}
master->unique_len = 0;
}
- list_for_each_entry_safe(pt, next, &master->magicfree, head) {
- list_del(&pt->head);
- drm_ht_remove_item(&master->magiclist, &pt->hash_item);
- kfree(pt);
- }
-
drm_ht_remove(&master->magiclist);
mutex_unlock(&dev->struct_mutex);
int ret = 0;
mutex_lock(&dev->master_mutex);
- if (drm_is_master(file_priv))
+ if (file_priv->is_master)
goto out_unlock;
if (file_priv->minor->master) {
}
file_priv->minor->master = drm_master_get(file_priv->master);
+ file_priv->is_master = 1;
if (dev->driver->master_set) {
ret = dev->driver->master_set(dev, file_priv, false);
- if (unlikely(ret != 0))
+ if (unlikely(ret != 0)) {
+ file_priv->is_master = 0;
drm_master_put(&file_priv->minor->master);
+ }
}
out_unlock:
int ret = -EINVAL;
mutex_lock(&dev->master_mutex);
- if (!drm_is_master(file_priv))
+ if (!file_priv->is_master)
goto out_unlock;
if (!file_priv->minor->master)
if (dev->driver->master_drop)
dev->driver->master_drop(dev, file_priv, false);
drm_master_put(&file_priv->minor->master);
+ file_priv->is_master = 0;
out_unlock:
mutex_unlock(&dev->master_mutex);
drm_vblank_cleanup(dev);
list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_rmmap(dev, r_list->map);
+ drm_legacy_rmmap(dev, r_list->map);
drm_minor_unregister(dev, DRM_MINOR_LEGACY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
*/
#define DRM_REDUNDANT_VBLIRQ_THRESH_NS 1000000
-/*
- * Clear vblank timestamp buffer for a crtc.
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
*/
-static void clear_vblank_timestamps(struct drm_device *dev, int crtc)
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
- memset(dev->vblank[crtc].time, 0, sizeof(dev->vblank[crtc].time));
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+ u32 cur_vblank, diff, tslot;
+ bool rc;
+ struct timeval t_vblank;
+
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count events
+ * here if the register is small or we had vblank interrupts off for
+ * a long time.
+ *
+ * We repeat the hardware vblank counter & timestamp query until
+	 * we get consistent results. This prevents races between the gpu
+	 * updating its hardware counter and us retrieving the
+	 * corresponding vblank timestamp.
+ */
+ do {
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
+ } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
+
+ /* Deal with counter wrap */
+ diff = cur_vblank - vblank->last;
+ if (cur_vblank < vblank->last) {
+ diff += dev->max_vblank_count;
+
+ DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+ crtc, vblank->last, cur_vblank, diff);
+ }
+
+ DRM_DEBUG("updating vblank count on crtc %d, missed %d\n",
+ crtc, diff);
+
+ /* Reinitialize corresponding vblank timestamp if high-precision query
+ * available. Skip this step if query unsupported or failed. Will
+ * reinitialize delayed at next vblank interrupt in that case.
+ */
+ if (rc) {
+ tslot = atomic_read(&vblank->count) + diff;
+ vblanktimestamp(dev, crtc, tslot) = t_vblank;
+ }
+
+ smp_mb__before_atomic();
+ atomic_add(diff, &vblank->count);
+ smp_mb__after_atomic();
}
/*
*/
static void vblank_disable_and_save(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
u32 vblcount;
s64 diff_ns;
- int vblrc;
+ bool vblrc;
struct timeval tvblank;
int count = DRM_TIMESTAMP_MAXRETRIES;
*/
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+ /*
+	 * If the vblank interrupt was already disabled, update the count
+ * and timestamp to maintain the appearance that the counter
+ * has been ticking all along until this time. This makes the
+ * count account for the entire time between drm_vblank_on() and
+ * drm_vblank_off().
+ *
+ * But only do this if precise vblank timestamps are available.
+ * Otherwise we might read a totally bogus timestamp since drivers
+ * lacking precise timestamp support rely upon sampling the system clock
+	 * at vblank interrupt time, which obviously won't work out well if
+	 * the vblank interrupt is disabled.
+ */
+ if (!vblank->enabled &&
+ drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0)) {
+ drm_update_vblank_count(dev, crtc);
+ spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ return;
+ }
+
dev->driver->disable_vblank(dev, crtc);
- dev->vblank[crtc].enabled = false;
+ vblank->enabled = false;
/* No further vblank irq's will be processed after
* this point. Get current hardware vblank count and
* delayed gpu counter increment.
*/
do {
- dev->vblank[crtc].last = dev->driver->get_vblank_counter(dev, crtc);
+ vblank->last = dev->driver->get_vblank_counter(dev, crtc);
vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
- } while (dev->vblank[crtc].last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
+ } while (vblank->last != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);
if (!count)
vblrc = 0;
/* Compute time difference to stored timestamp of last vblank
* as updated by last invocation of drm_handle_vblank() in vblank irq.
*/
- vblcount = atomic_read(&dev->vblank[crtc].count);
+ vblcount = atomic_read(&vblank->count);
diff_ns = timeval_to_ns(&tvblank) -
timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));
* available. In that case we can't account for this and just
* hope for the best.
*/
- if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
- atomic_inc(&dev->vblank[crtc].count);
+ if (vblrc && (abs64(diff_ns) > 1000000)) {
+ /* Store new timestamp in ringbuffer. */
+ vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
+
+ /* Increment cooked vblank count. This also atomically commits
+ * the timestamp computed above.
+ */
+ smp_mb__before_atomic();
+ atomic_inc(&vblank->count);
smp_mb__after_atomic();
}
- /* Invalidate all timestamps while vblank irq's are off. */
- clear_vblank_timestamps(dev, crtc);
-
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
void drm_vblank_cleanup(struct drm_device *dev)
{
int crtc;
+ unsigned long irqflags;
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
- del_timer_sync(&dev->vblank[crtc].disable_timer);
- vblank_disable_fn((unsigned long)&dev->vblank[crtc]);
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+ del_timer_sync(&vblank->disable_timer);
+
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ vblank_disable_and_save(dev, crtc);
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
kfree(dev->vblank);
goto err;
for (i = 0; i < num_crtcs; i++) {
- dev->vblank[i].dev = dev;
- dev->vblank[i].crtc = i;
- init_waitqueue_head(&dev->vblank[i].queue);
- setup_timer(&dev->vblank[i].disable_timer, vblank_disable_fn,
- (unsigned long)&dev->vblank[i]);
+ struct drm_vblank_crtc *vblank = &dev->vblank[i];
+
+ vblank->dev = dev;
+ vblank->crtc = i;
+ init_waitqueue_head(&vblank->queue);
+ setup_timer(&vblank->disable_timer, vblank_disable_fn,
+ (unsigned long)vblank);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
return 0;
err:
- drm_vblank_cleanup(dev);
+ dev->num_crtcs = 0;
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
if (dev->num_crtcs) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
- wake_up(&dev->vblank[i].queue);
- dev->vblank[i].enabled = false;
- dev->vblank[i].last =
+ struct drm_vblank_crtc *vblank = &dev->vblank[i];
+
+ wake_up(&vblank->queue);
+ vblank->enabled = false;
+ vblank->last =
dev->driver->get_vblank_counter(dev, i);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
const struct drm_crtc *refcrtc,
const struct drm_display_mode *mode)
{
- ktime_t stime, etime, mono_time_offset;
struct timeval tv_etime;
+ ktime_t stime, etime;
int vbl_status;
int vpos, hpos, i;
int framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
vbl_status = dev->driver->get_scanout_position(dev, crtc, flags, &vpos,
&hpos, &stime, &etime);
- /*
- * Get correction for CLOCK_MONOTONIC -> CLOCK_REALTIME if
- * CLOCK_REALTIME is requested.
- */
- if (!drm_timestamp_monotonic)
- mono_time_offset = ktime_get_monotonic_offset();
-
/* Return as no-op if scanout query unsupported or failed. */
if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
* within vblank area, counting down the number of lines until
* start of scanout.
*/
- invbl = vbl_status & DRM_SCANOUTPOS_INVBL;
+ invbl = vbl_status & DRM_SCANOUTPOS_IN_VBLANK;
/* Convert scanout position into elapsed time at raw_time query
* since start of scanout at first display scanline. delta_ns
delta_ns = vpos * linedur_ns + hpos * pixeldur_ns;
if (!drm_timestamp_monotonic)
- etime = ktime_sub(etime, mono_time_offset);
+ etime = ktime_mono_to_real(etime);
/* save this only for debugging purposes */
tv_etime = ktime_to_timeval(etime);
vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
if (invbl)
- vbl_status |= DRM_VBLANKTIME_INVBL;
+ vbl_status |= DRM_VBLANKTIME_IN_VBLANK;
return vbl_status;
}
{
ktime_t now;
- now = ktime_get();
- if (!drm_timestamp_monotonic)
- now = ktime_sub(now, ktime_get_monotonic_offset());
-
+ now = drm_timestamp_monotonic ? ktime_get() : ktime_get_real();
return ktime_to_timeval(now);
}
* call, i.e., it isn't very precisely locked to the true vblank.
*
* Returns:
- * Non-zero if timestamp is considered to be very precise, zero otherwise.
+ * True if timestamp is considered to be very precise, false otherwise.
*/
-u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
- struct timeval *tvblank, unsigned flags)
+static bool
+drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags)
{
int ret;
ret = dev->driver->get_vblank_timestamp(dev, crtc, &max_error,
tvblank, flags);
if (ret > 0)
- return (u32) ret;
+ return true;
}
/* GPU high precision timestamp query unsupported or failed.
*/
*tvblank = get_drm_timestamp();
- return 0;
+ return false;
}
-EXPORT_SYMBOL(drm_get_last_vbltimestamp);
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
- return atomic_read(&dev->vblank[crtc].count);
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return 0;
+ return atomic_read(&vblank->count);
}
EXPORT_SYMBOL(drm_vblank_count);
u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 cur_vblank;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return 0;
+
/* Read timestamp from slot of _vblank_time ringbuffer
* that corresponds to current vblank count. Retry if
* count has incremented during readout. This works like
* a seqlock.
*/
do {
- cur_vblank = atomic_read(&dev->vblank[crtc].count);
+ cur_vblank = atomic_read(&vblank->count);
*vblanktime = vblanktimestamp(dev, crtc, cur_vblank);
smp_rmb();
- } while (cur_vblank != atomic_read(&dev->vblank[crtc].count));
+ } while (cur_vblank != atomic_read(&vblank->count));
return cur_vblank;
}
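
/*
 * Editor's sketch (not kernel code) of the same seqlock-style idiom in
 * plain C11: re-read the counter around the payload until it is stable.
 * A full seqlock would also need write-side ordering; this only shows
 * the reader's retry loop.
 */
#include <stdatomic.h>

struct stamped {
	atomic_uint count;
	long long ts[2];		/* one slot per counter parity */
};

static long long read_stable(struct stamped *s, unsigned int *seq)
{
	unsigned int c;
	long long t;

	do {
		c = atomic_load(&s->count);	/* seq_cst orders the reads */
		t = s->ts[c & 1];
	} while (c != atomic_load(&s->count));

	*seq = c;
	return t;
}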
}
EXPORT_SYMBOL(drm_send_vblank_event);
-/**
- * drm_update_vblank_count - update the master vblank counter
- * @dev: DRM device
- * @crtc: counter to update
- *
- * Call back into the driver to update the appropriate vblank counter
- * (specified by @crtc). Deal with wraparound, if it occurred, and
- * update the last read value so we can deal with wraparound on the next
- * call if necessary.
- *
- * Only necessary when going from off->on, to account for frames we
- * didn't get an interrupt for.
- *
- * Note: caller must hold dev->vbl_lock since this reads & writes
- * device vblank fields.
- */
-static void drm_update_vblank_count(struct drm_device *dev, int crtc)
-{
- u32 cur_vblank, diff, tslot, rc;
- struct timeval t_vblank;
-
- /*
- * Interrupts were disabled prior to this call, so deal with counter
- * wrap if needed.
- * NOTE! It's possible we lost a full dev->max_vblank_count events
- * here if the register is small or we had vblank interrupts off for
- * a long time.
- *
- * We repeat the hardware vblank counter & timestamp query until
- * we get consistent results. This to prevent races between gpu
- * updating its hardware counter while we are retrieving the
- * corresponding vblank timestamp.
- */
- do {
- cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
- rc = drm_get_last_vbltimestamp(dev, crtc, &t_vblank, 0);
- } while (cur_vblank != dev->driver->get_vblank_counter(dev, crtc));
-
- /* Deal with counter wrap */
- diff = cur_vblank - dev->vblank[crtc].last;
- if (cur_vblank < dev->vblank[crtc].last) {
- diff += dev->max_vblank_count;
-
- DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
- crtc, dev->vblank[crtc].last, cur_vblank, diff);
- }
-
- DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
- crtc, diff);
-
- /* Reinitialize corresponding vblank timestamp if high-precision query
- * available. Skip this step if query unsupported or failed. Will
- * reinitialize delayed at next vblank interrupt in that case.
- */
- if (rc) {
- tslot = atomic_read(&dev->vblank[crtc].count) + diff;
- vblanktimestamp(dev, crtc, tslot) = t_vblank;
- }
-
- smp_mb__before_atomic();
- atomic_add(diff, &dev->vblank[crtc].count);
- smp_mb__after_atomic();
-}
-
/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
*/
static int drm_vblank_enable(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
int ret = 0;
assert_spin_locked(&dev->vbl_lock);
spin_lock(&dev->vblank_time_lock);
- if (!dev->vblank[crtc].enabled) {
+ if (!vblank->enabled) {
/*
* Enable vblank irqs under vblank_time_lock protection.
* All vblank count & timestamp updates are held off
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
- atomic_dec(&dev->vblank[crtc].refcount);
+ atomic_dec(&vblank->refcount);
else {
- dev->vblank[crtc].enabled = true;
+ vblank->enabled = true;
drm_update_vblank_count(dev, crtc);
}
}
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
int ret = 0;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return -EINVAL;
+
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
- if (atomic_add_return(1, &dev->vblank[crtc].refcount) == 1) {
+ if (atomic_add_return(1, &vblank->refcount) == 1) {
ret = drm_vblank_enable(dev, crtc);
} else {
- if (!dev->vblank[crtc].enabled) {
- atomic_dec(&dev->vblank[crtc].refcount);
+ if (!vblank->enabled) {
+ atomic_dec(&vblank->refcount);
ret = -EINVAL;
}
}
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
- BUG_ON(atomic_read(&dev->vblank[crtc].refcount) == 0);
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
+ BUG_ON(atomic_read(&vblank->refcount) == 0);
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
/* Last user schedules interrupt disable */
- if (atomic_dec_and_test(&dev->vblank[crtc].refcount) &&
- (drm_vblank_offdelay > 0))
- mod_timer(&dev->vblank[crtc].disable_timer,
- jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ if (atomic_dec_and_test(&vblank->refcount)) {
+ if (drm_vblank_offdelay == 0)
+ return;
+ else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0)
+ vblank_disable_fn((unsigned long)vblank);
+ else
+ mod_timer(&vblank->disable_timer,
+ jiffies + ((drm_vblank_offdelay * HZ)/1000));
+ }
}
EXPORT_SYMBOL(drm_vblank_put);
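
/*
 * Editor's sketch of the get/put discipline (the foo_*() helpers are
 * hypothetical): hold a vblank reference from queuing a page flip until
 * the flip completes, so the interrupt cannot be disabled in between.
 */
static int foo_queue_page_flip(struct drm_device *dev, int crtc)
{
	int ret = drm_vblank_get(dev, crtc);	/* 0->1 enables the irq */
	if (ret)
		return ret;

	foo_arm_flip_in_hardware(dev, crtc);	/* latches on next vblank */
	return 0;
}

static void foo_flip_done(struct drm_device *dev, int crtc)
{
	drm_vblank_put(dev, crtc);	/* last put schedules the disable */
}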
}
EXPORT_SYMBOL(drm_crtc_vblank_put);
+ /**
+ * drm_wait_one_vblank - wait for one vblank
+ * @dev: DRM device
+ * @crtc: crtc index
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+ void drm_wait_one_vblank(struct drm_device *dev, int crtc)
+ {
+ int ret;
+ u32 last;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (WARN_ON(ret))
+ return;
+
+ last = drm_vblank_count(dev, crtc);
+
+ ret = wait_event_timeout(dev->vblank[crtc].queue,
+ last != drm_vblank_count(dev, crtc),
+ msecs_to_jiffies(100));
+
+ WARN_ON(ret == 0);
+
+ drm_vblank_put(dev, crtc);
+ }
+ EXPORT_SYMBOL(drm_wait_one_vblank);
+
+ /**
+ * drm_crtc_wait_one_vblank - wait for one vblank
+ * @crtc: DRM crtc
+ *
+ * This waits for one vblank to pass on @crtc, using the irq driver interfaces.
+ * It is a failure to call this when the vblank irq for @crtc is disabled, e.g.
+ * due to lack of driver support or because the crtc is off.
+ */
+ void drm_crtc_wait_one_vblank(struct drm_crtc *crtc)
+ {
+ drm_wait_one_vblank(crtc->dev, drm_crtc_index(crtc));
+ }
+ EXPORT_SYMBOL(drm_crtc_wait_one_vblank);
+
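/*
 * Editor's sketch of a typical caller (the plane-enable helper is
 * hypothetical): let one full frame scan out before sampling anything
 * that depends on the new plane state.
 */
static void foo_enable_plane_and_settle(struct drm_plane *plane,
					struct drm_crtc *crtc)
{
	foo_hw_enable_plane(plane, crtc);
	drm_crtc_wait_one_vblank(crtc);
}
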
/**
* drm_vblank_off - disable vblank events on a CRTC
* @dev: DRM device
*/
void drm_vblank_off(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
- spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ spin_lock(&dev->vbl_lock);
vblank_disable_and_save(dev, crtc);
- wake_up(&dev->vblank[crtc].queue);
+ wake_up(&vblank->queue);
+
+ /*
+ * Prevent subsequent drm_vblank_get() from re-enabling
+ * the vblank interrupt by bumping the refcount.
+ */
+ if (!vblank->inmodeset) {
+ atomic_inc(&vblank->refcount);
+ vblank->inmodeset = 1;
+ }
+ spin_unlock(&dev->vbl_lock);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
- spin_lock(&dev->event_lock);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
drm_vblank_put(dev, e->pipe);
send_vblank_event(dev, e, seq, &now);
}
- spin_unlock(&dev->event_lock);
-
- spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
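
/*
 * Editor's sketch (driver callbacks hypothetical): modeset paths
 * bracket the CRTC shutdown with drm_vblank_off()/drm_vblank_on() so
 * waiters are flushed, queued events are sent, and the counter never
 * appears to jump backwards across the modeset.
 */
static void foo_crtc_disable(struct drm_crtc *crtc)
{
	drm_vblank_off(crtc->dev, drm_crtc_index(crtc));
	/* ... disable planes and turn the pipe off ... */
}

static void foo_crtc_enable(struct drm_crtc *crtc)
{
	/* ... turn the pipe on ... */
	drm_vblank_on(crtc->dev, drm_crtc_index(crtc));
}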
*/
void drm_vblank_on(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- /* re-enable interrupts if there's are users left */
- if (atomic_read(&dev->vblank[crtc].refcount) != 0)
+ /* Drop our private "prevent drm_vblank_get" refcount */
+ if (vblank->inmodeset) {
+ atomic_dec(&vblank->refcount);
+ vblank->inmodeset = 0;
+ }
+
+ /*
+ * sample the current counter to avoid random jumps
+ * when drm_vblank_enable() applies the diff
+ *
+ * -1 to make sure user will never see the same
+ * vblank counter value before and after a modeset
+ */
+ vblank->last =
+ (dev->driver->get_vblank_counter(dev, crtc) - 1) &
+ dev->max_vblank_count;
+ /*
+ * re-enable interrupts if there are users left, or the
+ * user wishes vblank interrupts to be enabled all the time.
+ */
+ if (atomic_read(&vblank->refcount) != 0 ||
+ (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
WARN_ON(drm_vblank_enable(dev, crtc));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
+
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
+
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return;
+
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* to avoid corrupting the count if multiple, mismatch calls occur),
* so that interrupts remain enabled in the interim.
*/
- if (!dev->vblank[crtc].inmodeset) {
- dev->vblank[crtc].inmodeset = 0x1;
+ if (!vblank->inmodeset) {
+ vblank->inmodeset = 0x1;
if (drm_vblank_get(dev, crtc) == 0)
- dev->vblank[crtc].inmodeset |= 0x2;
+ vblank->inmodeset |= 0x2;
}
}
EXPORT_SYMBOL(drm_vblank_pre_modeset);
*/
void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
unsigned long irqflags;
/* vblank is not initialized (IRQ not installed ?), or has been freed */
if (!dev->num_crtcs)
return;
- if (dev->vblank[crtc].inmodeset) {
+ if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (dev->vblank[crtc].inmodeset & 0x2)
+ if (vblank->inmodeset & 0x2)
drm_vblank_put(dev, crtc);
- dev->vblank[crtc].inmodeset = 0;
+ vblank->inmodeset = 0;
}
}
EXPORT_SYMBOL(drm_vblank_post_modeset);
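
/*
 * Editor's sketch of the legacy (UMS) bracketing these two helpers
 * expect; foo_set_mode() is hypothetical.
 */
static void foo_set_mode(struct drm_device *dev, int pipe)
{
	drm_vblank_pre_modeset(dev, pipe);	/* grabs a ref, sets 0x2 */
	/* ... reprogram CRTC timings; the counter may stall here ... */
	drm_vblank_post_modeset(dev, pipe);	/* drops the ref again */
}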
union drm_wait_vblank *vblwait,
struct drm_file *file_priv)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct drm_pending_vblank_event *e;
struct timeval now;
unsigned long flags;
spin_lock_irqsave(&dev->event_lock, flags);
+ /*
+ * drm_vblank_off() might have been called after we called
+ * drm_vblank_get(). drm_vblank_off() holds event_lock
+ * around the vblank disable, so no need for further locking.
+ * The reference from drm_vblank_get() protects against
+ * vblank disable from another source.
+ */
+ if (!vblank->enabled) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
if (file_priv->event_space < sizeof e->event) {
ret = -EBUSY;
goto err_unlock;
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_vblank_crtc *vblank;
union drm_wait_vblank *vblwait = data;
int ret;
unsigned int flags, seq, crtc, high_crtc;
if (crtc >= dev->num_crtcs)
return -EINVAL;
+ vblank = &dev->vblank[crtc];
+
ret = drm_vblank_get(dev, crtc);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter, %d\n", ret);
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
- dev->vblank[crtc].last_wait = vblwait->request.sequence;
- DRM_WAIT_ON(ret, dev->vblank[crtc].queue, 3 * HZ,
+ vblank->last_wait = vblwait->request.sequence;
+ DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
(((drm_vblank_count(dev, crtc) -
vblwait->request.sequence) <= (1 << 23)) ||
- !dev->vblank[crtc].enabled ||
+ !vblank->enabled ||
!dev->irq_enabled));
if (ret != -EINTR) {
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
- unsigned long flags;
unsigned int seq;
- seq = drm_vblank_count_and_time(dev, crtc, &now);
+ assert_spin_locked(&dev->event_lock);
- spin_lock_irqsave(&dev->event_lock, flags);
+ seq = drm_vblank_count_and_time(dev, crtc, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
send_vblank_event(dev, e, seq, &now);
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
trace_drm_vblank_event(crtc, seq);
}
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
+ struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
u32 vblcount;
s64 diff_ns;
struct timeval tvblank;
if (!dev->num_crtcs)
return false;
+ if (WARN_ON(crtc >= dev->num_crtcs))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
/* Need timestamp lock to prevent concurrent execution with
* vblank enable/disable, as this would cause inconsistent
* or corrupted timestamps and vblank counts.
*/
- spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
+ spin_lock(&dev->vblank_time_lock);
/* Vblank irq handling disabled. Nothing to do. */
- if (!dev->vblank[crtc].enabled) {
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ if (!vblank->enabled) {
+ spin_unlock(&dev->vblank_time_lock);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
return false;
}
*/
/* Get current timestamp and count. */
- vblcount = atomic_read(&dev->vblank[crtc].count);
+ vblcount = atomic_read(&vblank->count);
drm_get_last_vbltimestamp(dev, crtc, &tvblank, DRM_CALLED_FROM_VBLIRQ);
/* Compute time difference to timestamp of last vblank */
* the timestamp computed above.
*/
smp_mb__before_atomic();
- atomic_inc(&dev->vblank[crtc].count);
+ atomic_inc(&vblank->count);
smp_mb__after_atomic();
} else {
DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
crtc, (int) diff_ns);
}
- wake_up(&dev->vblank[crtc].queue);
+ spin_unlock(&dev->vblank_time_lock);
+
+ wake_up(&vblank->queue);
drm_handle_vblank_events(dev, crtc);
- spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
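
/*
 * Editor's sketch of the expected caller: a driver's interrupt handler
 * feeds the core once per vblanking CRTC. foo_read_irq_status() and
 * FOO_PIPE_VBLANK() are hypothetical.
 */
static irqreturn_t foo_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	u32 status = foo_read_irq_status(dev);
	int crtc;

	for (crtc = 0; crtc < dev->num_crtcs; crtc++)
		if (status & FOO_PIPE_VBLANK(crtc))
			drm_handle_vblank(dev, crtc);

	return IRQ_HANDLED;
}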
{
assert_spin_locked(&dev_priv->irq_lock);
- if (!intel_irqs_enabled(dev_priv))
+ if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
if ((dev_priv->irq_mask & mask) != mask) {
/* In vblank? */
if (in_vbl)
- ret |= DRM_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
return ret;
}
* some connectors */
if (hpd_disabled) {
drm_kms_helper_poll_enable(dev);
- mod_timer(&dev_priv->hotplug_reenable_timer,
- jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
+ mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
+ msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
drm_kms_helper_hotplug_event(dev);
}
- static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
- {
- del_timer_sync(&dev_priv->hotplug_reenable_timer);
- }
-
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
* @dev_priv: DRM device private
*
*/
- static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
+ static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
u32 residency_C0_up = 0, residency_C0_down = 0;
- u8 new_delay, adj;
+ int new_delay, adj;
dev_priv->rps.ei_interrupt_count++;
struct drm_i915_private *dev_priv,
u32 master_ctl)
{
+ struct intel_engine_cs *ring;
u32 rcs, bcs, vcs;
uint32_t tmp = 0;
irqreturn_t ret = IRQ_NONE;
if (tmp) {
I915_WRITE(GEN8_GT_IIR(0), tmp);
ret = IRQ_HANDLED;
+
rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
- bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[RCS];
if (rcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[RCS]);
+ notify_ring(dev, ring);
+ if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
+
+ bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
+ ring = &dev_priv->ring[BCS];
if (bcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[BCS]);
+ notify_ring(dev, ring);
+ if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT0)!\n");
}
if (tmp) {
I915_WRITE(GEN8_GT_IIR(1), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
+
vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
+ ring = &dev_priv->ring[VCS2];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS2]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT1)!\n");
}
if (tmp) {
I915_WRITE(GEN8_GT_IIR(3), tmp);
ret = IRQ_HANDLED;
+
vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
+ ring = &dev_priv->ring[VECS];
if (vcs & GT_RENDER_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VECS]);
+ notify_ring(dev, ring);
+ if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
+ intel_execlists_handle_ctx_events(ring);
} else
DRM_ERROR("The master control interrupt lied (GT3)!\n");
}
long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
}
- DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
+ DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
+ port_name(port),
+ long_hpd ? "long" : "short");
/* for long HPD pulses we want to have the digital queue happen,
but we still want HPD storm detection to function. */
if (long_hpd) {
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
- struct intel_crtc *crtc;
-
if (!drm_handle_vblank(dev, pipe))
return false;
- crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
- wake_up(&crtc->vbl_wait);
-
return true;
}
struct drm_i915_private *dev_priv = dev->dev_private;
u32 tmp;
- if (ring->hangcheck.acthd != acthd)
- return HANGCHECK_ACTIVE;
+ if (acthd != ring->hangcheck.acthd) {
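+		/*
+		 * acthd moved: count it as real progress only when it
+		 * advances past the high-water mark; otherwise the batch
+		 * may simply be spinning in a loop it already executed.
+		 */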
+ if (acthd > ring->hangcheck.max_acthd) {
+ ring->hangcheck.max_acthd = acthd;
+ return HANGCHECK_ACTIVE;
+ }
+
+ return HANGCHECK_ACTIVE_LOOP;
+ }
if (IS_GEN2(dev))
return HANGCHECK_HUNG;
switch (ring->hangcheck.action) {
case HANGCHECK_IDLE:
case HANGCHECK_WAIT:
- break;
case HANGCHECK_ACTIVE:
+ break;
+ case HANGCHECK_ACTIVE_LOOP:
ring->hangcheck.score += BUSY;
break;
case HANGCHECK_KICK:
*/
if (ring->hangcheck.score > 0)
ring->hangcheck.score--;
+
+ ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
}
ring->hangcheck.seqno = seqno;
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_irqs, hotplug, enabled_irqs = 0;
if (HAS_PCH_IBX(dev)) {
hotplug_irqs = SDE_HOTPLUG_MASK;
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
/* These are interrupts we'll toggle with the ring mask register */
uint32_t gt_interrupts[] = {
GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
+ GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
};
for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
gen8_irq_reset(dev);
}
I915_WRITE(VLV_MASTER_IER, 0);
- intel_hpd_irq_uninstall(dev_priv);
-
for_each_pipe(pipe)
I915_WRITE(PIPESTAT(pipe), 0xffff);
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
ironlake_irq_reset(dev);
}
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
- intel_hpd_irq_uninstall(dev_priv);
-
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
static void i915_hpd_irq_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
u32 hotplug_en;
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
- list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
+ for_each_intel_encoder(dev, intel_encoder)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
/* Programming the CRT detection parameters tends
if (!dev_priv)
return;
- intel_hpd_irq_uninstall(dev_priv);
-
I915_WRITE(PORT_HOTPLUG_EN, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(IIR, I915_READ(IIR));
}
- static void intel_hpd_irq_reenable(unsigned long data)
+ static void intel_hpd_irq_reenable(struct work_struct *work)
{
- struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv),
+ hotplug_reenable_work.work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
unsigned long irqflags;
int i;
+ intel_runtime_pm_get(dev_priv);
+
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
struct drm_connector *connector;
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+
+ intel_runtime_pm_put(dev_priv);
}
void intel_irq_init(struct drm_device *dev)
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
- setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
- (unsigned long) dev_priv);
+ INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
+ intel_hpd_irq_reenable);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
}
+ /*
+ * Opt out of the vblank disable timer on everything except gen2.
+ * Gen2 doesn't have a hardware frame counter and so depends on
+ * vblank interrupts to produce sane vblank sequence numbers.
+ */
+ if (!IS_GEN2(dev))
+ dev->vblank_disable_immediate = true;
+
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
- static void intel_dp_set_m_n(struct intel_crtc *crtc);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n);
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc);
+ static void chv_prepare_pll(struct intel_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
}
}
+static void assert_vblank_disabled(struct drm_crtc *crtc)
+{
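+	/*
+	 * With vblank irqs off, drm_crtc_vblank_get() must fail; if it
+	 * unexpectedly succeeds, drop the reference it just took.
+	 */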
+ if (WARN_ON(drm_crtc_vblank_get(crtc) == 0))
+ drm_crtc_vblank_put(crtc);
+}
+
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
}
}
- static void intel_reset_dpio(struct drm_device *dev)
- {
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_CHERRYVIEW(dev)) {
- enum dpio_phy phy;
- u32 val;
-
- for (phy = DPIO_PHY0; phy < I915_NUM_PHYS_VLV; phy++) {
- /* Poll for phypwrgood signal */
- if (wait_for(I915_READ(DISPLAY_PHY_STATUS) &
- PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
-
- /*
- * Deassert common lane reset for PHY.
- *
- * This should only be done on init and resume from S3
- * with both PLLs disabled, or we risk losing DPIO and
- * PLL synchronization.
- */
- val = I915_READ(DISPLAY_PHY_CONTROL);
- I915_WRITE(DISPLAY_PHY_CONTROL,
- PHY_COM_LANE_RESET_DEASSERT(phy, val));
- }
- }
- }
-
static void vlv_enable_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
assert_pipe_disabled(dev_priv, pipe);
/* Set PLL en = 0 */
- val = DPLL_SSC_REF_CLOCK_CHV;
+ val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
I915_WRITE(DPLL(pipe), val);
if (WARN_ON(pll->refcount == 0))
return;
- DRM_DEBUG_KMS("enable %s (active %d, on? %d)for crtc %d\n",
+ DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
pll->name, pll->active, pll->on,
crtc->base.base.id);
pll->on = true;
}
- void intel_disable_shared_dpll(struct intel_crtc *crtc)
+ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
/**
* intel_enable_primary_hw_plane - enable the primary plane on a given pipe
- * @dev_priv: i915 private structure
- * @plane: plane to enable
- * @pipe: pipe being fed
+ * @plane: plane to be enabled
+ * @crtc: crtc for the plane
*
- * Enable @plane on @pipe, making sure that @pipe is running first.
+ * Enable @plane on @crtc, making sure that the pipe is running first.
*/
- static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+ static void intel_enable_primary_hw_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc)
{
- struct drm_device *dev = dev_priv->dev;
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- int reg;
- u32 val;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* If the pipe isn't enabled, we can't pump pixels and may hang */
- assert_pipe_enabled(dev_priv, pipe);
+ assert_pipe_enabled(dev_priv, intel_crtc->pipe);
if (intel_crtc->primary_enabled)
return;
intel_crtc->primary_enabled = true;
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
- WARN_ON(val & DISPLAY_PLANE_ENABLE);
-
- I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, plane);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
/*
* BDW signals flip done immediately if the plane
/**
* intel_disable_primary_hw_plane - disable the primary hardware plane
- * @dev_priv: i915 private structure
- * @plane: plane to disable
- * @pipe: pipe consuming the data
+ * @plane: plane to be disabled
+ * @crtc: crtc for the plane
*
- * Disable @plane; should be an independent operation.
+ * Disable @plane on @crtc, making sure that the pipe is running first.
*/
- static void intel_disable_primary_hw_plane(struct drm_i915_private *dev_priv,
- enum plane plane, enum pipe pipe)
+ static void intel_disable_primary_hw_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc)
{
- struct intel_crtc *intel_crtc =
- to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
- int reg;
- u32 val;
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ assert_pipe_enabled(dev_priv, intel_crtc->pipe);
if (!intel_crtc->primary_enabled)
return;
intel_crtc->primary_enabled = false;
- reg = DSPCNTR(plane);
- val = I915_READ(reg);
- WARN_ON((val & DISPLAY_PLANE_ENABLE) == 0);
-
- I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
- intel_flush_primary_plane(dev_priv, plane);
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
}
static bool need_vtd_wa(struct drm_device *dev)
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
- u32 reg;
+ u32 reg = DSPCNTR(plane);
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(reg, 0);
+ if (INTEL_INFO(dev)->gen >= 4)
+ I915_WRITE(DSPSURF(plane), 0);
+ else
+ I915_WRITE(DSPADDR(plane), 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ if (INTEL_INFO(dev)->gen < 4) {
+ if (intel_crtc->pipe == PIPE_B)
+ dspcntr |= DISPPLANE_SEL_PIPE_B;
+
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE(DSPSIZE(plane),
+ ((intel_crtc->config.pipe_src_h - 1) << 16) |
+ (intel_crtc->config.pipe_src_w - 1));
+ I915_WRITE(DSPPOS(plane), 0);
+ }
- reg = DSPCNTR(plane);
- dspcntr = I915_READ(reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
BUG();
}
- if (INTEL_INFO(dev)->gen >= 4) {
- if (obj->tiling_mode != I915_TILING_NONE)
- dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- }
+ if (INTEL_INFO(dev)->gen >= 4 &&
+ obj->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
if (IS_G4X(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
int plane = intel_crtc->plane;
unsigned long linear_offset;
u32 dspcntr;
- u32 reg;
+ u32 reg = DSPCNTR(plane);
+
+ if (!intel_crtc->primary_enabled) {
+ I915_WRITE(reg, 0);
+ I915_WRITE(DSPSURF(plane), 0);
+ POSTING_READ(reg);
+ return;
+ }
+
+ dspcntr = DISPPLANE_GAMMA_ENABLE;
+
+ dspcntr |= DISPLAY_PLANE_ENABLE;
+
+ if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
- reg = DSPCNTR(plane);
- dspcntr = I915_READ(reg);
- /* Mask out pixel format bits in case we change it */
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
- else
- dspcntr &= ~DISPPLANE_TILED;
- if (IS_HASWELL(dev) || IS_BROADWELL(dev))
- dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
- else
+ if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
I915_WRITE(reg, dspcntr);
static void intel_crtc_enable_planes(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
+ assert_vblank_disabled(crtc);
+
drm_vblank_on(dev, pipe);
- intel_enable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_enable_primary_hw_plane(crtc->primary, crtc);
intel_enable_planes(crtc);
intel_crtc_update_cursor(crtc, true);
intel_crtc_dpms_overlay(intel_crtc, true);
intel_crtc_dpms_overlay(intel_crtc, false);
intel_crtc_update_cursor(crtc, false);
intel_disable_planes(crtc);
- intel_disable_primary_hw_plane(dev_priv, plane, pipe);
+ intel_disable_primary_hw_plane(crtc->primary, crtc);
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
drm_vblank_off(dev, pipe);
+
+ assert_vblank_disabled(crtc);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
+ &intel_crtc->config.fdi_m_n, NULL);
}
ironlake_set_pipeconf(crtc);
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- enum plane plane = intel_crtc->plane;
WARN_ON(!crtc->enabled);
if (intel_crtc->config.has_pch_encoder) {
intel_cpu_transcoder_set_m_n(intel_crtc,
- &intel_crtc->config.fdi_m_n);
+ &intel_crtc->config.fdi_m_n, NULL);
}
haswell_set_pipeconf(crtc);
intel_set_pipe_csc(crtc);
- /* Set up the display plane register */
- I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
vlv_update_cdclk(dev);
}
+ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
+ {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val, cmd;
+
+ WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);
+
+ switch (cdclk) {
+ case 400000:
+ cmd = 3;
+ break;
+ case 333333:
+ case 320000:
+ cmd = 2;
+ break;
+ case 266667:
+ cmd = 1;
+ break;
+ case 200000:
+ cmd = 0;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK_CHV;
+ val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ vlv_update_cdclk(dev);
+ }
+
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
int max_pixclk)
{
int vco = valleyview_get_vco(dev_priv);
int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
+ /* FIXME: Punit isn't quite ready yet */
+ if (IS_CHERRYVIEW(dev_priv->dev))
+ return 400000;
+
/*
* Really only a few cases to deal with, as only 4 CDclks are supported:
* 200MHz
int max_pixclk = intel_mode_max_pixclk(dev_priv);
int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
- if (req_cdclk != dev_priv->vlv_cdclk_freq)
- valleyview_set_cdclk(dev, req_cdclk);
+ if (req_cdclk != dev_priv->vlv_cdclk_freq) {
+ if (IS_CHERRYVIEW(dev))
+ cherryview_set_cdclk(dev, req_cdclk);
+ else
+ valleyview_set_cdclk(dev, req_cdclk);
+ }
+
modeset_update_crtc_power_domains(dev);
}
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
bool is_dsi;
- u32 dspcntr;
WARN_ON(!crtc->enabled);
is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
- if (!is_dsi && !IS_CHERRYVIEW(dev))
- vlv_prepare_pll(intel_crtc);
-
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ if (!is_dsi) {
+ if (IS_CHERRYVIEW(dev))
+ chv_prepare_pll(intel_crtc);
+ else
+ vlv_prepare_pll(intel_crtc);
+ }
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
i9xx_set_pipeconf(intel_crtc);
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
- int plane = intel_crtc->plane;
- u32 dspcntr;
WARN_ON(!crtc->enabled);
i9xx_set_pll_dividers(intel_crtc);
- /* Set up the display plane register */
- dspcntr = DISPPLANE_GAMMA_ENABLE;
-
- if (pipe == 0)
- dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
- else
- dspcntr |= DISPPLANE_SEL_PIPE_B;
-
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
intel_set_pipe_timings(intel_crtc);
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((intel_crtc->config.pipe_src_h - 1) << 16) |
- (intel_crtc->config.pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
-
i9xx_set_pipeconf(intel_crtc);
- I915_WRITE(DSPCNTR(plane), dspcntr);
- POSTING_READ(DSPCNTR(plane));
-
- dev_priv->display.update_primary_plane(crtc, crtc->primary->fb,
- crtc->x, crtc->y);
-
intel_crtc->active = true;
if (!IS_GEN2(dev))
u32 val;
int divider;
+ /* FIXME: Punit isn't quite ready yet */
+ if (IS_CHERRYVIEW(dev))
+ return 400000;
+
mutex_lock(&dev_priv->dpio_lock);
val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+		/* Set M2_N2 registers only for gen < 8, where they are
+		 * available, and only if DRRS is supported, to make sure
+		 * the registers are not unnecessarily accessed.
+		 */
+ if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ crtc->config.has_drrs) {
+ I915_WRITE(PIPE_DATA_M2(transcoder),
+ TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+ I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
+ I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
+ I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+ }
} else {
I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
}
}
- static void intel_dp_set_m_n(struct intel_crtc *crtc)
+ void intel_dp_set_m_n(struct intel_crtc *crtc)
{
if (crtc->config.has_pch_encoder)
intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
else
- intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
+ intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n,
+ &crtc->config.dp_m2_n2);
}
static void vlv_update_pll(struct intel_crtc *crtc)
}
static void chv_update_pll(struct intel_crtc *crtc)
+ {
+ crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
+ DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
+ DPLL_VCO_ENABLE;
+ if (crtc->pipe != PIPE_A)
+ crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+
+ crtc->config.dpll_hw_state.dpll_md =
+ (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ }
+
+ static void chv_prepare_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
int refclk;
- crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
- DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
- DPLL_VCO_ENABLE;
- if (pipe != PIPE_A)
- crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
-
- crtc->config.dpll_hw_state.dpll_md =
- (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
-
bestn = crtc->config.dpll.n;
bestm2_frac = crtc->config.dpll.m2 & 0x3fffff;
bestm1 = crtc->config.dpll.m1;
u32 mdiv;
int refclk = 100000;
+	/* In case of MIPI, the DPLL will not even be used */
+ if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
+ return;
+
mutex_lock(&dev_priv->dpio_lock);
mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
mutex_unlock(&dev_priv->dpio_lock);
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+ crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
u32 val, final;
bool has_lvds = false;
bool can_ssc = false;
/* We need to take the global config into account */
- list_for_each_entry(encoder, &mode_config->encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
has_panel = true;
static void lpt_init_pch_refclk(struct drm_device *dev)
{
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum transcoder transcoder,
- struct intel_link_m_n *m_n)
+ struct intel_link_m_n *m_n,
+ struct intel_link_m_n *m2_n2)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+	/* Read M2_N2 registers only for gen < 8, where they are
+	 * available, and only if DRRS is supported, to make sure the
+	 * registers are not unnecessarily read.
+	 */
+ if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
+ crtc->config.has_drrs) {
+ m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
+ m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
+ m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+ & ~TU_SIZE_MASK;
+ m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
+ m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+ & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ }
} else {
m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
else
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->dp_m_n);
+ &pipe_config->dp_m_n,
+ &pipe_config->dp_m2_n2);
}
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->fdi_m_n);
+ &pipe_config->fdi_m_n, NULL);
}
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
crtc->base.primary->fb->height = ((val >> 0) & 0xfff) + 1;
val = I915_READ(DSPSTRIDE(pipe));
- crtc->base.primary->fb->pitches[0] = val & 0xffffff80;
+ crtc->base.primary->fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_align_height(dev, crtc->base.primary->fb->height,
plane_config->tiled);
return 0;
}
+ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
+ enum port port,
+ struct intel_crtc_config *pipe_config)
+ {
+ pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+
+ switch (pipe_config->ddi_pll_sel) {
+ case PORT_CLK_SEL_WRPLL1:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL1;
+ break;
+ case PORT_CLK_SEL_WRPLL2:
+ pipe_config->shared_dpll = DPLL_ID_WRPLL2;
+ break;
+ }
+ }
+
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
struct intel_crtc_config *pipe_config)
{
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
-
- switch (pipe_config->ddi_pll_sel) {
- case PORT_CLK_SEL_WRPLL1:
- pipe_config->shared_dpll = DPLL_ID_WRPLL1;
- break;
- case PORT_CLK_SEL_WRPLL2:
- pipe_config->shared_dpll = DPLL_ID_WRPLL2;
- break;
- }
+ haswell_get_ddi_pll(dev_priv, port, pipe_config);
if (pipe_config->shared_dpll >= 0) {
pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t cntl;
+ uint32_t cntl = 0, size = 0;
- if (base != intel_crtc->cursor_base) {
- /* On these chipsets we can only modify the base whilst
- * the cursor is disabled.
- */
- if (intel_crtc->cursor_cntl) {
- I915_WRITE(_CURACNTR, 0);
- POSTING_READ(_CURACNTR);
- intel_crtc->cursor_cntl = 0;
+ if (base) {
+ unsigned int width = intel_crtc->cursor_width;
+ unsigned int height = intel_crtc->cursor_height;
+ unsigned int stride = roundup_pow_of_two(width) * 4;
+
+ switch (stride) {
+ default:
+ WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+ width, stride);
+ stride = 256;
+ /* fallthrough */
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
}
- I915_WRITE(_CURABASE, base);
- POSTING_READ(_CURABASE);
+ cntl |= CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(stride);
+
+ size = (height << 12) | width;
}
- /* XXX width must be 64, stride 256 => 0x00 << 28 */
- cntl = 0;
- if (base)
- cntl = (CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB);
- if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(_CURACNTR, cntl);
+ if (intel_crtc->cursor_cntl != 0 &&
+ (intel_crtc->cursor_base != base ||
+ intel_crtc->cursor_size != size ||
+ intel_crtc->cursor_cntl != cntl)) {
+ /* On these chipsets we can only modify the base/size/stride
+ * whilst the cursor is disabled.
+ */
+ I915_WRITE(_CURACNTR, 0);
POSTING_READ(_CURACNTR);
- intel_crtc->cursor_cntl = cntl;
+ intel_crtc->cursor_cntl = 0;
}
- }
- static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
- {
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
- uint32_t cntl;
+ if (intel_crtc->cursor_base != base)
+ I915_WRITE(_CURABASE, base);
- cntl = 0;
- if (base) {
- cntl = MCURSOR_GAMMA_ENABLE;
- switch (intel_crtc->cursor_width) {
- case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
- break;
- default:
- WARN_ON(1);
- return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
+ if (intel_crtc->cursor_size != size) {
+ I915_WRITE(CURSIZE, size);
+ intel_crtc->cursor_size = size;
}
+
if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(CURCNTR(pipe), cntl);
- POSTING_READ(CURCNTR(pipe));
+ I915_WRITE(_CURACNTR, cntl);
+ POSTING_READ(_CURACNTR);
intel_crtc->cursor_cntl = cntl;
}
-
- /* and commit changes on next vblank */
- I915_WRITE(CURBASE(pipe), base);
- POSTING_READ(CURBASE(pipe));
}
- static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
+ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
WARN_ON(1);
return;
}
+ cntl |= pipe << 28; /* Connect to correct pipe */
}
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
I915_WRITE(CURPOS(pipe), pos);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
- ivb_update_cursor(crtc, base);
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
i845_update_cursor(crtc, base);
else
i9xx_update_cursor(crtc, base);
intel_crtc->cursor_base = base;
}
+ static bool cursor_size_ok(struct drm_device *dev,
+ uint32_t width, uint32_t height)
+ {
+ if (width == 0 || height == 0)
+ return false;
+
+ /*
+ * 845g/865g are special in that they are only limited by
+ * the width of their cursors; the height is arbitrary up to
+ * the precision of the register. Everything else requires
+ * square cursors, limited to a few power-of-two sizes.
+ */
+ if (IS_845G(dev) || IS_I865G(dev)) {
+ if ((width & 63) != 0)
+ return false;
+
+ if (width > (IS_845G(dev) ? 64 : 512))
+ return false;
+
+ if (height > 1023)
+ return false;
+ } else {
+ switch (width | height) {
+ case 256:
+ case 128:
+ if (IS_GEN2(dev))
+ return false;
+ case 64:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+ }
+
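/* Editorial sketch (hypothetical, not part of the patch): the
 * "width | height" switch in cursor_size_ok() above works because, for
 * nonzero dimensions, the bitwise OR equals 64, 128 or 256 only when
 * width and height are both that same power of two, i.e. only square
 * cursors pass. A stand-alone version of the same predicate (kernel
 * context assumed; the extra gen2 restriction on 128/256 is omitted):
 */
static bool example_square_pot_cursor(uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	switch (width | height) {
	case 64:
	case 128:
	case 256:
		/* The OR matched a single set bit, so with both values
		 * nonzero, width == height is guaranteed here. */
		return true;
	default:
		return false;
	}
}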
/*
* intel_crtc_cursor_set_obj - Set cursor to specified GEM object
*
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned old_width;
+ unsigned old_width, stride;
uint32_t addr;
int ret;
}
/* Check for which cursor types we support */
- if (!((width == 64 && height == 64) ||
- (width == 128 && height == 128 && !IS_GEN2(dev)) ||
- (width == 256 && height == 256 && !IS_GEN2(dev)))) {
+ if (!cursor_size_ok(dev, width, height)) {
DRM_DEBUG("Cursor dimension not supported\n");
return -EINVAL;
}
- if (obj->base.size < width * height * 4) {
+ stride = roundup_pow_of_two(width) * 4;
+ if (obj->base.size < stride * height) {
DRM_DEBUG_KMS("buffer is too small\n");
ret = -ENOMEM;
goto fail;
addr = obj->phys_handle->busaddr;
}
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, (height << 12) | width);
-
finish:
if (intel_crtc->cursor_bo) {
if (!INTEL_INFO(dev)->cursor_needs_physical)
connector->base.id, connector->name,
encoder->base.id, encoder->name);
- drm_modeset_acquire_init(ctx, 0);
-
retry:
ret = drm_modeset_lock(&config->connection_mutex, ctx);
if (ret)
i++;
if (!(encoder->possible_crtcs & (1 << i)))
continue;
- if (!possible_crtc->enabled) {
- crtc = possible_crtc;
- break;
- }
+ if (possible_crtc->enabled)
+ continue;
+ /* This can occur when applying the pipe A quirk on resume. */
+ if (to_intel_crtc(possible_crtc)->new_enabled)
+ continue;
+
+ crtc = possible_crtc;
+ break;
}
/*
goto retry;
}
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
-
return false;
}
void intel_release_load_detect_pipe(struct drm_connector *connector,
- struct intel_load_detect_pipe *old,
- struct drm_modeset_acquire_ctx *ctx)
+ struct intel_load_detect_pipe *old)
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
drm_framebuffer_unreference(old->release_fb);
}
- goto unlock;
return;
}
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
-
- unlock:
- drm_modeset_drop_locks(ctx);
- drm_modeset_acquire_fini(ctx);
}
static int i9xx_pll_refclk(struct drm_device *dev,
return false;
else if (i915.use_mmio_flip > 0)
return true;
+ else if (i915.enable_execlists)
+ return true;
else
return ring != obj->ring;
}
to_intel_encoder(connector->base.encoder);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(encoder->base.crtc);
}
connector->base.encoder = &connector->new_encoder->base;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->base.crtc = &encoder->new_crtc->base;
}
pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
pipe_config->dp_m_n.tu);
+
+ DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
+ pipe_config->has_dp_encoder,
+ pipe_config->dp_m2_n2.gmch_m,
+ pipe_config->dp_m2_n2.gmch_n,
+ pipe_config->dp_m2_n2.link_m,
+ pipe_config->dp_m2_n2.link_n,
+ pipe_config->dp_m2_n2.tu);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->requested_mode);
DRM_DEBUG_KMS("adjusted mode:\n");
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *source_encoder;
- list_for_each_entry(source_encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, source_encoder) {
if (source_encoder->new_crtc != crtc)
continue;
struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc != crtc)
continue;
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (&encoder->new_crtc->base != crtc)
continue;
1 << connector->new_encoder->new_crtc->pipe;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc == &encoder->new_crtc->base)
continue;
struct intel_crtc *intel_crtc;
struct drm_connector *connector;
- list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, intel_encoder) {
if (!intel_encoder->base.crtc)
continue;
return false; \
}
+ /* This is needed on BDW+, where there is only one set of M/N registers,
+ * shared between the high and low refresh rates, so a single hw state
+ * can legitimately match either of two sw states. The macro can be used
+ * whenever one hw state has to be compared against multiple sw state
+ * variables; see the gen8 dp_m_n checks below for a usage example.
+ */
+ #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
+ if ((current_config->name != pipe_config->name) && \
+ (current_config->alt_name != pipe_config->name)) { \
+ DRM_ERROR("mismatch in " #name " " \
+ "(expected %i or %i, found %i)\n", \
+ current_config->name, \
+ current_config->alt_name, \
+ pipe_config->name); \
+ return false; \
+ }
+
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
DRM_ERROR("mismatch in " #name "(" #mask ") " \
PIPE_CONF_CHECK_I(fdi_m_n.tu);
PIPE_CONF_CHECK_I(has_dp_encoder);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
- PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
- PIPE_CONF_CHECK_I(dp_m_n.link_m);
- PIPE_CONF_CHECK_I(dp_m_n.link_n);
- PIPE_CONF_CHECK_I(dp_m_n.tu);
+
+ if (INTEL_INFO(dev)->gen < 8) {
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m_n.link_m);
+ PIPE_CONF_CHECK_I(dp_m_n.link_n);
+ PIPE_CONF_CHECK_I(dp_m_n.tu);
+
+ if (current_config->has_drrs) {
+ PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
+ PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
+ PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
+ PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
+ PIPE_CONF_CHECK_I(dp_m2_n2.tu);
+ }
+ } else {
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
+ PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
+ }
PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
+ #undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
struct intel_encoder *encoder;
struct intel_connector *connector;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
bool enabled = false;
bool active = false;
enum pipe pipe, tracked_pipe;
WARN(crtc->active && !crtc->base.enabled,
"active crtc, but not enabled in sw tracking\n");
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->base.crtc != &crtc->base)
continue;
enabled = true;
if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
active = crtc->active;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
}
count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->new_crtc =
to_intel_crtc(config->save_encoder_crtcs[count++]);
}
}
/* Check for any encoders that needs to be disabled. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
int num_connectors = 0;
list_for_each_entry(connector,
&dev->mode_config.connector_list,
for_each_intel_crtc(dev, crtc) {
crtc->new_enabled = false;
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc) {
crtc->new_enabled = true;
break;
connector->new_encoder = NULL;
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
if (encoder->new_crtc == crtc)
encoder->new_crtc = NULL;
}
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
} else if (config->fb_changed) {
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
intel_crtc_wait_for_pending_flips(set->crtc);
*/
if (!intel_crtc->primary_enabled && ret == 0) {
WARN_ON(!intel_crtc->active);
- intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
- intel_crtc->pipe);
+ intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
}
/*
intel_primary_plane_disable(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_crtc *intel_crtc;
if (!plane->fb)
goto disable_unpin;
intel_crtc_wait_for_pending_flips(plane->crtc);
- intel_disable_primary_hw_plane(dev_priv, intel_plane->plane,
- intel_plane->pipe);
+ intel_disable_primary_hw_plane(plane, plane->crtc);
+
disable_unpin:
mutex_lock(&dev->struct_mutex);
i915_gem_track_fb(intel_fb_obj(plane->fb), NULL,
uint32_t src_w, uint32_t src_h)
{
struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
struct drm_rect dest = {
INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
if (intel_crtc->primary_enabled)
- intel_disable_primary_hw_plane(dev_priv,
- intel_plane->plane,
- intel_plane->pipe);
+ intel_disable_primary_hw_plane(plane, crtc);
if (plane->fb != fb)
return ret;
if (!intel_crtc->primary_enabled)
- intel_enable_primary_hw_plane(dev_priv, intel_crtc->plane,
- intel_crtc->pipe);
+ intel_enable_primary_hw_plane(plane, crtc);
return 0;
}
};
const struct drm_rect clip = {
/* integer pixels */
- .x2 = intel_crtc->config.pipe_src_w,
- .y2 = intel_crtc->config.pipe_src_h,
+ .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
+ .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
};
bool visible;
int ret;
return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
} else {
intel_crtc_update_cursor(crtc, visible);
+
+ intel_frontbuffer_flip(crtc->dev,
+ INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
+
return 0;
}
}
intel_crtc->cursor_base = ~0;
intel_crtc->cursor_cntl = ~0;
-
- init_waitqueue_head(&intel_crtc->vbl_wait);
+ intel_crtc->cursor_size = ~0;
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
int index_mask = 0;
int entry = 0;
- list_for_each_entry(source_encoder,
- &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, source_encoder) {
if (encoders_cloneable(encoder, source_encoder))
index_mask |= (1 << entry);
intel_edp_psr_init(dev);
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
+ for_each_intel_encoder(dev, encoder) {
encoder->base.possible_crtcs = encoder->crtc_mask;
encoder->base.possible_clones =
intel_encoder_clones(encoder);
dev_priv->display.get_display_clock_speed =
i830_get_display_clock_speed;
- if (HAS_PCH_SPLIT(dev)) {
- if (IS_GEN5(dev)) {
- dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- } else if (IS_GEN6(dev)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- snb_modeset_global_resources;
- } else if (IS_IVYBRIDGE(dev)) {
- /* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
- dev_priv->display.write_eld = ironlake_write_eld;
- dev_priv->display.modeset_global_resources =
- ivb_modeset_global_resources;
- } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
- dev_priv->display.fdi_link_train = hsw_fdi_link_train;
- dev_priv->display.write_eld = haswell_write_eld;
- dev_priv->display.modeset_global_resources =
- haswell_modeset_global_resources;
- }
- } else if (IS_G4X(dev)) {
+ if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
+ } else if (IS_GEN5(dev)) {
+ dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ } else if (IS_GEN6(dev)) {
+ dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ snb_modeset_global_resources;
+ } else if (IS_IVYBRIDGE(dev)) {
+ /* FIXME: detect B0+ stepping and use auto training */
+ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->display.write_eld = ironlake_write_eld;
+ dev_priv->display.modeset_global_resources =
+ ivb_modeset_global_resources;
+ } else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
+ dev_priv->display.fdi_link_train = hsw_fdi_link_train;
+ dev_priv->display.write_eld = haswell_write_eld;
+ dev_priv->display.modeset_global_resources =
+ haswell_modeset_global_resources;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.modeset_global_resources =
valleyview_modeset_global_resources;
intel_init_clock_gating(dev);
- intel_reset_dpio(dev);
-
intel_enable_gt_powersave(dev);
}
dev->mode_config.max_height = 8192;
}
- if (IS_GEN2(dev)) {
+ if (IS_845G(dev) || IS_I865G(dev)) {
+ dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
+ dev->mode_config.cursor_height = 1023;
+ } else if (IS_GEN2(dev)) {
dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
} else {
}
intel_init_dpio(dev);
- intel_reset_dpio(dev);
intel_shared_dpll_init(dev);
struct intel_connector *connector;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
- struct drm_modeset_acquire_ctx ctx;
+ struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
/* We can't just switch on the pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
if (!crt)
return;
- if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx))
- intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx);
-
-
+ if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
+ intel_release_load_detect_pipe(crt, &load_detect_temp);
}
static bool
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
/* restore vblank interrupts to correct state */
- if (crtc->active)
+ if (crtc->active) {
+ update_scanline_offset(crtc);
drm_vblank_on(dev, crtc->pipe);
- else
+ } else
drm_vblank_off(dev, crtc->pipe);
/* We need to sanitize the plane -> pipe mapping first because this will
*/
crtc->cpu_fifo_underrun_disabled = true;
crtc->pch_fifo_underrun_disabled = true;
-
- update_scanline_offset(crtc);
}
}
intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
}
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
}
/* HW state is read out, now we need to sanitize this mess. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list,
- base.head) {
+ for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
* experience fancy races otherwise.
*/
drm_irq_uninstall(dev);
- cancel_work_sync(&dev_priv->hotplug_work);
+ intel_hpd_cancel_work(dev_priv);
dev_priv->pm._irqs_disabled = true;
/*
err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
}
}
+
+ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
+ {
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_unpin_work *work;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev->event_lock, irqflags);
+
+ work = crtc->unpin_work;
+
+ if (work && work->event &&
+ work->event->base.file_priv == file) {
+ kfree(work->event);
+ work->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ }
+ }
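/* Editorial sketch (hypothetical, not part of the patch): a driver's
 * preclose hook is the natural caller of intel_modeset_preclose(), so
 * that page-flip completion events destined for the closing file are
 * dropped rather than delivered to a freed drm_file:
 */
static void example_driver_preclose(struct drm_device *dev,
				    struct drm_file *file)
{
	intel_modeset_preclose(dev, file);
}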
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+ #include <nvif/class.h>
+
#include "nouveau_fbcon.h"
#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
#include "nouveau_fence.h"
- #include <engine/disp.h>
-
- #include <core/class.h>
+ #include <nvif/event.h>
static int
- nouveau_display_vblank_handler(void *data, u32 type, int head)
+ nouveau_display_vblank_handler(struct nvif_notify *notify)
{
- struct nouveau_drm *drm = data;
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
+ struct nouveau_crtc *nv_crtc =
+ container_of(notify, typeof(*nv_crtc), vblank);
+ drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index);
+ return NVIF_NOTIFY_KEEP;
}
int
nouveau_display_vblank_enable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp) {
- nouveau_event_get(disp->vblank[head]);
- return 0;
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_get(&nv_crtc->vblank);
+ return 0;
+ }
}
- return -EIO;
+ return -EINVAL;
}
void
nouveau_display_vblank_disable(struct drm_device *dev, int head)
{
- struct nouveau_display *disp = nouveau_display(dev);
- if (disp)
- nouveau_event_put(disp->vblank[head]);
+ struct drm_crtc *crtc;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (nv_crtc->index == head) {
+ nvif_notify_put(&nv_crtc->vblank);
+ return;
+ }
+ }
}
static inline int
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime)
{
- const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct {
+ struct nv04_disp_mthd_v0 base;
+ struct nv04_disp_scanoutpos_v0 scan;
+ } args = {
+ .base.method = NV04_DISP_SCANOUTPOS,
+ .base.head = nouveau_crtc(crtc)->index,
+ };
struct nouveau_display *disp = nouveau_display(crtc->dev);
- struct nv04_display_scanoutpos args;
int ret, retry = 1;
do {
- ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
if (ret != 0)
return 0;
- if (args.vline) {
+ if (args.scan.vline) {
ret |= DRM_SCANOUTPOS_ACCURATE;
ret |= DRM_SCANOUTPOS_VALID;
break;
if (retry) ndelay(crtc->linedur_ns);
} while (retry--);
- *hpos = args.hline;
- *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
- if (stime) *stime = ns_to_ktime(args.time[0]);
- if (etime) *etime = ns_to_ktime(args.time[1]);
+ *hpos = args.scan.hline;
+ *vpos = calc(args.scan.vblanks, args.scan.vblanke,
+ args.scan.vtotal, args.scan.vline);
+ if (stime) *stime = ns_to_ktime(args.scan.time[0]);
+ if (etime) *etime = ns_to_ktime(args.scan.time[1]);
if (*vpos < 0)
- ret |= DRM_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
return ret;
}
static void
nouveau_display_vblank_fini(struct drm_device *dev)
{
- struct nouveau_display *disp = nouveau_display(dev);
- int i;
+ struct drm_crtc *crtc;
drm_vblank_cleanup(dev);
- if (disp->vblank) {
- for (i = 0; i < dev->mode_config.num_crtc; i++)
- nouveau_event_ref(NULL, &disp->vblank[i]);
- kfree(disp->vblank);
- disp->vblank = NULL;
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvif_notify_fini(&nv_crtc->vblank);
}
}
nouveau_display_vblank_init(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- int ret, i;
-
- disp->vblank = kzalloc(dev->mode_config.num_crtc *
- sizeof(*disp->vblank), GFP_KERNEL);
- if (!disp->vblank)
- return -ENOMEM;
+ struct drm_crtc *crtc;
+ int ret;
- for (i = 0; i < dev->mode_config.num_crtc; i++) {
- ret = nouveau_event_new(pdisp->vblank, 1, i,
- nouveau_display_vblank_handler,
- drm, &disp->vblank[i]);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ ret = nvif_notify_init(&disp->disp, NULL,
+ nouveau_display_vblank_handler, false,
+ NV04_DISP_NTFY_VBLANK,
+ &(struct nvif_notify_head_req_v0) {
+ .head = nv_crtc->index,
+ },
+ sizeof(struct nvif_notify_head_req_v0),
+ sizeof(struct nvif_notify_head_rep_v0),
+ &nv_crtc->vblank);
if (ret) {
nouveau_display_vblank_fini(dev);
return ret;
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm_fb->dev);
+
+ if (disp->fb_dtor)
+ disp->fb_dtor(drm_fb);
if (fb->nvbo)
drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
struct drm_framebuffer *fb = &nv_fb->base;
int ret;
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
nv_fb->nvbo = nvbo;
- if (nv_device(drm->device)->card_type >= NV_50) {
- u32 tile_flags = nouveau_bo_tile_layout(nvbo);
- if (tile_flags == 0x7a00 ||
- tile_flags == 0xfe00)
- nv_fb->r_dma = NvEvoFB32;
- else
- if (tile_flags == 0x7000)
- nv_fb->r_dma = NvEvoFB16;
- else
- nv_fb->r_dma = NvEvoVRAM_LP;
-
- switch (fb->depth) {
- case 8: nv_fb->r_format = 0x1e00; break;
- case 15: nv_fb->r_format = 0xe900; break;
- case 16: nv_fb->r_format = 0xe800; break;
- case 24:
- case 32: nv_fb->r_format = 0xcf00; break;
- case 30: nv_fb->r_format = 0xd100; break;
- default:
- NV_ERROR(drm, "unknown depth %d\n", fb->depth);
- return -EINVAL;
- }
-
- if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
- NV_ERROR(drm, "framebuffer requires contiguous bo\n");
- return -EINVAL;
- }
-
- if (nv_device(drm->device)->chipset == 0x50)
- nv_fb->r_format |= (tile_flags << 8);
-
- if (!tile_flags) {
- if (nv_device(drm->device)->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
- else
- nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
- } else {
- u32 mode = nvbo->tile_mode;
- if (nv_device(drm->device)->card_type >= NV_C0)
- mode >>= 4;
- nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
- }
- }
-
ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
- if (ret) {
+ if (ret)
return ret;
+
+ if (disp->fb_ctor) {
+ ret = disp->fb_ctor(fb);
+ if (ret)
+ disp->fb_dtor(fb);
}
- return 0;
+ return ret;
}
static struct drm_framebuffer *
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_get(conn->hpd);
+ nvif_notify_get(&conn->hpd);
}
return ret;
{
struct nouveau_display *disp = nouveau_display(dev);
struct drm_connector *connector;
+ int head;
+
+ /* Make sure that drm and hw vblank irqs get properly disabled. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_off(dev, head);
/* disable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- if (conn->hpd) nouveau_event_put(conn->hpd);
+ nvif_notify_put(&conn->hpd);
}
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
}
- int
- nouveau_display_create(struct drm_device *dev)
+ static void
+ nouveau_display_create_properties(struct drm_device *dev)
{
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_device *device = nouveau_dev(dev);
- struct nouveau_display *disp;
- int ret, gen;
-
- disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
- if (!disp)
- return -ENOMEM;
-
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dvi_i_properties(dev);
+ struct nouveau_display *disp = nouveau_display(dev);
+ int gen;
- if (nv_device(drm->device)->card_type < NV_50)
+ if (disp->disp.oclass < NV50_DISP)
gen = 0;
else
- if (nv_device(drm->device)->card_type < NV_D0)
+ if (disp->disp.oclass < GF110_DISP)
gen = 1;
else
gen = 2;
disp->underscan_vborder_property =
drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
- if (gen >= 1) {
- /* -90..+90 */
- disp->vibrant_hue_property =
- drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+ if (gen < 1)
+ return;
- /* -100..+100 */
- disp->color_vibrance_property =
- drm_property_create_range(dev, 0, "color vibrance", 0, 200);
- }
+ /* -90..+90 */
+ disp->vibrant_hue_property =
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+
+ /* -100..+100 */
+ disp->color_vibrance_property =
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
+ }
+
+ int
+ nouveau_display_create(struct drm_device *dev)
+ {
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_display *disp;
+ int ret;
+
+ disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
- dev->mode_config.fb_base = nv_device_resource_start(device, 1);
+ dev->mode_config.fb_base = nv_device_resource_start(nvkm_device(&drm->device), 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- if (nv_device(drm->device)->card_type < NV_10) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
} else
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
dev->mode_config.max_width = 4096;
dev->mode_config.max_height = 4096;
} else {
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- if (nv_device(drm->device)->chipset < 0x11)
+ if (drm->device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
else
dev->mode_config.async_page_flip = true;
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (drm->vbios.dcb.entries) {
+ if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
static const u16 oclass[] = {
- GM107_DISP_CLASS,
- NVF0_DISP_CLASS,
- NVE0_DISP_CLASS,
- NVD0_DISP_CLASS,
- NVA3_DISP_CLASS,
- NV94_DISP_CLASS,
- NVA0_DISP_CLASS,
- NV84_DISP_CLASS,
- NV50_DISP_CLASS,
- NV04_DISP_CLASS,
+ GM107_DISP,
+ GK110_DISP,
+ GK104_DISP,
+ GF110_DISP,
+ GT214_DISP,
+ GT206_DISP,
+ GT200_DISP,
+ G82_DISP,
+ NV50_DISP,
+ NV04_DISP,
};
int i;
for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
- ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
- NVDRM_DISPLAY, oclass[i],
- NULL, 0, &disp->core);
+ ret = nvif_object_init(nvif_object(&drm->device), NULL,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->disp);
}
if (ret == 0) {
- if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ nouveau_display_create_properties(dev);
+ if (disp->disp.oclass < NV50_DISP)
ret = nv04_display_create(dev);
else
ret = nv50_display_create(dev);
nouveau_display_destroy(struct drm_device *dev)
{
struct nouveau_display *disp = nouveau_display(dev);
- struct nouveau_drm *drm = nouveau_drm(dev);
nouveau_backlight_exit(dev);
nouveau_display_vblank_fini(dev);
if (disp->dtor)
disp->dtor(dev);
- nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+ nvif_object_fini(&disp->disp);
nouveau_drm(dev)->display = NULL;
kfree(disp);
if (!nouveau_fb || !nouveau_fb->nvbo)
continue;
- nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ NV_ERROR(drm, "Could not pin framebuffer\n");
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
nouveau_display_resume(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ int head;
+
nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
nv_crtc->lut.depth = 0;
}
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
drm_helper_resume_force_mode(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
- ret = nouveau_fence_sync(old_bo->bo.sync_obj, chan);
+ ret = nouveau_fence_sync(old_bo, chan, false);
if (ret)
goto fail;
if (ret)
goto fail;
- if (nv_device(drm->device)->card_type < NV_C0)
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
else
BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan = drm->channel;
+ struct nouveau_channel *chan;
+ struct nouveau_cli *cli;
struct nouveau_fence *fence;
int ret;
- if (!drm->channel)
+ chan = drm->channel;
+ if (!chan)
return -ENODEV;
+ cli = (void *)nvif_client(&chan->device->base);
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
goto fail_free;
}
- mutex_lock(&chan->cli->mutex);
-
- /* synchronise rendering channel with the kernel's channel */
- spin_lock(&new_bo->bo.bdev->fence_lock);
- fence = nouveau_fence_ref(new_bo->bo.sync_obj);
- spin_unlock(&new_bo->bo.bdev->fence_lock);
- ret = nouveau_fence_sync(fence, chan);
- nouveau_fence_unref(&fence);
+ mutex_lock(&cli->mutex);
+ ret = ttm_bo_reserve(&new_bo->bo, true, false, false, NULL);
if (ret)
goto fail_unpin;
- ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
- if (ret)
+ /* synchronise rendering channel with the kernel's channel */
+ ret = nouveau_fence_sync(new_bo, chan, false);
+ if (ret) {
+ ttm_bo_unreserve(&new_bo->bo);
goto fail_unpin;
+ }
+
+ if (new_bo != old_bo) {
+ ttm_bo_unreserve(&new_bo->bo);
+
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
+ if (ret)
+ goto fail_unpin;
+ }
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
drm_vblank_get(dev, nouveau_crtc(crtc)->index);
/* Emit a page flip */
- if (nv_device(drm->device)->card_type >= NV_50) {
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
if (ret)
goto fail_unreserve;
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
if (ret)
goto fail_unreserve;
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
/* Update the crtc struct and cleanup */
crtc->primary->fb = fb;
- nouveau_bo_fence(old_bo, fence);
+ nouveau_bo_fence(old_bo, fence, false);
ttm_bo_unreserve(&old_bo->bo);
if (old_bo != new_bo)
nouveau_bo_unpin(old_bo);
drm_vblank_put(dev, nouveau_crtc(crtc)->index);
ttm_bo_unreserve(&old_bo->bo);
fail_unpin:
- mutex_unlock(&chan->cli->mutex);
+ mutex_unlock(&cli->mutex);
if (old_bo != new_bo)
nouveau_bo_unpin(new_bo);
fail_free:
s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
/* Vblank timestamps/counts are only correct on >= NV-50 */
- if (nv_device(drm->device)->card_type >= NV_50)
+ if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
crtcid = s->crtc;
drm_send_vblank_event(dev, crtcid, s->event);
struct nouveau_page_flip_state state;
if (!nouveau_finish_page_flip(chan, &state)) {
- if (nv_device(drm->device)->card_type < NV_50) {
+ if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
nv_set_crtc_base(drm->dev, state.crtc, state.offset +
state.y * state.pitch +
state.x * state.bpp / 8);
r = radeon_fence_wait(work->fence, false);
if (r == -EDEADLK) {
up_read(&rdev->exclusive_lock);
- r = radeon_gpu_reset(rdev);
+ do {
+ r = radeon_gpu_reset(rdev);
+ } while (r == -EAGAIN);
down_read(&rdev->exclusive_lock);
}
if (r)
obj = new_radeon_fb->obj;
new_rbo = gem_to_radeon_bo(obj);
- spin_lock(&new_rbo->tbo.bdev->fence_lock);
- if (new_rbo->tbo.sync_obj)
- work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
- spin_unlock(&new_rbo->tbo.bdev->fence_lock);
-
/* pin the new buffer */
DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
work->old_rbo, new_rbo);
DRM_ERROR("failed to pin new rbo buffer before flip\n");
goto cleanup;
}
+ work->fence = (struct radeon_fence *)fence_get(reservation_object_get_excl(new_rbo->tbo.resv));
radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
radeon_bo_unreserve(new_rbo);
drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
radeon_fence_unref(&work->fence);
kfree(work);
-
return r;
}
/* In vblank? */
if (in_vbl)
- ret |= DRM_SCANOUTPOS_INVBL;
+ ret |= DRM_SCANOUTPOS_IN_VBLANK;
/* Is vpos outside nominal vblank area, but less than
* 1/100 of a frame height away from start of vblank?
struct radeon_device *rdev = ddev->dev_private;
enum radeon_pm_state_type pm = rdev->pm.dpm.user_state;
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return snprintf(buf, PAGE_SIZE, "off\n");
-
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
struct drm_device *ddev = dev_get_drvdata(dev);
struct radeon_device *rdev = ddev->dev_private;
- /* Can't set dpm state when the card is off */
- if ((rdev->flags & RADEON_IS_PX) &&
- (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
- return -EINVAL;
-
mutex_lock(&rdev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0)
rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY;
goto fail;
}
mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+
+ /* Can't set dpm state when the card is off */
+ if (!(rdev->flags & RADEON_IS_PX) ||
+ (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
+ radeon_pm_compute_clocks(rdev);
+
fail:
return count;
}
if (rdev->pm.active_crtcs & (1 << crtc)) {
vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, 0, &vpos, &hpos, NULL, NULL);
if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
- !(vbl_status & DRM_SCANOUTPOS_INVBL))
+ !(vbl_status & DRM_SCANOUTPOS_IN_VBLANK))
in_vbl = false;
}
}
- /**
- * \file drmP.h
- * Private header for Direct Rendering Manager
- *
- */
-
/*
+ * Internal Header for the Direct Rendering Manager
+ *
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright (c) 2009-2010, Code Aurora Forum.
* All rights reserved.
*
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
#ifndef _DRM_P_H_
#define _DRM_P_H_
- #ifdef __KERNEL__
- #ifdef __alpha__
- /* add include of current.h so that "current" is defined
- * before static inline funcs in wait.h. Doing this so we
- * can build the DRM (part of PI DRI). 4/21/2000 S + B */
- #include <asm/current.h>
- #endif /* __alpha__ */
- #include <linux/kernel.h>
- #include <linux/kref.h>
- #include <linux/miscdevice.h>
+ #include <linux/agp_backend.h>
+ #include <linux/cdev.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/file.h>
#include <linux/fs.h>
+ #include <linux/highmem.h>
+ #include <linux/idr.h>
#include <linux/init.h>
- #include <linux/file.h>
- #include <linux/platform_device.h>
- #include <linux/pci.h>
+ #include <linux/io.h>
#include <linux/jiffies.h>
- #include <linux/dma-mapping.h>
+ #include <linux/kernel.h>
+ #include <linux/kref.h>
+ #include <linux/miscdevice.h>
#include <linux/mm.h>
- #include <linux/cdev.h>
#include <linux/mutex.h>
- #include <linux/io.h>
- #include <linux/slab.h>
+ #include <linux/pci.h>
+ #include <linux/platform_device.h>
+ #include <linux/poll.h>
#include <linux/ratelimit.h>
- #if defined(__alpha__) || defined(__powerpc__)
- #include <asm/pgtable.h> /* For pte_wrprotect */
- #endif
- #include <asm/mman.h>
- #include <asm/uaccess.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
#include <linux/types.h>
- #include <linux/agp_backend.h>
+ #include <linux/vmalloc.h>
#include <linux/workqueue.h>
- #include <linux/poll.h>
+
+ #include <asm/mman.h>
#include <asm/pgalloc.h>
- #include <drm/drm.h>
- #include <drm/drm_sarea.h>
- #include <drm/drm_vma_manager.h>
+ #include <asm/uaccess.h>
- #include <linux/idr.h>
+ #include <uapi/drm/drm.h>
+ #include <uapi/drm/drm_mode.h>
- #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
+ #include <drm/drm_agpsupport.h>
+ #include <drm/drm_crtc.h>
+ #include <drm/drm_global.h>
+ #include <drm/drm_hashtab.h>
+ #include <drm/drm_mem_util.h>
+ #include <drm/drm_mm.h>
+ #include <drm/drm_os_linux.h>
+ #include <drm/drm_sarea.h>
+ #include <drm/drm_vma_manager.h>
struct module;
struct drm_file;
struct drm_device;
+ struct drm_agp_head;
struct device_node;
struct videomode;
-
- #include <drm/drm_os_linux.h>
- #include <drm/drm_hashtab.h>
- #include <drm/drm_mm.h>
+ struct reservation_object;
/*
* 4 debug categories are defined:
/** \name Begin the DRM... */
/*@{*/
- #define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
- also include looping detection. */
-
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
- #define DRM_MAP_HASH_OFFSET 0x10000000
-
/*@}*/
/***********************************************************************/
* \param fmt printf() like format string.
* \param arg arguments
*/
- #if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, args...) \
do { \
if (unlikely(drm_debug & DRM_UT_CORE)) \
if (unlikely(drm_debug & DRM_UT_PRIME)) \
drm_ut_debug_printk(__func__, fmt, ##args); \
} while (0)
- #else
- #define DRM_DEBUG_DRIVER(fmt, args...) do { } while (0)
- #define DRM_DEBUG_KMS(fmt, args...) do { } while (0)
- #define DRM_DEBUG_PRIME(fmt, args...) do { } while (0)
- #define DRM_DEBUG(fmt, arg...) do { } while (0)
- #endif
/*@}*/
#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}
- struct drm_magic_entry {
- struct list_head head;
- struct drm_hash_item hash_item;
- struct drm_file *priv;
- };
-
- struct drm_vma_entry {
- struct list_head head;
- struct vm_area_struct *vma;
- pid_t pid;
- };
-
/**
* DMA buffer.
*/
void *dev_private; /**< Per-buffer private storage */
};
- /** bufs is one longer than it has to be */
- struct drm_waitlist {
- int count; /**< Number of possible buffers */
- struct drm_buf **bufs; /**< List of pointers to buffers */
- struct drm_buf **rp; /**< Read pointer */
- struct drm_buf **wp; /**< Write pointer */
- struct drm_buf **end; /**< End pointer */
- spinlock_t read_lock;
- spinlock_t write_lock;
- };
-
typedef struct drm_dma_handle {
dma_addr_t busaddr;
void *vaddr;
/** File private data */
struct drm_file {
unsigned authenticated :1;
+ /* Whether we're master for a minor. Protected by master_mutex */
+ unsigned is_master :1;
/* true when the client has asked us to expose stereo 3D mode flags */
unsigned stereo_allowed :1;
/*
};
- /**
- * AGP memory entry. Stored as a doubly linked list.
- */
- struct drm_agp_mem {
- unsigned long handle; /**< handle */
- struct agp_memory *memory;
- unsigned long bound; /**< address */
- int pages;
- struct list_head head;
- };
-
- /**
- * AGP data.
- *
- * \sa drm_agp_init() and drm_device::agp.
- */
- struct drm_agp_head {
- struct agp_kern_info agp_info; /**< AGP device information */
- struct list_head memory;
- unsigned long mode; /**< AGP mode */
- struct agp_bridge_data *bridge;
- int enabled; /**< whether the AGP bus as been enabled */
- int acquired; /**< whether the AGP device has been acquired */
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
- };
-
/**
* Scatter-gather memory.
*/
dma_addr_t *busaddr;
};
- struct drm_sigdata {
- int context;
- struct drm_hw_lock *lock;
- };
-
-
/**
* Kernel side of a mapping
*/
struct dma_buf_attachment *import_attach;
};
- #include <drm/drm_crtc.h>
-
/**
* struct drm_master - drm master structure
*
* @minor: Link back to minor char device we are master for. Immutable.
* @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex.
* @unique_len: Length of unique field. Protected by drm_global_mutex.
- * @unique_size: Amount allocated. Protected by drm_global_mutex.
* @magiclist: Hash of used authentication tokens. Protected by struct_mutex.
* @magicfree: List of used authentication tokens. Protected by struct_mutex.
* @lock: DRI lock information.
struct drm_minor *minor;
char *unique;
int unique_len;
- int unique_size;
struct drm_open_hash magiclist;
struct list_head magicfree;
struct drm_lock_data lock;
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
-#define DRM_VBLANKTIME_INVBL (1 << 1)
+#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
/* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0)
-#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
- struct drm_bus {
- int (*set_busid)(struct drm_device *dev, struct drm_master *master);
- };
-
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
/**
* get_vblank_counter - get raw hardware vblank counter
/* low-level interface used by drm_gem_prime_{import,export} */
int (*gem_prime_pin)(struct drm_gem_object *obj);
void (*gem_prime_unpin)(struct drm_gem_object *obj);
+ struct reservation_object * (*gem_prime_res_obj)(
+ struct drm_gem_object *obj);
struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
struct drm_gem_object *(*gem_prime_import_sg_table)(
struct drm_device *dev, size_t size,
const struct drm_ioctl_desc *ioctls;
int num_ioctls;
const struct file_operations *fops;
- struct drm_bus *bus;
/* List of devices hanging off this driver with stealth attach. */
struct list_head legacy_dev_list;
/** \name Locks */
/*@{ */
struct mutex struct_mutex; /**< For others */
- struct mutex master_mutex; /**< For drm_minor::master */
+ struct mutex master_mutex; /**< For drm_minor::master and drm_file::is_master */
/*@} */
/** \name Usage Counters */
*/
bool vblank_disable_allowed;
+ /*
+ * If true, the vblank interrupt will be disabled immediately when the
+ * refcount drops to zero, as opposed to via the vblank disable
+ * timer.
+ * This can be set to true if the hardware has a working vblank
+ * counter and the driver uses drm_vblank_on() and drm_vblank_off()
+ * appropriately.
+ */
+ bool vblank_disable_immediate;
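/* Editorial note (hypothetical usage, not part of the patch): a driver
 * with a reliable hardware counter would opt in from its load path,
 * e.g. dev->vblank_disable_immediate = true;
 */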
+
/* array of size num_crtcs */
struct drm_vblank_crtc *vblank;
#endif
struct platform_device *platformdev; /**< Platform device structure */
- struct usb_device *usbdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
- struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
+ struct {
+ int context;
+ struct drm_hw_lock *lock;
+ } sigdata;
+
struct drm_local_map *agp_buffer_map;
unsigned int agp_buffer_token;
return file_priv->minor->type == DRM_MINOR_LEGACY;
}
- /**
- * drm_is_master() - Check whether a DRM open-file is DRM-Master
- * @file: DRM open-file context
- *
- * This checks whether a DRM open-file context is owner of the master context
- * attached to it. If a file owns a master context, it's called DRM-Master.
- * Per DRM device, only one such file can be DRM-Master at a time.
- *
- * Returns: True if the file is DRM-Master, otherwise false.
- */
- static inline bool drm_is_master(const struct drm_file *file)
- {
- return file->master && file->master == file->minor->master;
- }
-
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
extern void drm_vm_close_locked(struct drm_device *dev, struct vm_area_struct *vma);
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
- /* Memory management support (drm_memory.h) */
- #include <drm/drm_memory.h>
-
-
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void drm_clflush_sg(struct sg_table *st);
void drm_clflush_virt_range(void *addr, unsigned long length);
- /* Locking IOCTL support (drm_lock.h) */
- extern int drm_lock(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_unlock(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
- extern void drm_idlelock_take(struct drm_lock_data *lock_data);
- extern void drm_idlelock_release(struct drm_lock_data *lock_data);
-
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
- extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
-
- /* Buffer management support (drm_bufs.h) */
- extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
- extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
- extern int drm_addmap(struct drm_device *dev, resource_size_t offset,
- unsigned int size, enum drm_map_type type,
- enum drm_map_flags flags, struct drm_local_map **map_ptr);
- extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map);
- extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map);
- extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_addbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_infobufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_markbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_freebufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_mapbufs(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
- extern int drm_dma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
/* DMA support (drm_dma.h) */
extern int drm_legacy_dma_setup(struct drm_device *dev);
extern void drm_legacy_dma_takedown(struct drm_device *dev);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
+ extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+ extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_on(struct drm_device *dev, int crtc);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
-extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
- struct timeval *tvblank, unsigned flags);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
int crtc, int *max_error,
struct timeval *vblank_time,
extern void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
+ /**
+ * drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
+ * @crtc: which CRTC's vblank waitqueue to retrieve
+ *
+ * This function returns a pointer to the vblank waitqueue for the CRTC.
+ * Drivers can use this to implement vblank waits using wait_event() & co.
+ */
+ static inline wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc)
+ {
+ return &crtc->dev->vblank[drm_crtc_index(crtc)].queue;
+ }
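/* Editorial sketch (hypothetical helper, not part of the patch): a
 * driver-side wait built on drm_crtc_vblank_waitqueue(), as the
 * kernel-doc above suggests. Assumes the caller already holds a vblank
 * reference via drm_crtc_vblank_get().
 */
static long example_wait_for_vblank_count(struct drm_crtc *crtc, u32 target)
{
	struct drm_device *dev = crtc->dev;
	int pipe = drm_crtc_index(crtc);

	/* Returns the remaining jiffies on success, 0 on timeout. */
	return wait_event_timeout(*drm_crtc_vblank_waitqueue(crtc),
				  drm_vblank_count(dev, pipe) >= target,
				  msecs_to_jiffies(100));
}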
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
- /* AGP/GART support (drm_agpsupport.h) */
-
- #include <drm/drm_agpsupport.h>
-
/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
-extern unsigned int drm_vblank_offdelay;
+extern int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
extern unsigned int drm_timestamp_monotonic;
extern struct class *drm_class;
- extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
- #if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
- #endif
/* Scatter Gather Support (drm_scatter.h) */
extern void drm_legacy_sg_cleanup(struct drm_device *dev);
struct drm_master *master,
struct drm_unique *u);
+ /* Legacy Support */
+
+ int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, struct drm_local_map **map_p);
+ int drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
+ int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
+ struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev);
+
+ int drm_legacy_addbufs_agp(struct drm_device *d, struct drm_buf_desc *req);
+ int drm_legacy_addbufs_pci(struct drm_device *d, struct drm_buf_desc *req);
+
+ void drm_legacy_vma_flush(struct drm_device *d);
+
+ void drm_legacy_idlelock_take(struct drm_lock_data *lock);
+ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
+
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
- #include <drm/drm_global.h>
-
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
{
}
- #include <drm/drm_mem_util.h>
-
struct drm_device *drm_dev_alloc(struct drm_driver *driver,
struct device *parent);
void drm_dev_ref(struct drm_device *dev);
extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+ extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#define DRM_PCIE_SPEED_25 1
#define DRM_PCIE_SPEED_50 2
/* platform section */
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+ extern int drm_platform_set_busid(struct drm_device *d, struct drm_master *m);
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
return true;
}
- #endif /* __KERNEL__ */
#endif