linux.git / commitdiff
Merge tag 'drm-intel-next-2017-05-29' of git://anongit.freedesktop.org/git/drm-intel...
author Dave Airlie <[email protected]>
Tue, 30 May 2017 05:25:28 +0000 (15:25 +1000)
committer Dave Airlie <[email protected]>
Tue, 30 May 2017 05:25:28 +0000 (15:25 +1000)
More stuff for 4.13:

- skl+ wm fixes from Mahesh Kumar
- some refactor and tests for i915_sw_fence (Chris)
- tune execlist/scheduler code (Chris)
- g4x,g33 gpu reset improvements (Chris, Mika)
- guc code cleanup (Michal Wajdeczko, Michał Winiarski)
- dp aux backlight improvements (Puthikorn Voravootivat)
- buffer based guc/host communication (Michal Wajdeczko)

* tag 'drm-intel-next-2017-05-29' of git://anongit.freedesktop.org/git/drm-intel: (253 commits)
  drm/i915: Update DRIVER_DATE to 20170529
  drm/i915: Keep the forcewake timer alive for 1ms past the most recent use
  drm/i915/guc: capture GuC logs if FW fails to load
  drm/i915/guc: Introduce buffer based cmd transport
  drm/i915/guc: Disable send function on fini
  drm: Add definition for eDP backlight frequency
  drm/i915: Drop AUX backlight enable check for backlight control
  drm/i915: Consolidate #ifdef CONFIG_INTEL_IOMMU
  drm/i915: Only GGTT vma may be pinned and prevent shrinking
  drm/i915: Serialize GTT/Aperture accesses on BXT
  drm/i915: Convert i915_gem_object_ops->flags values to use BIT()
  drm/i915/selftests: Silence compiler warning in igt_ctx_exec
  drm/i915/guc: Skip port assign on first iteration of GuC dequeue
  drm/i915: Remove misleading comment in request_alloc
  drm/i915/g33: Improve reset reliability
  Revert "drm/i915: Restore lost "Initialized i915" welcome message"
  drm/i915/huc: Update GLK HuC version
  drm/i915: Check for allocation failure
  drm/i915/guc: Remove action status and statistics from debugfs
  drm/i915/g4x: Improve gpu reset reliability
  ...

18 files changed:
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_request.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
include/drm/drm_dp_helper.h
sound/x86/intel_hdmi_audio.c

index b00edd3b8800d2d327f7f5f14c537c61e63922bf,d4860c3a3bf9f290357439ecf56e1b159a39a643..78c5c049a347bcfe416b59b168f632be121e4b50
@@@ -61,6 -61,18 +61,18 @@@ config DRM_I915_SW_FENCE_DEBUG_OBJECT
  
            If in doubt, say "N".
  
+ config DRM_I915_SW_FENCE_CHECK_DAG
+         bool "Enable additional driver debugging for detecting dependency cycles"
+         depends on DRM_I915
+         default n
+         help
+           Choose this option to turn on extra driver debugging that may affect
+           performance but will catch some internal issues.
+           Recommended for driver developers only.
+           If in doubt, say "N".
  config DRM_I915_SELFTEST
        bool "Enable selftests upon driver load"
        depends on DRM_I915
@@@ -87,16 -99,3 +99,16 @@@ config DRM_I915_LOW_LEVEL_TRACEPOINT
            and also analyze the request dependency resolving timeline.
  
            If in doubt, say "N".
 +
 +config DRM_I915_DEBUG_VBLANK_EVADE
 +      bool "Enable extra debug warnings for vblank evasion"
 +      depends on DRM_I915
 +      default n
 +      help
 +        Choose this option to turn on extra debug warnings for the
 +        vblank evade mechanism. This gives a warning every time the
 +        deadline allotted for the vblank evade critical section
 +        is exceeded, even if there isn't an actual risk of missing
 +        the vblank.
 +
 +        If in doubt, say "N".
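
Both new Kconfig entries in this file are plain boolean debug switches. As a rough sketch of how such an option is usually consumed (the function and message below are hypothetical, not taken from this series), the check is wrapped in IS_ENABLED() so the extra branch compiles away entirely when the option is off:

    #include <linux/jiffies.h>
    #include <linux/kconfig.h>	/* IS_ENABLED() */
    #include <linux/printk.h>

    /* Hypothetical consumer of CONFIG_DRM_I915_DEBUG_VBLANK_EVADE: the
     * warning is dead code, dropped by the compiler, unless the option
     * was selected at build time.
     */
    static void check_evade_deadline(unsigned long deadline)
    {
            if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) &&
                time_after(jiffies, deadline))
                    pr_warn("vblank evade critical section overran its deadline\n");
    }
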
index 1c66108f433380aca1e1ef8aeb34ea6b57ccfe6b,0615237d1ceab203c9cdf3ce29ce6858aebddecf..7e0816ccdc217debbd597cc213d2c62129a152d8
@@@ -229,7 -229,7 +229,7 @@@ static int i915_gem_stolen_list_info(st
        int ret;
  
        total = READ_ONCE(dev_priv->mm.object_count);
 -      objects = drm_malloc_ab(total, sizeof(*objects));
 +      objects = kvmalloc_array(total, sizeof(*objects), GFP_KERNEL);
        if (!objects)
                return -ENOMEM;
  
  
        mutex_unlock(&dev->struct_mutex);
  out:
 -      drm_free_large(objects);
 +      kvfree(objects);
        return ret;
  }
  
@@@ -2482,8 -2482,6 +2482,6 @@@ static void i915_guc_client_info(struc
                client->wq_size, client->wq_offset, client->wq_tail);
  
        seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
-       seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
-       seq_printf(m, "\tLast submission result: %d\n", client->retcode);
  
        for_each_engine(engine, dev_priv, id) {
                u64 submissions = client->submissions[id];
        seq_printf(m, "\tTotal: %llu\n", tot);
  }
  
- static int i915_guc_info(struct seq_file *m, void *data)
+ static bool check_guc_submission(struct seq_file *m)
  {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
        const struct intel_guc *guc = &dev_priv->guc;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-       u64 total;
  
        if (!guc->execbuf_client) {
                seq_printf(m, "GuC submission %s\n",
                           HAS_GUC_SCHED(dev_priv) ?
                           "disabled" :
                           "not supported");
-               return 0;
+               return false;
        }
  
+       return true;
+ }
+ static int i915_guc_info(struct seq_file *m, void *data)
+ {
+       struct drm_i915_private *dev_priv = node_to_i915(m->private);
+       const struct intel_guc *guc = &dev_priv->guc;
+       if (!check_guc_submission(m))
+               return 0;
        seq_printf(m, "Doorbell map:\n");
        seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
        seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
  
-       seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
-       seq_printf(m, "GuC action failure count: %u\n", guc->action_fail);
-       seq_printf(m, "GuC last action command: 0x%x\n", guc->action_cmd);
-       seq_printf(m, "GuC last action status: 0x%x\n", guc->action_status);
-       seq_printf(m, "GuC last action error code: %d\n", guc->action_err);
-       total = 0;
-       seq_printf(m, "\nGuC submissions:\n");
-       for_each_engine(engine, dev_priv, id) {
-               u64 submissions = guc->submissions[id];
-               total += submissions;
-               seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
-                       engine->name, submissions, guc->last_seqno[id]);
-       }
-       seq_printf(m, "\t%s: %llu\n", "Total", total);
        seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
        i915_guc_client_info(m, dev_priv, guc->execbuf_client);
  
        return 0;
  }
  
- static int i915_guc_log_dump(struct seq_file *m, void *data)
+ static int i915_guc_stage_pool(struct seq_file *m, void *data)
  {
        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-       struct drm_i915_gem_object *obj;
-       int i = 0, pg;
+       const struct intel_guc *guc = &dev_priv->guc;
+       struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+       struct i915_guc_client *client = guc->execbuf_client;
+       unsigned int tmp;
+       int index;
  
-       if (!dev_priv->guc.log.vma)
+       if (!check_guc_submission(m))
                return 0;
  
-       obj = dev_priv->guc.log.vma->obj;
-       for (pg = 0; pg < obj->base.size / PAGE_SIZE; pg++) {
-               u32 *log = kmap_atomic(i915_gem_object_get_page(obj, pg));
+       for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
+               struct intel_engine_cs *engine;
+               if (!(desc->attribute & GUC_STAGE_DESC_ATTR_ACTIVE))
+                       continue;
+               seq_printf(m, "GuC stage descriptor %u:\n", index);
+               seq_printf(m, "\tIndex: %u\n", desc->stage_id);
+               seq_printf(m, "\tAttribute: 0x%x\n", desc->attribute);
+               seq_printf(m, "\tPriority: %d\n", desc->priority);
+               seq_printf(m, "\tDoorbell id: %d\n", desc->db_id);
+               seq_printf(m, "\tEngines used: 0x%x\n",
+                          desc->engines_used);
+               seq_printf(m, "\tDoorbell trigger phy: 0x%llx, cpu: 0x%llx, uK: 0x%x\n",
+                          desc->db_trigger_phy,
+                          desc->db_trigger_cpu,
+                          desc->db_trigger_uk);
+               seq_printf(m, "\tProcess descriptor: 0x%x\n",
+                          desc->process_desc);
+               seq_printf(m, "\tWorkqueue address: 0x%x, size: 0x%x\n",
+                          desc->wq_addr, desc->wq_size);
+               seq_putc(m, '\n');
+               for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
+                       u32 guc_engine_id = engine->guc_id;
+                       struct guc_execlist_context *lrc =
+                                               &desc->lrc[guc_engine_id];
+                       seq_printf(m, "\t%s LRC:\n", engine->name);
+                       seq_printf(m, "\t\tContext desc: 0x%x\n",
+                                  lrc->context_desc);
+                       seq_printf(m, "\t\tContext id: 0x%x\n", lrc->context_id);
+                       seq_printf(m, "\t\tLRCA: 0x%x\n", lrc->ring_lrca);
+                       seq_printf(m, "\t\tRing begin: 0x%x\n", lrc->ring_begin);
+                       seq_printf(m, "\t\tRing end: 0x%x\n", lrc->ring_end);
+                       seq_putc(m, '\n');
+               }
+       }
+       return 0;
+ }
+ static int i915_guc_log_dump(struct seq_file *m, void *data)
+ {
+       struct drm_info_node *node = m->private;
+       struct drm_i915_private *dev_priv = node_to_i915(node);
+       bool dump_load_err = !!node->info_ent->data;
+       struct drm_i915_gem_object *obj = NULL;
+       u32 *log;
+       int i = 0;
+       if (dump_load_err)
+               obj = dev_priv->guc.load_err_log;
+       else if (dev_priv->guc.log.vma)
+               obj = dev_priv->guc.log.vma->obj;
  
-               for (i = 0; i < PAGE_SIZE / sizeof(u32); i += 4)
-                       seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
-                                  *(log + i), *(log + i + 1),
-                                  *(log + i + 2), *(log + i + 3));
+       if (!obj)
+               return 0;
  
-               kunmap_atomic(log);
+       log = i915_gem_object_pin_map(obj, I915_MAP_WC);
+       if (IS_ERR(log)) {
+               DRM_DEBUG("Failed to pin object\n");
+               seq_puts(m, "(log data unaccessible)\n");
+               return PTR_ERR(log);
        }
  
+       for (i = 0; i < obj->base.size / sizeof(u32); i += 4)
+               seq_printf(m, "0x%08x 0x%08x 0x%08x 0x%08x\n",
+                          *(log + i), *(log + i + 1),
+                          *(log + i + 2), *(log + i + 3));
        seq_putc(m, '\n');
  
+       i915_gem_object_unpin_map(obj);
        return 0;
  }
  
  static int i915_guc_log_control_get(void *data, u64 *val)
  {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
  
        if (!dev_priv->guc.log.vma)
                return -EINVAL;
  
  static int i915_guc_log_control_set(void *data, u64 val)
  {
-       struct drm_device *dev = data;
-       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_i915_private *dev_priv = data;
        int ret;
  
        if (!dev_priv->guc.log.vma)
                return -EINVAL;
  
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
        if (ret)
                return ret;
  
        ret = i915_guc_log_control(dev_priv, val);
        intel_runtime_pm_put(dev_priv);
  
-       mutex_unlock(&dev->struct_mutex);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
  }
  
@@@ -2855,7 -2907,8 +2907,8 @@@ static int i915_dmc_info(struct seq_fil
        seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
                   CSR_VERSION_MINOR(csr->version));
  
-       if (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6)) {
+       if (IS_KABYLAKE(dev_priv) ||
+           (IS_SKYLAKE(dev_priv) && csr->version >= CSR_VERSION(1, 6))) {
                seq_printf(m, "DC3 -> DC5 count: %d\n",
                           I915_READ(SKL_CSR_DC3_DC5_COUNT));
                seq_printf(m, "DC5 -> DC6 count: %d\n",
@@@ -3043,36 -3096,6 +3096,6 @@@ static void intel_connector_info(struc
                intel_seq_print_mode(m, 2, mode);
  }
  
- static bool cursor_active(struct drm_i915_private *dev_priv, int pipe)
- {
-       u32 state;
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
-       else
-               state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
-       return state;
- }
- static bool cursor_position(struct drm_i915_private *dev_priv,
-                           int pipe, int *x, int *y)
- {
-       u32 pos;
-       pos = I915_READ(CURPOS(pipe));
-       *x = (pos >> CURSOR_X_SHIFT) & CURSOR_POS_MASK;
-       if (pos & (CURSOR_POS_SIGN << CURSOR_X_SHIFT))
-               *x = -*x;
-       *y = (pos >> CURSOR_Y_SHIFT) & CURSOR_POS_MASK;
-       if (pos & (CURSOR_POS_SIGN << CURSOR_Y_SHIFT))
-               *y = -*y;
-       return cursor_active(dev_priv, pipe);
- }
  static const char *plane_type(enum drm_plane_type type)
  {
        switch (type) {
@@@ -3095,17 -3118,17 +3118,17 @@@ static const char *plane_rotation(unsig
  {
        static char buf[48];
        /*
 -       * According to doc only one DRM_ROTATE_ is allowed but this
 +       * According to doc only one DRM_MODE_ROTATE_ is allowed but this
         * will print them all to visualize if the values are misused
         */
        snprintf(buf, sizeof(buf),
                 "%s%s%s%s%s%s(0x%08x)",
 -               (rotation & DRM_ROTATE_0) ? "0 " : "",
 -               (rotation & DRM_ROTATE_90) ? "90 " : "",
 -               (rotation & DRM_ROTATE_180) ? "180 " : "",
 -               (rotation & DRM_ROTATE_270) ? "270 " : "",
 -               (rotation & DRM_REFLECT_X) ? "FLIPX " : "",
 -               (rotation & DRM_REFLECT_Y) ? "FLIPY " : "",
 +               (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
 +               (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
 +               (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
 +               (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
 +               (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
 +               (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
                 rotation);
  
        return buf;
@@@ -3194,9 -3217,7 +3217,7 @@@ static int i915_display_info(struct seq
        seq_printf(m, "CRTC info\n");
        seq_printf(m, "---------\n");
        for_each_intel_crtc(dev, crtc) {
-               bool active;
                struct intel_crtc_state *pipe_config;
-               int x, y;
  
                drm_modeset_lock(&crtc->base.mutex, NULL);
                pipe_config = to_intel_crtc_state(crtc->base.state);
                           yesno(pipe_config->dither), pipe_config->pipe_bpp);
  
                if (pipe_config->base.active) {
+                       struct intel_plane *cursor =
+                               to_intel_plane(crtc->base.cursor);
                        intel_crtc_info(m, crtc);
  
-                       active = cursor_position(dev_priv, crtc->pipe, &x, &y);
-                       seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? %s\n",
-                                  yesno(crtc->cursor_base),
-                                  x, y, crtc->base.cursor->state->crtc_w,
-                                  crtc->base.cursor->state->crtc_h,
-                                  crtc->cursor_addr, yesno(active));
+                       seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x\n",
+                                  yesno(cursor->base.state->visible),
+                                  cursor->base.state->crtc_x,
+                                  cursor->base.state->crtc_y,
+                                  cursor->base.state->crtc_w,
+                                  cursor->base.state->crtc_h,
+                                  cursor->cursor.base);
                        intel_scaler_info(m, crtc);
                        intel_plane_info(m, crtc);
                }
@@@ -3316,7 -3341,7 +3341,7 @@@ static int i915_engine_info(struct seq_
  
                if (i915.enable_execlists) {
                        u32 ptr, read, write;
-                       struct rb_node *rb;
+                       unsigned int idx;
  
                        seq_printf(m, "\tExeclist status: 0x%08x %08x\n",
                                   I915_READ(RING_EXECLIST_STATUS_LO(engine)),
                        if (read > write)
                                write += GEN8_CSB_ENTRIES;
                        while (read < write) {
-                               unsigned int idx = ++read % GEN8_CSB_ENTRIES;
+                               idx = ++read % GEN8_CSB_ENTRIES;
                                seq_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
                                           idx,
                                           I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
                        }
  
                        rcu_read_lock();
-                       rq = READ_ONCE(engine->execlist_port[0].request);
-                       if (rq) {
-                               seq_printf(m, "\t\tELSP[0] count=%d, ",
-                                          engine->execlist_port[0].count);
-                               print_request(m, rq, "rq: ");
-                       } else {
-                               seq_printf(m, "\t\tELSP[0] idle\n");
-                       }
-                       rq = READ_ONCE(engine->execlist_port[1].request);
-                       if (rq) {
-                               seq_printf(m, "\t\tELSP[1] count=%d, ",
-                                          engine->execlist_port[1].count);
-                               print_request(m, rq, "rq: ");
-                       } else {
-                               seq_printf(m, "\t\tELSP[1] idle\n");
+                       for (idx = 0; idx < ARRAY_SIZE(engine->execlist_port); idx++) {
+                               unsigned int count;
+                               rq = port_unpack(&engine->execlist_port[idx],
+                                                &count);
+                               if (rq) {
+                                       seq_printf(m, "\t\tELSP[%d] count=%d, ",
+                                                  idx, count);
+                                       print_request(m, rq, "rq: ");
+                               } else {
+                                       seq_printf(m, "\t\tELSP[%d] idle\n",
+                                                  idx);
+                               }
                        }
                        rcu_read_unlock();
  
                        spin_lock_irq(&engine->timeline->lock);
-                       for (rb = engine->execlist_first; rb; rb = rb_next(rb)) {
-                               rq = rb_entry(rb, typeof(*rq), priotree.node);
-                               print_request(m, rq, "\t\tQ ");
+                       for (rb = engine->execlist_first; rb; rb = rb_next(rb)){
+                               struct i915_priolist *p =
+                                       rb_entry(rb, typeof(*p), node);
+                               list_for_each_entry(rq, &p->requests,
+                                                   priotree.link)
+                                       print_request(m, rq, "\t\tQ ");
                        }
                        spin_unlock_irq(&engine->timeline->lock);
                } else if (INTEL_GEN(dev_priv) > 6) {
@@@ -3704,16 -3730,10 +3730,10 @@@ static ssize_t i915_displayport_test_ac
        if (len == 0)
                return 0;
  
-       input_buffer = kmalloc(len + 1, GFP_KERNEL);
-       if (!input_buffer)
-               return -ENOMEM;
+       input_buffer = memdup_user_nul(ubuf, len);
+       if (IS_ERR(input_buffer))
+               return PTR_ERR(input_buffer);
  
-       if (copy_from_user(input_buffer, ubuf, len)) {
-               status = -EFAULT;
-               goto out;
-       }
-       input_buffer[len] = '\0';
        DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
  
        drm_connector_list_iter_begin(dev, &conn_iter);
                }
        }
        drm_connector_list_iter_end(&conn_iter);
- out:
        kfree(input_buffer);
        if (status < 0)
                return status;
@@@ -3900,6 -3919,8 +3919,8 @@@ static void wm_latency_show(struct seq_
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
+       else if (IS_G4X(dev_priv))
+               num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;
  
                 * - WM1+ latency values in 0.5us units
                 * - latencies are in us on gen9/vlv/chv
                 */
-               if (INTEL_GEN(dev_priv) >= 9 || IS_VALLEYVIEW(dev_priv) ||
-                   IS_CHERRYVIEW(dev_priv))
+               if (INTEL_GEN(dev_priv) >= 9 ||
+                   IS_VALLEYVIEW(dev_priv) ||
+                   IS_CHERRYVIEW(dev_priv) ||
+                   IS_G4X(dev_priv))
                        latency *= 10;
                else if (level > 0)
                        latency *= 5;
@@@ -3974,7 -3997,7 +3997,7 @@@ static int pri_wm_latency_open(struct i
  {
        struct drm_i915_private *dev_priv = inode->i_private;
  
-       if (INTEL_GEN(dev_priv) < 5)
+       if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                return -ENODEV;
  
        return single_open(file, pri_wm_latency_show, dev_priv);
@@@ -4016,6 -4039,8 +4039,8 @@@ static ssize_t wm_latency_write(struct 
                num_levels = 3;
        else if (IS_VALLEYVIEW(dev_priv))
                num_levels = 1;
+       else if (IS_G4X(dev_priv))
+               num_levels = 3;
        else
                num_levels = ilk_wm_max_level(dev_priv) + 1;
  
@@@ -4776,6 -4801,8 +4801,8 @@@ static const struct drm_info_list i915_
        {"i915_guc_info", i915_guc_info, 0},
        {"i915_guc_load_status", i915_guc_load_status_info, 0},
        {"i915_guc_log_dump", i915_guc_log_dump, 0},
+       {"i915_guc_load_err_log_dump", i915_guc_log_dump, 0, (void *)1},
+       {"i915_guc_stage_pool", i915_guc_stage_pool, 0},
        {"i915_huc_load_status", i915_huc_load_status_info, 0},
        {"i915_frequency_info", i915_frequency_info, 0},
        {"i915_hangcheck_info", i915_hangcheck_info, 0},
index 0e07f35e270ce8690abc3d16631443d30c3560f5,7572dcdf4745d0627d9db9d107db9ed8d1583047..7ab47a84671ff2eb39770ca708d23a0de91ff003
@@@ -46,8 -46,6 +46,6 @@@
  #include <linux/dma-buf.h>
  
  static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
- static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
- static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
  
  static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
  {
@@@ -705,6 -703,61 +703,61 @@@ i915_gem_create_ioctl(struct drm_devic
                               args->size, &args->handle);
  }
  
+ static inline enum fb_op_origin
+ fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
+ {
+       return (domain == I915_GEM_DOMAIN_GTT ?
+               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
+ }
+ static void
+ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
+ {
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+       if (!(obj->base.write_domain & flush_domains))
+               return;
+       /* No actual flushing is required for the GTT write domain.  Writes
+        * to it "immediately" go to main memory as far as we know, so there's
+        * no chipset flush.  It also doesn't land in render cache.
+        *
+        * However, we do have to enforce the order so that all writes through
+        * the GTT land before any writes to the device, such as updates to
+        * the GATT itself.
+        *
+        * We also have to wait a bit for the writes to land from the GTT.
+        * An uncached read (i.e. mmio) seems to be ideal for the round-trip
+        * timing. This issue has only been observed when switching quickly
+        * between GTT writes and CPU reads from inside the kernel on recent hw,
+        * and it appears to only affect discrete GTT blocks (i.e. on LLC
+        * system agents we cannot reproduce this behaviour).
+        */
+       wmb();
+       switch (obj->base.write_domain) {
+       case I915_GEM_DOMAIN_GTT:
+               if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+                       if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+                               spin_lock_irq(&dev_priv->uncore.lock);
+                               POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+                               spin_unlock_irq(&dev_priv->uncore.lock);
+                               intel_runtime_pm_put(dev_priv);
+                       }
+               }
+               intel_fb_obj_flush(obj,
+                                  fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
+               break;
+       case I915_GEM_DOMAIN_CPU:
+               i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+               break;
+       }
+       obj->base.write_domain = 0;
+ }
  static inline int
  __copy_to_user_swizzled(char __user *cpu_vaddr,
                        const char *gpu_vaddr, int gpu_offset,
@@@ -794,7 -847,7 +847,7 @@@ int i915_gem_obj_prepare_shmem_read(str
                        goto out;
        }
  
-       i915_gem_object_flush_gtt_write_domain(obj);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  
        /* If we're not in the cpu read domain, set ourself into the gtt
         * read domain and manually flush cachelines (if required). This
@@@ -846,7 -899,7 +899,7 @@@ int i915_gem_obj_prepare_shmem_write(st
                        goto out;
        }
  
-       i915_gem_object_flush_gtt_write_domain(obj);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  
        /* If we're not in the cpu write domain, set ourself into the
         * gtt write domain and manually flush cachelines (as required).
@@@ -1501,13 -1554,6 +1554,6 @@@ err
        return ret;
  }
  
- static inline enum fb_op_origin
- write_origin(struct drm_i915_gem_object *obj, unsigned domain)
- {
-       return (domain == I915_GEM_DOMAIN_GTT ?
-               obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
- }
  static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
  {
        struct drm_i915_private *i915;
@@@ -1591,10 -1637,12 +1637,12 @@@ i915_gem_set_domain_ioctl(struct drm_de
        if (err)
                goto out_unpin;
  
-       if (read_domains & I915_GEM_DOMAIN_GTT)
-               err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
+       if (read_domains & I915_GEM_DOMAIN_WC)
+               err = i915_gem_object_set_to_wc_domain(obj, write_domain);
+       else if (read_domains & I915_GEM_DOMAIN_GTT)
+               err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
        else
-               err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
+               err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
  
        /* And bump the LRU for this access */
        i915_gem_object_bump_inactive_ggtt(obj);
        mutex_unlock(&dev->struct_mutex);
  
        if (write_domain != 0)
-               intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
+               intel_fb_obj_invalidate(obj,
+                                       fb_write_origin(obj, write_domain));
  
  out_unpin:
        i915_gem_object_unpin_pages(obj);
@@@ -1737,6 -1786,9 +1786,9 @@@ static unsigned int tile_row_pages(stru
   *     into userspace. (This view is aligned and sized appropriately for
   *     fenced access.)
   *
+  * 2 - Recognise WC as a separate cache domain so that we can flush the
+  *     delayed writes via GTT before performing direct access via WC.
+  *
   * Restrictions:
   *
   *  * snoopable objects cannot be accessed via the GTT. It can cause machine
   */
  int i915_gem_mmap_gtt_version(void)
  {
-       return 1;
+       return 2;
  }
  
  static inline struct i915_ggtt_view
@@@ -2228,7 -2280,7 +2280,7 @@@ void __i915_gem_object_put_pages(struc
        if (obj->mm.mapping) {
                void *ptr;
  
-               ptr = ptr_mask_bits(obj->mm.mapping);
+               ptr = page_mask_bits(obj->mm.mapping);
                if (is_vmalloc_addr(ptr))
                        vunmap(ptr);
                else
@@@ -2504,7 -2556,7 +2556,7 @@@ static void *i915_gem_object_map(const 
  
        if (n_pages > ARRAY_SIZE(stack_pages)) {
                /* Too big for stack -- allocate temporary array instead */
 -              pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
 +              pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_TEMPORARY);
                if (!pages)
                        return NULL;
        }
        addr = vmap(pages, n_pages, 0, pgprot);
  
        if (pages != stack_pages)
 -              drm_free_large(pages);
 +              kvfree(pages);
  
        return addr;
  }
@@@ -2560,7 -2612,7 +2612,7 @@@ void *i915_gem_object_pin_map(struct dr
        }
        GEM_BUG_ON(!obj->mm.pages);
  
-       ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
+       ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
                        ret = -EBUSY;
                        goto err_unpin;
                }
  
-               obj->mm.mapping = ptr_pack_bits(ptr, type);
+               obj->mm.mapping = page_pack_bits(ptr, type);
        }
  
  out_unlock:
@@@ -2967,12 -3019,14 +3019,14 @@@ static void engine_set_wedged(struct in
         */
  
        if (i915.enable_execlists) {
+               struct execlist_port *port = engine->execlist_port;
                unsigned long flags;
+               unsigned int n;
  
                spin_lock_irqsave(&engine->timeline->lock, flags);
  
-               i915_gem_request_put(engine->execlist_port[0].request);
-               i915_gem_request_put(engine->execlist_port[1].request);
+               for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
+                       i915_gem_request_put(port_request(&port[n]));
                memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
                engine->execlist_queue = RB_ROOT;
                engine->execlist_first = NULL;
@@@ -3101,8 -3155,6 +3155,6 @@@ i915_gem_idle_work_handler(struct work_
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), gt.idle_work.work);
        struct drm_device *dev = &dev_priv->drm;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
        bool rearm_hangcheck;
  
        if (!READ_ONCE(dev_priv->gt.awake))
        if (wait_for(intel_engines_are_idle(dev_priv), 10))
                DRM_ERROR("Timeout waiting for engines to idle\n");
  
-       for_each_engine(engine, dev_priv, id) {
-               intel_engine_disarm_breadcrumbs(engine);
-               i915_gem_batch_pool_fini(&engine->batch_pool);
-       }
+       intel_engines_mark_idle(dev_priv);
+       i915_gem_timelines_mark_idle(dev_priv);
  
        GEM_BUG_ON(!dev_priv->gt.awake);
        dev_priv->gt.awake = false;
@@@ -3320,56 -3370,6 +3370,6 @@@ int i915_gem_wait_for_idle(struct drm_i
        return ret;
  }
  
- /** Flushes the GTT write domain for the object if it's dirty. */
- static void
- i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
- {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-       if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
-               return;
-       /* No actual flushing is required for the GTT write domain.  Writes
-        * to it "immediately" go to main memory as far as we know, so there's
-        * no chipset flush.  It also doesn't land in render cache.
-        *
-        * However, we do have to enforce the order so that all writes through
-        * the GTT land before any writes to the device, such as updates to
-        * the GATT itself.
-        *
-        * We also have to wait a bit for the writes to land from the GTT.
-        * An uncached read (i.e. mmio) seems to be ideal for the round-trip
-        * timing. This issue has only been observed when switching quickly
-        * between GTT writes and CPU reads from inside the kernel on recent hw,
-        * and it appears to only affect discrete GTT blocks (i.e. on LLC
-        * system agents we cannot reproduce this behaviour).
-        */
-       wmb();
-       if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
-               if (intel_runtime_pm_get_if_in_use(dev_priv)) {
-                       spin_lock_irq(&dev_priv->uncore.lock);
-                       POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
-                       spin_unlock_irq(&dev_priv->uncore.lock);
-                       intel_runtime_pm_put(dev_priv);
-               }
-       }
-       intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
-       obj->base.write_domain = 0;
- }
- /** Flushes the CPU write domain for the object if it's dirty. */
- static void
- i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
- {
-       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
-               return;
-       i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
-       obj->base.write_domain = 0;
- }
  static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
  {
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
@@@ -3389,6 -3389,69 +3389,69 @@@ void i915_gem_object_flush_if_display(s
        mutex_unlock(&obj->base.dev->struct_mutex);
  }
  
+ /**
+  * Moves a single object to the WC read, and possibly write domain.
+  * @obj: object to act on
+  * @write: ask for write access or read only
+  *
+  * This function returns when the move is complete, including waiting on
+  * flushes to occur.
+  */
+ int
+ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
+ {
+       int ret;
+       lockdep_assert_held(&obj->base.dev->struct_mutex);
+       ret = i915_gem_object_wait(obj,
+                                  I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_LOCKED |
+                                  (write ? I915_WAIT_ALL : 0),
+                                  MAX_SCHEDULE_TIMEOUT,
+                                  NULL);
+       if (ret)
+               return ret;
+       if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
+               return 0;
+       /* Flush and acquire obj->pages so that we are coherent through
+        * direct access in memory with previous cached writes through
+        * shmemfs and that our cache domain tracking remains valid.
+        * For example, if the obj->filp was moved to swap without us
+        * being notified and releasing the pages, we would mistakenly
+        * continue to assume that the obj remained out of the CPU cached
+        * domain.
+        */
+       ret = i915_gem_object_pin_pages(obj);
+       if (ret)
+               return ret;
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
+       /* Serialise direct access to this object with the barriers for
+        * coherent writes from the GPU, by effectively invalidating the
+        * WC domain upon first access.
+        */
+       if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
+               mb();
+       /* It should now be out of any other write domains, and we can update
+        * the domain values for our changes.
+        */
+       GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
+       obj->base.read_domains |= I915_GEM_DOMAIN_WC;
+       if (write) {
+               obj->base.read_domains = I915_GEM_DOMAIN_WC;
+               obj->base.write_domain = I915_GEM_DOMAIN_WC;
+               obj->mm.dirty = true;
+       }
+       i915_gem_object_unpin_pages(obj);
+       return 0;
+ }
  /**
   * Moves a single object to the GTT read, and possibly write domain.
   * @obj: object to act on
@@@ -3428,7 -3491,7 +3491,7 @@@ i915_gem_object_set_to_gtt_domain(struc
        if (ret)
                return ret;
  
-       i915_gem_object_flush_cpu_write_domain(obj);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
  
        /* Serialise direct access to this object with the barriers for
         * coherent writes from the GPU, by effectively invalidating the
@@@ -3802,7 -3865,7 +3865,7 @@@ i915_gem_object_set_to_cpu_domain(struc
        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
                return 0;
  
-       i915_gem_object_flush_gtt_write_domain(obj);
+       flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
  
        /* Flush the CPU cache if it's still invalid. */
        if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
@@@ -3996,7 -4059,7 +4059,7 @@@ __busy_set_if_active(const struct dma_f
        if (i915_gem_request_completed(rq))
                return 0;
  
-       return flag(rq->engine->exec_id);
+       return flag(rq->engine->uabi_id);
  }
  
  static __always_inline unsigned int
@@@ -4195,7 -4258,7 +4258,7 @@@ i915_gem_object_create(struct drm_i915_
         * catch if we ever need to fix it. In the meantime, if you do spot
         * such a local variable, please consider fixing!
         */
-       if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
+       if (size >> PAGE_SHIFT > INT_MAX)
                return ERR_PTR(-E2BIG);
  
        if (overflows_type(size, obj->base.size))
@@@ -4302,6 -4365,8 +4365,8 @@@ static void __i915_gem_free_objects(str
        intel_runtime_pm_put(i915);
        mutex_unlock(&i915->drm.struct_mutex);
  
+       cond_resched();
        llist_for_each_entry_safe(obj, on, freed, freed) {
                GEM_BUG_ON(obj->bind_count);
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
@@@ -4349,8 -4414,11 +4414,11 @@@ static void __i915_gem_free_work(struc
         * unbound now.
         */
  
-       while ((freed = llist_del_all(&i915->mm.free_list)))
+       while ((freed = llist_del_all(&i915->mm.free_list))) {
                __i915_gem_free_objects(i915, freed);
+               if (need_resched())
+                       break;
+       }
  }
  
  static void __i915_gem_free_object_rcu(struct rcu_head *head)
@@@ -4415,10 -4483,9 +4483,9 @@@ void i915_gem_sanitize(struct drm_i915_
         * try to take over. The only way to remove the earlier state
         * is by resetting. However, resetting on earlier gen is tricky as
         * it may impact the display and we are uncertain about the stability
-        * of the reset, so we only reset recent machines with logical
-        * context support (that must be reset to remove any stray contexts).
+        * of the reset, so this could be applied to even earlier gen.
         */
-       if (HAS_HW_CONTEXTS(i915)) {
+       if (INTEL_GEN(i915) >= 5) {
                int reset = intel_gpu_reset(i915, ALL_ENGINES);
                WARN_ON(reset && reset != -ENODEV);
        }
@@@ -4661,11 -4728,9 +4728,9 @@@ bool intel_sanitize_semaphores(struct d
        if (value >= 0)
                return value;
  
- #ifdef CONFIG_INTEL_IOMMU
        /* Enable semaphores on SNB when IO remapping is off */
-       if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+       if (IS_GEN6(dev_priv) && intel_vtd_active())
                return false;
- #endif
  
        return true;
  }
@@@ -4676,7 -4741,7 +4741,7 @@@ int i915_gem_init(struct drm_i915_priva
  
        mutex_lock(&dev_priv->drm.struct_mutex);
  
-       i915_gem_clflush_init(dev_priv);
+       dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
  
        if (!i915.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
@@@ -4789,7 -4854,7 +4854,7 @@@ i915_gem_load_init(struct drm_i915_priv
        dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
                                        SLAB_HWCACHE_ALIGN |
                                        SLAB_RECLAIM_ACCOUNT |
 -                                      SLAB_DESTROY_BY_RCU);
 +                                      SLAB_TYPESAFE_BY_RCU);
        if (!dev_priv->requests)
                goto err_vmas;
  
        if (!dev_priv->dependencies)
                goto err_requests;
  
+       dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+       if (!dev_priv->priorities)
+               goto err_dependencies;
        mutex_lock(&dev_priv->drm.struct_mutex);
        INIT_LIST_HEAD(&dev_priv->gt.timelines);
        err = i915_gem_timeline_init__global(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (err)
-               goto err_dependencies;
+               goto err_priorities;
  
        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
  
        init_waitqueue_head(&dev_priv->pending_flip_queue);
  
-       dev_priv->mm.interruptible = true;
        atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
  
        spin_lock_init(&dev_priv->fb_tracking.lock);
  
        return 0;
  
+ err_priorities:
+       kmem_cache_destroy(dev_priv->priorities);
  err_dependencies:
        kmem_cache_destroy(dev_priv->dependencies);
  err_requests:
@@@ -4853,6 -4922,7 +4922,7 @@@ void i915_gem_load_cleanup(struct drm_i
        WARN_ON(!list_empty(&dev_priv->gt.timelines));
        mutex_unlock(&dev_priv->drm.struct_mutex);
  
+       kmem_cache_destroy(dev_priv->priorities);
        kmem_cache_destroy(dev_priv->dependencies);
        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
  
  int i915_gem_freeze(struct drm_i915_private *dev_priv)
  {
-       mutex_lock(&dev_priv->drm.struct_mutex);
+       /* Discard all purgeable objects, let userspace recover those as
+        * required after resuming.
+        */
        i915_gem_shrink_all(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
  
        return 0;
  }
@@@ -4891,12 -4962,13 +4962,13 @@@ int i915_gem_freeze_late(struct drm_i91
         * we update that state just before writing out the image.
         *
         * To try and reduce the hibernation image, we manually shrink
-        * the objects as well.
+        * the objects as well, see i915_gem_freeze()
         */
  
-       mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
+       i915_gem_drain_freed_objects(dev_priv);
  
+       mutex_lock(&dev_priv->drm.struct_mutex);
        for (p = phases; *p; p++) {
                list_for_each_entry(obj, *p, global_link) {
                        obj->base.read_domains = I915_GEM_DOMAIN_CPU;
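
A recurring conversion in this merge (visible above in i915_gem_object_map(), and again in i915_debugfs.c and i915_gem_execbuffer.c) is replacing the DRM-private drm_malloc_ab()/drm_malloc_gfp()/drm_free_large() helpers with the generic kvmalloc_array()/kvfree() ones. A self-contained sketch of the resulting allocation pattern, with hypothetical helper names:

    #include <linux/mm.h>	/* kvmalloc_array(), kvfree() */
    #include <linux/slab.h>	/* GFP_KERNEL */
    #include <linux/types.h>

    /* kvmalloc_array() checks for multiplication overflow, tries kmalloc()
     * first and falls back to vmalloc() for large requests; kvfree() frees
     * either kind of allocation.
     */
    static u32 *alloc_offset_array(unsigned int count)
    {
            return kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
    }

    static void free_offset_array(u32 *offsets)
    {
            kvfree(offsets);
    }
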
index 4ee2dc38b7c94af5086778c904235636e1de067e,af1965774e7b7f4408c75e39081097e60b483d3a..04211c970b9f23d9eb7bd5d16d236f831c5f6a3b
@@@ -1019,11 -1019,11 +1019,11 @@@ i915_gem_execbuffer_relocate_slow(struc
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;
  
 -      reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
 -      reloc = drm_malloc_ab(total, sizeof(*reloc));
 +      reloc_offset = kvmalloc_array(count, sizeof(*reloc_offset), GFP_KERNEL);
 +      reloc = kvmalloc_array(total, sizeof(*reloc), GFP_KERNEL);
        if (reloc == NULL || reloc_offset == NULL) {
 -              drm_free_large(reloc);
 -              drm_free_large(reloc_offset);
 +              kvfree(reloc);
 +              kvfree(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }
         */
  
  err:
 -      drm_free_large(reloc);
 -      drm_free_large(reloc_offset);
 +      kvfree(reloc);
 +      kvfree(reloc_offset);
        return ret;
  }
  
@@@ -1114,6 -1114,18 +1114,18 @@@ i915_gem_execbuffer_move_to_gpu(struct 
        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
  
+               if (vma->exec_entry->flags & EXEC_OBJECT_CAPTURE) {
+                       struct i915_gem_capture_list *capture;
+                       capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+                       if (unlikely(!capture))
+                               return -ENOMEM;
+                       capture->next = req->capture_list;
+                       capture->vma = vma;
+                       req->capture_list = capture;
+               }
                if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
                        continue;
  
@@@ -1859,13 -1871,13 +1871,13 @@@ i915_gem_execbuffer(struct drm_device *
        }
  
        /* Copy in the exec list from userland */
 -      exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
 -      exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
 +      exec_list = kvmalloc_array(sizeof(*exec_list), args->buffer_count, GFP_KERNEL);
 +      exec2_list = kvmalloc_array(sizeof(*exec2_list), args->buffer_count, GFP_KERNEL);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
 -              drm_free_large(exec_list);
 -              drm_free_large(exec2_list);
 +              kvfree(exec_list);
 +              kvfree(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
 -              drm_free_large(exec_list);
 -              drm_free_large(exec2_list);
 +              kvfree(exec_list);
 +              kvfree(exec2_list);
                return -EFAULT;
        }
  
                }
        }
  
 -      drm_free_large(exec_list);
 -      drm_free_large(exec2_list);
 +      kvfree(exec_list);
 +      kvfree(exec2_list);
        return ret;
  }
  
@@@ -1943,7 -1955,7 +1955,7 @@@ i915_gem_execbuffer2(struct drm_device 
                return -EINVAL;
        }
  
 -      exec2_list = drm_malloc_gfp(args->buffer_count,
 +      exec2_list = kvmalloc_array(args->buffer_count,
                                    sizeof(*exec2_list),
                                    GFP_TEMPORARY);
        if (exec2_list == NULL) {
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
 -              drm_free_large(exec2_list);
 +              kvfree(exec2_list);
                return -EFAULT;
        }
  
                }
        }
  
 -      drm_free_large(exec2_list);
 +      kvfree(exec2_list);
        return ret;
  }
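
The EXEC_OBJECT_CAPTURE hunk above, together with the i915_gem_capture_list declaration added to i915_gem_request.h further down, lets userspace flag buffers that should be included in a GPU-hang error dump: each flagged vma is prepended to req->capture_list. A hedged sketch of walking such a list (the function and message are hypothetical; the real error-capture consumer is not part of this diff):

    #include <linux/printk.h>

    #include "i915_drv.h"	/* inside the driver, pulls in i915_gem_request.h */

    /* Hypothetical consumer of the singly linked capture list built above. */
    static void note_user_capture_requests(const struct drm_i915_gem_request *rq)
    {
            const struct i915_gem_capture_list *c;

            for (c = rq->capture_list; c; c = c->next)
                    pr_debug("user asked to capture vma %p on GPU hang\n", c->vma);
    }
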
index 7e3193aa7da11105a25b8ae00f45f1efea1dc8b3,f52068aee24736f4c0040e5f1f8ae97484812f9b..0c1008a2bbda88f5b723eeb01cb2eafe748effa6
@@@ -31,8 -31,6 +31,8 @@@
  #include <linux/seq_file.h>
  #include <linux/stop_machine.h>
  
 +#include <asm/set_memory.h>
 +
  #include <drm/drmP.h>
  #include <drm/i915_drm.h>
  
@@@ -168,13 -166,11 +168,11 @@@ int intel_sanitize_enable_ppgtt(struct 
        if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
                return 3;
  
- #ifdef CONFIG_INTEL_IOMMU
        /* Disable ppgtt on SNB if VT-d is on. */
-       if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
+       if (IS_GEN6(dev_priv) && intel_vtd_active()) {
                DRM_INFO("Disabling PPGTT because VT-d is on\n");
                return 0;
        }
- #endif
  
        /* Early VLV doesn't have this */
        if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
@@@ -195,9 -191,12 +193,12 @@@ static int ppgtt_bind_vma(struct i915_v
        u32 pte_flags;
        int ret;
  
-       ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
-       if (ret)
-               return ret;
+       if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
+               ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
+                                                vma->size);
+               if (ret)
+                       return ret;
+       }
  
        vma->pages = vma->obj->mm.pages;
  
@@@ -1989,14 -1988,10 +1990,10 @@@ void i915_ppgtt_release(struct kref *kr
   */
  static bool needs_idle_maps(struct drm_i915_private *dev_priv)
  {
- #ifdef CONFIG_INTEL_IOMMU
        /* Query intel_iommu to see if we need the workaround. Presumably that
         * was loaded first.
         */
-       if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
-               return true;
- #endif
-       return false;
+       return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
  }
  
  void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
@@@ -2188,6 -2183,101 +2185,101 @@@ static void gen8_ggtt_clear_range(struc
                gen8_set_pte(&gtt_base[i], scratch_pte);
  }
  
+ static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
+ {
+       struct drm_i915_private *dev_priv = vm->i915;
+       /*
+        * Make sure the internal GAM fifo has been cleared of all GTT
+        * writes before exiting stop_machine(). This guarantees that
+        * any aperture accesses waiting to start in another process
+        * cannot back up behind the GTT writes causing a hang.
+        * The register can be any arbitrary GAM register.
+        */
+       POSTING_READ(GFX_FLSH_CNTL_GEN6);
+ }
+ struct insert_page {
+       struct i915_address_space *vm;
+       dma_addr_t addr;
+       u64 offset;
+       enum i915_cache_level level;
+ };
+ static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
+ {
+       struct insert_page *arg = _arg;
+       gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+       return 0;
+ }
+ static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
+                                         dma_addr_t addr,
+                                         u64 offset,
+                                         enum i915_cache_level level,
+                                         u32 unused)
+ {
+       struct insert_page arg = { vm, addr, offset, level };
+       stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
+ }
+ struct insert_entries {
+       struct i915_address_space *vm;
+       struct sg_table *st;
+       u64 start;
+       enum i915_cache_level level;
+ };
+ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
+ {
+       struct insert_entries *arg = _arg;
+       gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0);
+       bxt_vtd_ggtt_wa(arg->vm);
+       return 0;
+ }
+ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
+                                            struct sg_table *st,
+                                            u64 start,
+                                            enum i915_cache_level level,
+                                            u32 unused)
+ {
+       struct insert_entries arg = { vm, st, start, level };
+       stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
+ }
+ struct clear_range {
+       struct i915_address_space *vm;
+       u64 start;
+       u64 length;
+ };
+ static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
+ {
+       struct clear_range *arg = _arg;
+       gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
+       bxt_vtd_ggtt_wa(arg->vm);
+       return 0;
+ }
+ static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
+                                         u64 start,
+                                         u64 length)
+ {
+       struct clear_range arg = { vm, start, length };
+       stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
+ }
  static void gen6_ggtt_clear_range(struct i915_address_space *vm,
                                  u64 start, u64 length)
  {
@@@ -2306,10 -2396,11 +2398,11 @@@ static int aliasing_gtt_bind_vma(struc
        if (flags & I915_VMA_LOCAL_BIND) {
                struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
  
-               if (appgtt->base.allocate_va_range) {
+               if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
+                   appgtt->base.allocate_va_range) {
                        ret = appgtt->base.allocate_va_range(&appgtt->base,
                                                             vma->node.start,
-                                                            vma->node.size);
+                                                            vma->size);
                        if (ret)
                                goto err_pages;
                }
@@@ -2579,14 -2670,14 +2672,14 @@@ static size_t gen6_get_stolen_size(u16 
  {
        snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
        snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
-       return snb_gmch_ctl << 25; /* 32 MB units */
+       return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
  }
  
  static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
  {
        bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
        bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
-       return bdw_gmch_ctl << 25; /* 32 MB units */
+       return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
  }
  
  static size_t chv_get_stolen_size(u16 gmch_ctrl)
         * 0x17 to 0x1d: 4MB increments start at 36MB
         */
        if (gmch_ctrl < 0x11)
-               return gmch_ctrl << 25;
+               return (size_t)gmch_ctrl << 25;
        else if (gmch_ctrl < 0x17)
-               return (gmch_ctrl - 0x11 + 2) << 22;
+               return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
        else
-               return (gmch_ctrl - 0x17 + 9) << 22;
+               return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
  }
  
  static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
        gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
  
        if (gen9_gmch_ctl < 0xf0)
-               return gen9_gmch_ctl << 25; /* 32 MB units */
+               return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
        else
                /* 4MB increments starting at 0xf0 for 4MB */
-               return (gen9_gmch_ctl - 0xf0 + 1) << 22;
+               return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
  }
  
  static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
@@@ -2743,13 -2834,17 +2836,17 @@@ static int gen8_gmch_probe(struct i915_
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
+       int err;
  
        /* TODO: We're not aware of mappable constraints on gen8 yet */
        ggtt->mappable_base = pci_resource_start(pdev, 2);
        ggtt->mappable_end = pci_resource_len(pdev, 2);
  
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
+       if (err)
+               DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
  
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
  
  
        ggtt->base.insert_entries = gen8_ggtt_insert_entries;
  
+       /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
+       if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
+               ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
+               ggtt->base.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
+               if (ggtt->base.clear_range != nop_clear_range)
+                       ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+       }
        ggtt->invalidate = gen6_ggtt_invalidate;
  
        return ggtt_probe_common(ggtt, size);
@@@ -2792,6 -2895,7 +2897,7 @@@ static int gen6_gmch_probe(struct i915_
        struct pci_dev *pdev = dev_priv->drm.pdev;
        unsigned int size;
        u16 snb_gmch_ctl;
+       int err;
  
        ggtt->mappable_base = pci_resource_start(pdev, 2);
        ggtt->mappable_end = pci_resource_len(pdev, 2);
                return -ENXIO;
        }
  
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
-               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+       err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+       if (!err)
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+       if (err)
+               DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
        pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
  
        ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
@@@ -2924,10 -3031,8 +3033,8 @@@ int i915_ggtt_probe_hw(struct drm_i915_
                 ggtt->base.total >> 20);
        DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
- #ifdef CONFIG_INTEL_IOMMU
-       if (intel_iommu_gfx_mapped)
+       if (intel_vtd_active())
                DRM_INFO("VT-d active for gfx access\n");
- #endif
  
        return 0;
  }
@@@ -3102,7 -3207,7 +3209,7 @@@ intel_rotate_pages(struct intel_rotatio
        int ret = -ENOMEM;
  
        /* Allocate a temporary list of source pages for random access. */
 -      page_addr_list = drm_malloc_gfp(n_pages,
 +      page_addr_list = kvmalloc_array(n_pages,
                                        sizeof(dma_addr_t),
                                        GFP_TEMPORARY);
        if (!page_addr_list)
        DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
                      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
  
 -      drm_free_large(page_addr_list);
 +      kvfree(page_addr_list);
  
        return st;
  
  err_sg_alloc:
        kfree(st);
  err_st_alloc:
 -      drm_free_large(page_addr_list);
 +      kvfree(page_addr_list);
  
        DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
                      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
index 129c58bb4805509ee708830458e766cfaa24237e,8c508bd9088eab5dfd1cab9356c2d1822c664dbd..7b7c84369d782e7be6020102d3687ff0093fe0da
@@@ -67,12 -67,18 +67,18 @@@ struct i915_dependency 
  struct i915_priotree {
        struct list_head signalers_list; /* those before us, we depend upon */
        struct list_head waiters_list; /* those after us, they depend upon us */
-       struct rb_node node;
+       struct list_head link;
        int priority;
  #define I915_PRIORITY_MAX 1024
+ #define I915_PRIORITY_NORMAL 0
  #define I915_PRIORITY_MIN (-I915_PRIORITY_MAX)
  };
  
+ struct i915_gem_capture_list {
+       struct i915_gem_capture_list *next;
+       struct i915_vma *vma;
+ };
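
The capture_list added here is a plain singly-linked list of extra vmas to include in the GPU error state. A minimal sketch of how such a list can be walked and freed once the error state has been captured (the demo_ helper is illustrative, not the driver's own routine):

#include <linux/slab.h>

/* Walk the singly-linked capture list and release each node. */
static void demo_free_capture_list(struct i915_gem_capture_list *head)
{
	while (head) {
		struct i915_gem_capture_list *next = head->next;

		kfree(head);
		head = next;
	}
}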
  /**
   * Request queue structure.
   *
@@@ -167,6 -173,12 +173,12 @@@ struct drm_i915_gem_request 
         * error state dump only).
         */
        struct i915_vma *batch;
+       /** Additional buffers requested by userspace to be captured upon
+        * a GPU hang. The vma/obj on this list are protected by their
+        * active reference - all objects on this list must also be
+        * on the active_list (of their final request).
+        */
+       struct i915_gem_capture_list *capture_list;
        struct list_head active_list;
  
        /** Time at which this request was emitted, in jiffies. */
@@@ -521,7 -533,7 +533,7 @@@ static inline struct drm_i915_gem_reque
  __i915_gem_active_get_rcu(const struct i915_gem_active *active)
  {
        /* Performing a lockless retrieval of the active request is super
 -       * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
 +       * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
         * slab of request objects will not be freed whilst we hold the
         * RCU read lock. It does not guarantee that the request itself
         * will not be freed and then *reused*. Viz,
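
The comment above describes the usual SLAB_TYPESAFE_BY_RCU discipline: under the RCU read lock the memory is guaranteed to stay an object of the right type, but not the same object, so a lookup must take a reference with kref_get_unless_zero() and then re-check that the slot still points at the object it pinned. A generic, hedged sketch of that pattern (the demo_ names are hypothetical, not the i915 helpers):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

extern struct kmem_cache *demo_cache;	/* created with SLAB_TYPESAFE_BY_RCU */

struct demo_obj {
	struct kref ref;
};

static void demo_release(struct kref *ref)
{
	kmem_cache_free(demo_cache, container_of(ref, struct demo_obj, ref));
}

static struct demo_obj *demo_get_rcu(struct demo_obj __rcu **slot)
{
	struct demo_obj *obj;

	rcu_read_lock();
	do {
		obj = rcu_dereference(*slot);
		if (!obj)
			break;

		/* The cache only guarantees the memory remains a demo_obj;
		 * it may have been freed and reused, so the reference is
		 * only meaningful if the refcount was not already zero... */
		if (!kref_get_unless_zero(&obj->ref)) {
			obj = NULL;
			break;
		}

		/* ...and the slot must be re-checked to confirm we pinned
		 * the object still installed there, not a recycled one. */
		if (obj == rcu_dereference(*slot))
			break;

		kref_put(&obj->ref, demo_release);
	} while (1);
	rcu_read_unlock();

	return obj;
}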
index 04493ef1d2f7a5c4108c697cfb4015b7e9c4ce33,d63a2ba3bc4faf90ddde0fdb3c85aea6b640a2a7..7b7f55a28eec2038bcb94cb25353d8f0eaed386f
@@@ -720,7 -720,9 +720,7 @@@ static u32 i915_get_vblank_counter(stru
        struct drm_i915_private *dev_priv = to_i915(dev);
        i915_reg_t high_frame, low_frame;
        u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
 -      struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
 -                                                              pipe);
 -      const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
 +      const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode;
        unsigned long irqflags;
  
        htotal = mode->crtc_htotal;
@@@ -777,17 -779,13 +777,17 @@@ static int __intel_get_crtc_scanline(st
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
 -      const struct drm_display_mode *mode = &crtc->base.hwmode;
 +      const struct drm_display_mode *mode;
 +      struct drm_vblank_crtc *vblank;
        enum pipe pipe = crtc->pipe;
        int position, vtotal;
  
        if (!crtc->active)
                return -1;
  
 +      vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
 +      mode = &vblank->hwmode;
 +
        vtotal = mode->crtc_vtotal;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                vtotal /= 2;
        return (position + crtc->scanline_offset) % vtotal;
  }
  
 -static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 -                                  unsigned int flags, int *vpos, int *hpos,
 -                                  ktime_t *stime, ktime_t *etime,
 -                                  const struct drm_display_mode *mode)
 +static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
 +                                   bool in_vblank_irq, int *vpos, int *hpos,
 +                                   ktime_t *stime, ktime_t *etime,
 +                                   const struct drm_display_mode *mode)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
        int position;
        int vbl_start, vbl_end, hsync_start, htotal, vtotal;
        bool in_vbl = true;
 -      int ret = 0;
        unsigned long irqflags;
  
        if (WARN_ON(!mode->crtc_clock)) {
                DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
                                 "pipe %c\n", pipe_name(pipe));
 -              return 0;
 +              return false;
        }
  
        htotal = mode->crtc_htotal;
                vtotal /= 2;
        }
  
 -      ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
 -
        /*
         * Lock uncore.lock, as we will do multiple timing critical raw
         * register reads, potentially with preemption disabled, so the
                *hpos = position - (*vpos * htotal);
        }
  
 -      /* In vblank? */
 -      if (in_vbl)
 -              ret |= DRM_SCANOUTPOS_IN_VBLANK;
 -
 -      return ret;
 +      return true;
  }
  
  int intel_get_crtc_scanline(struct intel_crtc *crtc)
        return position;
  }
  
 -static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
 -                            int *max_error,
 -                            struct timeval *vblank_time,
 -                            unsigned flags)
 -{
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -      struct intel_crtc *crtc;
 -
 -      if (pipe >= INTEL_INFO(dev_priv)->num_pipes) {
 -              DRM_ERROR("Invalid crtc %u\n", pipe);
 -              return -EINVAL;
 -      }
 -
 -      /* Get drm_crtc to timestamp: */
 -      crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
 -      if (crtc == NULL) {
 -              DRM_ERROR("Invalid crtc %u\n", pipe);
 -              return -EINVAL;
 -      }
 -
 -      if (!crtc->base.hwmode.crtc_clock) {
 -              DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
 -              return -EBUSY;
 -      }
 -
 -      /* Helper routine in DRM core does all the work: */
 -      return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
 -                                                   vblank_time, flags,
 -                                                   &crtc->base.hwmode);
 -}
 -
  static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
  {
        u32 busy_up, busy_down, max_avg, min_avg;
@@@ -1200,7 -1236,7 +1200,7 @@@ out
  static void ivybridge_parity_work(struct work_struct *work)
  {
        struct drm_i915_private *dev_priv =
-               container_of(work, struct drm_i915_private, l3_parity.error_work);
+               container_of(work, typeof(*dev_priv), l3_parity.error_work);
        u32 error_status, row, bank, subbank;
        char *parity_event[6];
        uint32_t misccpctl;
@@@ -1317,14 -1353,16 +1317,16 @@@ static void snb_gt_irq_handler(struct d
                ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
  }
  
- static __always_inline void
+ static void
  gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
  {
        bool tasklet = false;
  
        if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
-               set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
-               tasklet = true;
+               if (port_count(&engine->execlist_port[0])) {
+                       __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+                       tasklet = true;
+               }
        }
  
        if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
@@@ -2917,7 -2955,6 +2919,6 @@@ static void vlv_display_irq_postinstall
        u32 pipestat_mask;
        u32 enable_mask;
        enum pipe pipe;
-       u32 val;
  
        pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
                        PIPE_CRC_DONE_INTERRUPT_STATUS;
  
        enable_mask = I915_DISPLAY_PORT_INTERRUPT |
                I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
-               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
+               I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
+               I915_LPE_PIPE_A_INTERRUPT |
+               I915_LPE_PIPE_B_INTERRUPT;
        if (IS_CHERRYVIEW(dev_priv))
-               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
+               enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
+                       I915_LPE_PIPE_C_INTERRUPT;
  
        WARN_ON(dev_priv->irq_mask != ~0);
  
-       val = (I915_LPE_PIPE_A_INTERRUPT |
-               I915_LPE_PIPE_B_INTERRUPT |
-               I915_LPE_PIPE_C_INTERRUPT);
-       enable_mask |= val;
        dev_priv->irq_mask = ~enable_mask;
  
        GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
@@@ -4197,11 -4232,15 +4196,15 @@@ static void i965_irq_uninstall(struct d
  void intel_irq_init(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = &dev_priv->drm;
+       int i;
  
        intel_hpd_init_work(dev_priv);
  
        INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
        INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
+       for (i = 0; i < MAX_L3_SLICES; ++i)
+               dev_priv->l3_parity.remap_info[i] = NULL;
  
        if (HAS_GUC_SCHED(dev_priv))
                dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;
  
        dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
  
 -      dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
 +      dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
        dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
  
        if (IS_CHERRYVIEW(dev_priv)) {
        }
  }
  
+ /**
+  * intel_irq_fini - deinitializes IRQ support
+  * @i915: i915 device instance
+  *
+  * This function deinitializes all the IRQ support.
+  */
+ void intel_irq_fini(struct drm_i915_private *i915)
+ {
+       int i;
+       for (i = 0; i < MAX_L3_SLICES; ++i)
+               kfree(i915->l3_parity.remap_info[i]);
+ }
  /**
   * intel_irq_install - enables the hardware interrupt
   * @dev_priv: i915 device instance
index a40c82c654506a58f4205bd6c0ef5490faf2baeb,4a0ed0278ae99702228e602cd642d20975232662..4325cb0a04f5db5e3cdf37e70330283309d93d06
@@@ -55,7 -55,7 +55,7 @@@ intel_create_plane_state(struct drm_pla
                return NULL;
  
        state->base.plane = plane;
 -      state->base.rotation = DRM_ROTATE_0;
 +      state->base.rotation = DRM_MODE_ROTATE_0;
        state->ckey.flags = I915_SET_COLORKEY_NONE;
  
        return state;
@@@ -102,23 -102,7 +102,7 @@@ voi
  intel_plane_destroy_state(struct drm_plane *plane,
                          struct drm_plane_state *state)
  {
-       struct i915_vma *vma;
-       vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
-       /*
-        * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
-        * We currently don't clear all planes during driver unload, so we have
-        * to be able to unpin vma here for now.
-        *
-        * Normally this can only happen during unload when kmscon is disabled
-        * and userspace doesn't attempt to set a framebuffer at all.
-        */
-       if (vma) {
-               mutex_lock(&plane->dev->struct_mutex);
-               intel_unpin_fb_vma(vma);
-               mutex_unlock(&plane->dev->struct_mutex);
-       }
+       WARN_ON(to_intel_plane_state(state)->vma);
  
        drm_atomic_helper_plane_destroy_state(plane, state);
  }
@@@ -178,14 -162,14 +162,14 @@@ int intel_plane_atomic_check_with_state
  
        /* CHV ignores the mirror bit when the rotate bit is set :( */
        if (IS_CHERRYVIEW(dev_priv) &&
 -          state->rotation & DRM_ROTATE_180 &&
 -          state->rotation & DRM_REFLECT_X) {
 +          state->rotation & DRM_MODE_ROTATE_180 &&
 +          state->rotation & DRM_MODE_REFLECT_X) {
                DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
                return -EINVAL;
        }
  
        intel_state->base.visible = false;
-       ret = intel_plane->check_plane(plane, crtc_state, intel_state);
+       ret = intel_plane->check_plane(intel_plane, crtc_state, intel_state);
        if (ret)
                return ret;
  
@@@ -235,14 -219,14 +219,14 @@@ static void intel_plane_atomic_update(s
                trace_intel_update_plane(plane,
                                         to_intel_crtc(crtc));
  
-               intel_plane->update_plane(plane,
+               intel_plane->update_plane(intel_plane,
                                          to_intel_crtc_state(crtc->state),
                                          intel_state);
        } else {
                trace_intel_disable_plane(plane,
                                          to_intel_crtc(crtc));
  
-               intel_plane->disable_plane(plane, crtc);
+               intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
        }
  }
  
index 6a037b856d9672310a12f8652d4ed608fd93d59b,55c2c149ad0b11dde3857fe8e45a6dd70d45e0d7..7fa21df5bcd78334ff507a305b5325d81469d91d
@@@ -1277,7 -1277,7 +1277,7 @@@ static void assert_sprites_disabled(str
                I915_STATE_WARN(val & SPRITE_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
                     plane_name(pipe), pipe_name(pipe));
-       } else if (INTEL_GEN(dev_priv) >= 5) {
+       } else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                u32 val = I915_READ(DVSCNTR(pipe));
                I915_STATE_WARN(val & DVS_ENABLE,
                     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
@@@ -2084,6 -2084,18 +2084,18 @@@ intel_fill_fb_ggtt_view(struct i915_ggt
        }
  }
  
+ static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
+ {
+       if (IS_I830(dev_priv))
+               return 16 * 1024;
+       else if (IS_I85X(dev_priv))
+               return 256;
+       else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+               return 32;
+       else
+               return 4 * 1024;
+ }
  static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
  {
        if (INTEL_INFO(dev_priv)->gen >= 9)
@@@ -2386,11 -2398,17 +2398,17 @@@ u32 intel_compute_tile_offset(int *x, i
                              const struct intel_plane_state *state,
                              int plane)
  {
-       const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
+       struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
+       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
        const struct drm_framebuffer *fb = state->base.fb;
        unsigned int rotation = state->base.rotation;
        int pitch = intel_fb_pitch(fb, plane, rotation);
-       u32 alignment = intel_surf_alignment(fb, plane);
+       u32 alignment;
+       if (intel_plane->id == PLANE_CURSOR)
+               alignment = intel_cursor_alignment(dev_priv);
+       else
+               alignment = intel_surf_alignment(fb, plane);
  
        return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
                                          rotation, alignment);
@@@ -2468,7 -2486,7 +2486,7 @@@ intel_fill_fb_info(struct drm_i915_priv
  
                offset = _intel_compute_tile_offset(dev_priv, &x, &y,
                                                    fb, i, fb->pitches[i],
 -                                                  DRM_ROTATE_0, tile_size);
 +                                                  DRM_MODE_ROTATE_0, tile_size);
                offset /= tile_size;
  
                if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
                        drm_rect_rotate(&r,
                                        rot_info->plane[i].width * tile_width,
                                        rot_info->plane[i].height * tile_height,
 -                                      DRM_ROTATE_270);
 +                                      DRM_MODE_ROTATE_270);
                        x = r.x1;
                        y = r.y1;
  
@@@ -2750,7 -2768,7 +2768,7 @@@ intel_find_initial_plane_obj(struct int
                                false);
        intel_pre_disable_primary_noatomic(&intel_crtc->base);
        trace_intel_disable_plane(primary, intel_crtc);
-       intel_plane->disable_plane(primary, &intel_crtc->base);
+       intel_plane->disable_plane(intel_plane, intel_crtc);
  
        return;
  
@@@ -2939,7 -2957,7 +2957,7 @@@ int skl_check_plane_surface(struct inte
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
                                fb->width << 16, fb->height << 16,
 -                              DRM_ROTATE_270);
 +                              DRM_MODE_ROTATE_270);
  
        /*
         * Handle the AUX surface first since
@@@ -2981,10 -2999,8 +2999,8 @@@ static u32 i9xx_plane_ctl(const struct 
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
  
-       if (INTEL_GEN(dev_priv) < 4) {
-               if (crtc->pipe == PIPE_B)
-                       dspcntr |= DISPPLANE_SEL_PIPE_B;
-       }
+       if (INTEL_GEN(dev_priv) < 4)
+               dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
  
        switch (fb->format->format) {
        case DRM_FORMAT_C8:
            fb->modifier == I915_FORMAT_MOD_X_TILED)
                dspcntr |= DISPPLANE_TILED;
  
 -      if (rotation & DRM_ROTATE_180)
 +      if (rotation & DRM_MODE_ROTATE_180)
                dspcntr |= DISPPLANE_ROTATE_180;
  
 -      if (rotation & DRM_REFLECT_X)
 +      if (rotation & DRM_MODE_REFLECT_X)
                dspcntr |= DISPPLANE_MIRROR;
  
        return dspcntr;
@@@ -3048,10 -3064,10 +3064,10 @@@ int i9xx_check_plane_surface(struct int
                int src_w = drm_rect_width(&plane_state->base.src) >> 16;
                int src_h = drm_rect_height(&plane_state->base.src) >> 16;
  
 -              if (rotation & DRM_ROTATE_180) {
 +              if (rotation & DRM_MODE_ROTATE_180) {
                        src_x += src_w - 1;
                        src_y += src_h - 1;
 -              } else if (rotation & DRM_REFLECT_X) {
 +              } else if (rotation & DRM_MODE_REFLECT_X) {
                        src_x += src_w - 1;
                }
        }
        return 0;
  }
  
- static void i9xx_update_primary_plane(struct drm_plane *primary,
+ static void i9xx_update_primary_plane(struct intel_plane *primary,
                                      const struct intel_crtc_state *crtc_state,
                                      const struct intel_plane_state *plane_state)
  {
-       struct drm_i915_private *dev_priv = to_i915(primary->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       int plane = intel_crtc->plane;
+       struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum plane plane = primary->plane;
        u32 linear_offset;
        u32 dspcntr = plane_state->ctl;
        i915_reg_t reg = DSPCNTR(plane);
        linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
  
        if (INTEL_GEN(dev_priv) >= 4)
-               intel_crtc->dspaddr_offset = plane_state->main.offset;
+               crtc->dspaddr_offset = plane_state->main.offset;
        else
-               intel_crtc->dspaddr_offset = linear_offset;
+               crtc->dspaddr_offset = linear_offset;
  
-       intel_crtc->adjusted_x = x;
-       intel_crtc->adjusted_y = y;
+       crtc->adjusted_x = x;
+       crtc->adjusted_y = y;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                I915_WRITE_FW(DSPSURF(plane),
                              intel_plane_ggtt_offset(plane_state) +
-                             intel_crtc->dspaddr_offset);
+                             crtc->dspaddr_offset);
                I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
        } else if (INTEL_GEN(dev_priv) >= 4) {
                I915_WRITE_FW(DSPSURF(plane),
                              intel_plane_ggtt_offset(plane_state) +
-                             intel_crtc->dspaddr_offset);
+                             crtc->dspaddr_offset);
                I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
                I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
        } else {
                I915_WRITE_FW(DSPADDR(plane),
                              intel_plane_ggtt_offset(plane_state) +
-                             intel_crtc->dspaddr_offset);
+                             crtc->dspaddr_offset);
        }
        POSTING_READ_FW(reg);
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
- static void i9xx_disable_primary_plane(struct drm_plane *primary,
-                                      struct drm_crtc *crtc)
+ static void i9xx_disable_primary_plane(struct intel_plane *primary,
+                                      struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int plane = intel_crtc->plane;
+       struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+       enum plane plane = primary->plane;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@@ -3271,17 -3285,17 +3285,17 @@@ static u32 skl_plane_ctl_tiling(uint64_
  static u32 skl_plane_ctl_rotation(unsigned int rotation)
  {
        switch (rotation) {
 -      case DRM_ROTATE_0:
 +      case DRM_MODE_ROTATE_0:
                break;
        /*
 -       * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
 +       * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
        * while i915 HW rotation is clockwise, that's why this swapping.
         */
 -      case DRM_ROTATE_90:
 +      case DRM_MODE_ROTATE_90:
                return PLANE_CTL_ROTATE_270;
 -      case DRM_ROTATE_180:
 +      case DRM_MODE_ROTATE_180:
                return PLANE_CTL_ROTATE_180;
 -      case DRM_ROTATE_270:
 +      case DRM_MODE_ROTATE_270:
                return PLANE_CTL_ROTATE_90;
        default:
                MISSING_CASE(rotation);
@@@ -3321,16 -3335,15 +3335,15 @@@ u32 skl_plane_ctl(const struct intel_cr
        return plane_ctl;
  }
  
- static void skylake_update_primary_plane(struct drm_plane *plane,
+ static void skylake_update_primary_plane(struct intel_plane *plane,
                                         const struct intel_crtc_state *crtc_state,
                                         const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       enum plane_id plane_id = to_intel_plane(plane)->id;
-       enum pipe pipe = to_intel_plane(plane)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
        u32 plane_ctl = plane_state->ctl;
        unsigned int rotation = plane_state->base.rotation;
        u32 stride = skl_plane_stride(fb, 0, rotation);
        dst_w--;
        dst_h--;
  
-       intel_crtc->dspaddr_offset = surf_addr;
+       crtc->dspaddr_offset = surf_addr;
  
-       intel_crtc->adjusted_x = src_x;
-       intel_crtc->adjusted_y = src_y;
+       crtc->adjusted_x = src_x;
+       crtc->adjusted_y = src_y;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
- static void skylake_disable_primary_plane(struct drm_plane *primary,
-                                         struct drm_crtc *crtc)
+ static void skylake_disable_primary_plane(struct intel_plane *primary,
+                                         struct intel_crtc *crtc)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       enum plane_id plane_id = to_intel_plane(primary)->id;
-       enum pipe pipe = to_intel_plane(primary)->pipe;
+       struct drm_i915_private *dev_priv = to_i915(primary->base.dev);
+       enum plane_id plane_id = primary->id;
+       enum pipe pipe = primary->pipe;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@@ -3433,7 -3445,7 +3445,7 @@@ static void intel_update_primary_planes
                        trace_intel_update_plane(&plane->base,
                                                 to_intel_crtc(crtc));
  
-                       plane->update_plane(&plane->base,
+                       plane->update_plane(plane,
                                            to_intel_crtc_state(crtc->state),
                                            plane_state);
                }
@@@ -4671,7 -4683,7 +4683,7 @@@ int skl_update_scaler_crtc(struct intel
        const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
  
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
 -              &state->scaler_state.scaler_id, DRM_ROTATE_0,
 +              &state->scaler_state.scaler_id, DRM_MODE_ROTATE_0,
                state->pipe_src_w, state->pipe_src_h,
                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
  }
@@@ -4861,12 -4873,9 +4873,9 @@@ static void intel_crtc_dpms_overlay_dis
  {
        if (intel_crtc->overlay) {
                struct drm_device *dev = intel_crtc->base.dev;
-               struct drm_i915_private *dev_priv = to_i915(dev);
  
                mutex_lock(&dev->struct_mutex);
-               dev_priv->mm.interruptible = false;
                (void) intel_overlay_switch_off(intel_crtc->overlay);
-               dev_priv->mm.interruptible = true;
                mutex_unlock(&dev->struct_mutex);
        }
  
@@@ -5086,7 -5095,7 +5095,7 @@@ static void intel_crtc_disable_planes(s
        intel_crtc_dpms_overlay_disable(intel_crtc);
  
        drm_for_each_plane_mask(p, dev, plane_mask)
-               to_intel_plane(p)->disable_plane(p, crtc);
+               to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc);
  
        /*
         * FIXME: Once we grow proper nuclear flip support out of this we need
@@@ -5722,6 -5731,8 +5731,8 @@@ static void i9xx_set_pll_dividers(struc
  static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
                             struct drm_atomic_state *old_state)
  {
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
        struct drm_crtc *crtc = pipe_config->base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
  
        intel_color_load_luts(&pipe_config->base);
  
-       intel_update_watermarks(intel_crtc);
+       if (dev_priv->display.initial_watermarks != NULL)
+               dev_priv->display.initial_watermarks(old_intel_state,
+                                                    intel_crtc->config);
+       else
+               intel_update_watermarks(intel_crtc);
        intel_enable_pipe(intel_crtc);
  
        assert_vblank_disabled(crtc);
@@@ -5920,9 -5935,10 +5935,10 @@@ void intel_encoder_destroy(struct drm_e
  
  /* Cross check the actual hw state with our own modeset state tracking (and it's
   * internal consistency). */
- static void intel_connector_verify_state(struct intel_connector *connector)
+ static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
+                                        struct drm_connector_state *conn_state)
  {
-       struct drm_crtc *crtc = connector->base.state->crtc;
+       struct intel_connector *connector = to_intel_connector(conn_state->connector);
  
        DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
                      connector->base.base.id,
  
        if (connector->get_hw_state(connector)) {
                struct intel_encoder *encoder = connector->encoder;
-               struct drm_connector_state *conn_state = connector->base.state;
  
-               I915_STATE_WARN(!crtc,
+               I915_STATE_WARN(!crtc_state,
                         "connector enabled without attached crtc\n");
  
-               if (!crtc)
+               if (!crtc_state)
                        return;
  
-               I915_STATE_WARN(!crtc->state->active,
+               I915_STATE_WARN(!crtc_state->active,
                      "connector is active, but attached crtc isn't\n");
  
                if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
                I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
                        "attached encoder crtc differs from connector crtc\n");
        } else {
-               I915_STATE_WARN(crtc && crtc->state->active,
+               I915_STATE_WARN(crtc_state && crtc_state->active,
                        "attached crtc is active, but connector isn't\n");
-               I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
+               I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
                        "best encoder set without crtc!\n");
        }
  }
@@@ -6372,8 -6387,8 +6387,8 @@@ static void vlv_pllb_recal_opamp(struc
        vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
  
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
-       reg_val &= 0x8cffffff;
-       reg_val = 0x8c000000;
+       reg_val &= 0x00ffffff;
+       reg_val |= 0x8c000000;
        vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
  
        reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
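
This hunk fixes a read-modify-write that previously threw away the value it had just read: the old sequence masked reg_val and then plainly assigned 0x8c000000, while the new one keeps the low 24 bits and ORs in only the opamp bits. A tiny standalone illustration using the constants from this hunk (the sample read value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t read = 0x00abcdef;	/* pretend value read back from VLV_REF_DW13 */

	uint32_t old_result = 0x8c000000;				/* '=' discarded the read value */
	uint32_t new_result = (read & 0x00ffffff) | 0x8c000000;	/* keep low 24 bits, set top byte */

	printf("old=%08x new=%08x\n", old_result, new_result);
	return 0;
}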
@@@ -8177,9 -8192,6 +8192,6 @@@ static int ironlake_crtc_compute_clock(
  {
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct dpll reduced_clock;
-       bool has_reduced_clock = false;
-       struct intel_shared_dpll *pll;
        const struct intel_limit *limit;
        int refclk = 120000;
  
                return -EINVAL;
        }
  
-       ironlake_compute_dpll(crtc, crtc_state,
-                             has_reduced_clock ? &reduced_clock : NULL);
+       ironlake_compute_dpll(crtc, crtc_state, NULL);
  
-       pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
-       if (pll == NULL) {
+       if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
                DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
                                 pipe_name(crtc->pipe));
                return -EINVAL;
        }
  
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
-           has_reduced_clock)
-               crtc->lowfreq_avail = true;
        return 0;
  }
  
        return active;
  }
  
 -          plane_state->base.rotation & DRM_ROTATE_180)
+ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
+ {
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       u32 base;
+       if (INTEL_INFO(dev_priv)->cursor_needs_physical)
+               base = obj->phys_handle->busaddr;
+       else
+               base = intel_plane_ggtt_offset(plane_state);
+       base += plane_state->main.offset;
+       /* ILK+ do this automagically */
+       if (HAS_GMCH_DISPLAY(dev_priv) &&
++          plane_state->base.rotation & DRM_MODE_ROTATE_180)
+               base += (plane_state->base.crtc_h *
+                        plane_state->base.crtc_w - 1) * fb->format->cpp[0];
+       return base;
+ }
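
For GMCH platforms with a 180-degree rotated cursor, intel_cursor_base() advances the base address to the last pixel of the cursor image, since the hardware then effectively scans the buffer in reverse. A quick worked example of that offset for a common 64x64 ARGB cursor (values chosen for illustration):

#include <stdio.h>

int main(void)
{
	int w = 64, h = 64, cpp = 4;	/* 64x64 cursor, 4 bytes per ARGB pixel */

	/* Matches base += (crtc_h * crtc_w - 1) * cpp from the hunk above. */
	printf("rotation offset = %d bytes\n", (h * w - 1) * cpp);	/* 16380 */
	return 0;
}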
+ static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
+ {
+       int x = plane_state->base.crtc_x;
+       int y = plane_state->base.crtc_y;
+       u32 pos = 0;
+       if (x < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+               x = -x;
+       }
+       pos |= x << CURSOR_X_SHIFT;
+       if (y < 0) {
+               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+               y = -y;
+       }
+       pos |= y << CURSOR_Y_SHIFT;
+       return pos;
+ }
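
intel_cursor_position() packs the cursor coordinates as sign-magnitude fields: a negative coordinate sets the sign bit for that axis and stores the absolute value. A standalone sketch of the same packing with hypothetical shift/sign constants (the real CURSOR_* values live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout for illustration; the real values come from i915_reg.h. */
#define DEMO_CURSOR_X_SHIFT	0
#define DEMO_CURSOR_Y_SHIFT	16
#define DEMO_CURSOR_POS_SIGN	0x8000

static uint32_t demo_cursor_position(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= DEMO_CURSOR_POS_SIGN << DEMO_CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << DEMO_CURSOR_X_SHIFT;

	if (y < 0) {
		pos |= DEMO_CURSOR_POS_SIGN << DEMO_CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << DEMO_CURSOR_Y_SHIFT;

	return pos;
}

int main(void)
{
	/* x = -16, y = 32: sign bit set in the X field, magnitudes stored. */
	printf("%08x\n", demo_cursor_position(-16, 32));
	return 0;
}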
+ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
+ {
+       const struct drm_mode_config *config =
+               &plane_state->base.plane->dev->mode_config;
+       int width = plane_state->base.crtc_w;
+       int height = plane_state->base.crtc_h;
+       return width > 0 && width <= config->cursor_width &&
+               height > 0 && height <= config->cursor_height;
+ }
+ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
+                             struct intel_plane_state *plane_state)
+ {
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int src_x, src_y;
+       u32 offset;
+       int ret;
+       ret = drm_plane_helper_check_state(&plane_state->base,
+                                          &plane_state->clip,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          DRM_PLANE_HELPER_NO_SCALING,
+                                          true, true);
+       if (ret)
+               return ret;
+       if (!fb)
+               return 0;
+       if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
+               DRM_DEBUG_KMS("cursor cannot be tiled\n");
+               return -EINVAL;
+       }
+       src_x = plane_state->base.src_x >> 16;
+       src_y = plane_state->base.src_y >> 16;
+       intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
+       offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0);
+       if (src_x != 0 || src_y != 0) {
+               DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+               return -EINVAL;
+       }
+       plane_state->main.offset = offset;
+       return 0;
+ }
  static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
                           const struct intel_plane_state *plane_state)
  {
-       unsigned int width = plane_state->base.crtc_w;
-       unsigned int stride = roundup_pow_of_two(width) * 4;
+       const struct drm_framebuffer *fb = plane_state->base.fb;
  
-       switch (stride) {
-       default:
-               WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
-                         width, stride);
-               stride = 256;
-               /* fallthrough */
+       return CURSOR_ENABLE |
+               CURSOR_GAMMA_ENABLE |
+               CURSOR_FORMAT_ARGB |
+               CURSOR_STRIDE(fb->pitches[0]);
+ }
+ static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
+ {
+       int width = plane_state->base.crtc_w;
+       /*
+        * 845g/865g are only limited by the width of their cursors,
+        * the height is arbitrary up to the precision of the register.
+        */
+       return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
+ }
+ static int i845_check_cursor(struct intel_plane *plane,
+                            struct intel_crtc_state *crtc_state,
+                            struct intel_plane_state *plane_state)
+ {
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       int ret;
+       ret = intel_check_cursor(crtc_state, plane_state);
+       if (ret)
+               return ret;
+       /* if we want to turn off the cursor ignore width and height */
+       if (!fb)
+               return 0;
+       /* Check for which cursor types we support */
+       if (!i845_cursor_size_ok(plane_state)) {
+               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+                         plane_state->base.crtc_w,
+                         plane_state->base.crtc_h);
+               return -EINVAL;
+       }
+       switch (fb->pitches[0]) {
        case 256:
        case 512:
        case 1024:
        case 2048:
                break;
+       default:
+               DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
+                             fb->pitches[0]);
+               return -EINVAL;
        }
  
-       return CURSOR_ENABLE |
-               CURSOR_GAMMA_ENABLE |
-               CURSOR_FORMAT_ARGB |
-               CURSOR_STRIDE(stride);
+       plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
+       return 0;
  }
  
- static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+ static void i845_update_cursor(struct intel_plane *plane,
+                              const struct intel_crtc_state *crtc_state,
                               const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       uint32_t cntl = 0, size = 0;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       u32 cntl = 0, base = 0, pos = 0, size = 0;
+       unsigned long irqflags;
  
        if (plane_state && plane_state->base.visible) {
                unsigned int width = plane_state->base.crtc_w;
  
                cntl = plane_state->ctl;
                size = (height << 12) | width;
-       }
  
-       if (intel_crtc->cursor_cntl != 0 &&
-           (intel_crtc->cursor_base != base ||
-            intel_crtc->cursor_size != size ||
-            intel_crtc->cursor_cntl != cntl)) {
-               /* On these chipsets we can only modify the base/size/stride
-                * whilst the cursor is disabled.
-                */
-               I915_WRITE_FW(CURCNTR(PIPE_A), 0);
-               POSTING_READ_FW(CURCNTR(PIPE_A));
-               intel_crtc->cursor_cntl = 0;
+               base = intel_cursor_base(plane_state);
+               pos = intel_cursor_position(plane_state);
        }
  
-       if (intel_crtc->cursor_base != base) {
-               I915_WRITE_FW(CURBASE(PIPE_A), base);
-               intel_crtc->cursor_base = base;
-       }
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
-       if (intel_crtc->cursor_size != size) {
+       /* On these chipsets we can only modify the base/size/stride
+        * whilst the cursor is disabled.
+        */
+       if (plane->cursor.base != base ||
+           plane->cursor.size != size ||
+           plane->cursor.cntl != cntl) {
+               I915_WRITE_FW(CURCNTR(PIPE_A), 0);
+               I915_WRITE_FW(CURBASE(PIPE_A), base);
                I915_WRITE_FW(CURSIZE, size);
-               intel_crtc->cursor_size = size;
-       }
-       if (intel_crtc->cursor_cntl != cntl) {
+               I915_WRITE_FW(CURPOS(PIPE_A), pos);
                I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
-               POSTING_READ_FW(CURCNTR(PIPE_A));
-               intel_crtc->cursor_cntl = cntl;
+               plane->cursor.base = base;
+               plane->cursor.size = size;
+               plane->cursor.cntl = cntl;
+       } else {
+               I915_WRITE_FW(CURPOS(PIPE_A), pos);
        }
+       POSTING_READ_FW(CURCNTR(PIPE_A));
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ }
+ static void i845_disable_cursor(struct intel_plane *plane,
+                               struct intel_crtc *crtc)
+ {
+       i845_update_cursor(plane, NULL, NULL);
  }
  
  static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        struct drm_i915_private *dev_priv =
                to_i915(plane_state->base.plane->dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
-       enum pipe pipe = crtc->pipe;
        u32 cntl;
  
        cntl = MCURSOR_GAMMA_ENABLE;
        if (HAS_DDI(dev_priv))
                cntl |= CURSOR_PIPE_CSC_ENABLE;
  
-       cntl |= pipe << 28; /* Connect to correct pipe */
+       cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
  
        switch (plane_state->base.crtc_w) {
        case 64:
                return 0;
        }
  
 -      if (plane_state->base.rotation & DRM_ROTATE_180)
 +      if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
                cntl |= CURSOR_ROTATE_180;
  
        return cntl;
  }
  
- static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
-                              const struct intel_plane_state *plane_state)
+ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       uint32_t cntl = 0;
+       struct drm_i915_private *dev_priv =
+               to_i915(plane_state->base.plane->dev);
+       int width = plane_state->base.crtc_w;
+       int height = plane_state->base.crtc_h;
  
-       if (plane_state && plane_state->base.visible)
-               cntl = plane_state->ctl;
+       if (!intel_cursor_size_ok(plane_state))
+               return false;
  
-       if (intel_crtc->cursor_cntl != cntl) {
-               I915_WRITE_FW(CURCNTR(pipe), cntl);
-               POSTING_READ_FW(CURCNTR(pipe));
-               intel_crtc->cursor_cntl = cntl;
+       /* Cursor width is limited to a few power-of-two sizes */
+       switch (width) {
+       case 256:
+       case 128:
+       case 64:
+               break;
+       default:
+               return false;
        }
  
-       /* and commit changes on next vblank */
-       I915_WRITE_FW(CURBASE(pipe), base);
-       POSTING_READ_FW(CURBASE(pipe));
+       /*
+        * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
+        * height from 8 lines up to the cursor width, when the
+        * cursor is not rotated. Everything else requires square
+        * cursors.
+        */
+       if (HAS_CUR_FBC(dev_priv) &&
 -          plane_state->base.rotation & DRM_ROTATE_0) {
++          plane_state->base.rotation & DRM_MODE_ROTATE_0) {
+               if (height < 8 || height > width)
+                       return false;
+       } else {
+               if (height != width)
+                       return false;
+       }
  
-       intel_crtc->cursor_base = base;
+       return true;
  }
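
i9xx_cursor_size_ok() above encodes two rules: the width must be one of the 64/128/256 power-of-two sizes, and the height must equal the width unless CUR_FBC_CTL is available and the cursor is unrotated, in which case any height from 8 up to the width is allowed. A hedged re-statement with plain integers (it deliberately omits the mode_config max-size check and the rotation test handled by the real helper):

#include <stdbool.h>
#include <stdio.h>

static bool demo_i9xx_cursor_size_ok(int width, int height, bool has_cur_fbc)
{
	if (width != 64 && width != 128 && width != 256)
		return false;

	if (has_cur_fbc)		/* IVB+ style: short, non-square cursors allowed */
		return height >= 8 && height <= width;

	return height == width;		/* everything else requires square cursors */
}

int main(void)
{
	printf("%d %d %d\n",
	       demo_i9xx_cursor_size_ok(64, 64, false),	/* 1: square     */
	       demo_i9xx_cursor_size_ok(256, 8, true),	/* 1: FBC short  */
	       demo_i9xx_cursor_size_ok(128, 64, false));	/* 0: not square */
	return 0;
}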
  
- /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
- static void intel_crtc_update_cursor(struct drm_crtc *crtc,
-                                    const struct intel_plane_state *plane_state)
+ static int i9xx_check_cursor(struct intel_plane *plane,
+                            struct intel_crtc_state *crtc_state,
+                            struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       int pipe = intel_crtc->pipe;
-       u32 base = intel_crtc->cursor_addr;
-       unsigned long irqflags;
-       u32 pos = 0;
-       if (plane_state) {
-               int x = plane_state->base.crtc_x;
-               int y = plane_state->base.crtc_y;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
+       int ret;
  
-               if (x < 0) {
-                       pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-                       x = -x;
-               }
-               pos |= x << CURSOR_X_SHIFT;
+       ret = intel_check_cursor(crtc_state, plane_state);
+       if (ret)
+               return ret;
  
-               if (y < 0) {
-                       pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-                       y = -y;
-               }
-               pos |= y << CURSOR_Y_SHIFT;
+       /* if we want to turn off the cursor ignore width and height */
+       if (!fb)
+               return 0;
  
-               /* ILK+ do this automagically */
-               if (HAS_GMCH_DISPLAY(dev_priv) &&
-                   plane_state->base.rotation & DRM_MODE_ROTATE_180) {
-                       base += (plane_state->base.crtc_h *
-                                plane_state->base.crtc_w - 1) * 4;
-               }
+       /* Check for which cursor types we support */
+       if (!i9xx_cursor_size_ok(plane_state)) {
+               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
+                         plane_state->base.crtc_w,
+                         plane_state->base.crtc_h);
+               return -EINVAL;
        }
  
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
+               DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
+                             fb->pitches[0], plane_state->base.crtc_w);
+               return -EINVAL;
+       }
  
-       I915_WRITE_FW(CURPOS(pipe), pos);
+       /*
+        * There's something wrong with the cursor on CHV pipe C.
+        * If it straddles the left edge of the screen then
+        * moving it away from the edge or disabling it often
+        * results in a pipe underrun, and often that can lead to
+        * dead pipe (constant underrun reported, and it scans
+        * out just a solid color). To recover from that, the
+        * display power well must be turned off and on again.
+        * Refuse to put the cursor into that compromised position.
+        */
+       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+           plane_state->base.visible && plane_state->base.crtc_x < 0) {
+               DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+               return -EINVAL;
+       }
  
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               i845_update_cursor(crtc, base, plane_state);
-       else
-               i9xx_update_cursor(crtc, base, plane_state);
+       plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
  
-       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+       return 0;
  }
  
- static bool cursor_size_ok(struct drm_i915_private *dev_priv,
-                          uint32_t width, uint32_t height)
+ static void i9xx_update_cursor(struct intel_plane *plane,
+                              const struct intel_crtc_state *crtc_state,
+                              const struct intel_plane_state *plane_state)
  {
-       if (width == 0 || height == 0)
-               return false;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+       unsigned long irqflags;
  
-       /*
-        * 845g/865g are special in that they are only limited by
-        * the width of their cursors, the height is arbitrary up to
-        * the precision of the register. Everything else requires
-        * square cursors, limited to a few power-of-two sizes.
-        */
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
-               if ((width & 63) != 0)
-                       return false;
+       if (plane_state && plane_state->base.visible) {
+               cntl = plane_state->ctl;
  
-               if (width > (IS_I845G(dev_priv) ? 64 : 512))
-                       return false;
+               if (plane_state->base.crtc_h != plane_state->base.crtc_w)
+                       fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
  
-               if (height > 1023)
-                       return false;
+               base = intel_cursor_base(plane_state);
+               pos = intel_cursor_position(plane_state);
+       }
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+       /*
+        * On some platforms writing CURCNTR first will also
+        * cause CURPOS to be armed by the CURBASE write.
+        * Without the CURCNTR write the CURPOS write would
+        * arm itself.
+        *
+        * CURCNTR and CUR_FBC_CTL are always
+        * armed by the CURBASE write only.
+        */
+       if (plane->cursor.base != base ||
+           plane->cursor.size != fbc_ctl ||
+           plane->cursor.cntl != cntl) {
+               I915_WRITE_FW(CURCNTR(pipe), cntl);
+               if (HAS_CUR_FBC(dev_priv))
+                       I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
+               I915_WRITE_FW(CURPOS(pipe), pos);
+               I915_WRITE_FW(CURBASE(pipe), base);
+               plane->cursor.base = base;
+               plane->cursor.size = fbc_ctl;
+               plane->cursor.cntl = cntl;
        } else {
-               switch (width | height) {
-               case 256:
-               case 128:
-                       if (IS_GEN2(dev_priv))
-                               return false;
-               case 64:
-                       break;
-               default:
-                       return false;
-               }
+               I915_WRITE_FW(CURPOS(pipe), pos);
        }
  
-       return true;
+       POSTING_READ_FW(CURBASE(pipe));
+       spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ }
+ static void i9xx_disable_cursor(struct intel_plane *plane,
+                               struct intel_crtc *crtc)
+ {
+       i9xx_update_cursor(plane, NULL, NULL);
  }
  
  /* VESA 640x480x72Hz mode to set on the pipe */
  static struct drm_display_mode load_detect_mode = {
        DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
@@@ -9566,6 -9748,7 +9748,7 @@@ int intel_get_load_detect_pipe(struct d
         */
        if (!crtc) {
                DRM_DEBUG_KMS("no pipe available for load-detect\n");
+               ret = -ENODEV;
                goto fail;
        }
  
@@@ -9622,6 -9805,7 +9805,7 @@@ found
                DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
        if (IS_ERR(fb)) {
                DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
+               ret = PTR_ERR(fb);
                goto fail;
        }
  
@@@ -10853,21 -11037,21 +11037,21 @@@ int intel_plane_atomic_calc_changes(str
                         turn_off, turn_on, mode_changed);
  
        if (turn_on) {
-               if (INTEL_GEN(dev_priv) < 5)
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_pre = true;
  
                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (turn_off) {
-               if (INTEL_GEN(dev_priv) < 5)
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
                        pipe_config->update_wm_post = true;
  
                /* must disable cxsr around plane enable/disable */
                if (plane->id != PLANE_CURSOR)
                        pipe_config->disable_cxsr = true;
        } else if (intel_wm_need_update(&plane->base, plane_state)) {
-               if (INTEL_GEN(dev_priv) < 5) {
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        /* FIXME bollocks */
                        pipe_config->update_wm_pre = true;
                        pipe_config->update_wm_post = true;
@@@ -11291,7 -11475,8 +11475,8 @@@ clear_intel_crtc_state(struct intel_crt
        shared_dpll = crtc_state->shared_dpll;
        dpll_hw_state = crtc_state->dpll_hw_state;
        force_thru = crtc_state->pch_pfit.force_thru;
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                wm_state = crtc_state->wm;
  
        /* Keep base drm_crtc_state intact, only clear our extended struct */
        crtc_state->shared_dpll = shared_dpll;
        crtc_state->dpll_hw_state = dpll_hw_state;
        crtc_state->pch_pfit.force_thru = force_thru;
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+       if (IS_G4X(dev_priv) ||
+           IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                crtc_state->wm = wm_state;
  }
  
@@@ -11444,6 -11630,12 +11630,6 @@@ intel_modeset_update_crtc_state(struct 
        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
  
 -              /* Update hwmode for vblank functions */
 -              if (new_crtc_state->active)
 -                      crtc->hwmode = new_crtc_state->adjusted_mode;
 -              else
 -                      crtc->hwmode.crtc_clock = 0;
 -
                /*
                 * Update legacy state to satisfy fbc code. This can
                 * be removed when fbc uses the atomic state.
@@@ -11865,7 -12057,7 +12051,7 @@@ static void verify_wm_state(struct drm_
         * allocation. In that case since the ddb allocation will be updated
         * once the plane becomes visible, we can skip this check
         */
-       if (intel_crtc->cursor_addr) {
+       if (1) {
                hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
                sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
  
@@@ -11921,11 -12113,15 +12107,15 @@@ verify_connector_state(struct drm_devic
  
        for_each_new_connector_in_state(state, connector, new_conn_state, i) {
                struct drm_encoder *encoder = connector->encoder;
+               struct drm_crtc_state *crtc_state = NULL;
  
                if (new_conn_state->crtc != crtc)
                        continue;
  
-               intel_connector_verify_state(to_intel_connector(connector));
+               if (crtc)
+                       crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
+               intel_connector_verify_state(crtc_state, new_conn_state);
  
                I915_STATE_WARN(new_conn_state->best_encoder != encoder,
                     "connector's atomic encoder doesn't match legacy encoder\n");
@@@ -12043,7 -12239,7 +12233,7 @@@ verify_crtc_state(struct drm_crtc *crtc
  
        intel_pipe_config_sanity_check(dev_priv, pipe_config);
  
-       sw_config = to_intel_crtc_state(crtc->state);
+       sw_config = to_intel_crtc_state(new_crtc_state);
        if (!intel_pipe_config_compare(dev_priv, sw_config,
                                       pipe_config, false)) {
                I915_STATE_WARN(1, "pipe state doesn't match!\n");
@@@ -13139,7 -13335,7 +13329,7 @@@ intel_prepare_plane_fb(struct drm_plan
        if (obj) {
                if (plane->type == DRM_PLANE_TYPE_CURSOR &&
                    INTEL_INFO(dev_priv)->cursor_needs_physical) {
-                       const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+                       const int align = intel_cursor_alignment(dev_priv);
  
                        ret = i915_gem_object_attach_phys(obj, align);
                        if (ret) {
@@@ -13269,11 -13465,11 +13459,11 @@@ skl_max_scale(struct intel_crtc *intel_
  }
  
  static int
- intel_check_primary_plane(struct drm_plane *plane,
+ intel_check_primary_plane(struct intel_plane *plane,
                          struct intel_crtc_state *crtc_state,
                          struct intel_plane_state *state)
  {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
        struct drm_crtc *crtc = state->base.crtc;
        int min_scale = DRM_PLANE_HELPER_NO_SCALING;
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
@@@ -13452,7 -13648,7 +13642,7 @@@ intel_legacy_cursor_update(struct drm_p
                goto out_free;
  
        if (INTEL_INFO(dev_priv)->cursor_needs_physical) {
-               int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+               int align = intel_cursor_alignment(dev_priv);
  
                ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align);
                if (ret) {
  
        if (plane->state->visible) {
                trace_intel_update_plane(plane, to_intel_crtc(crtc));
-               intel_plane->update_plane(plane,
+               intel_plane->update_plane(intel_plane,
                                          to_intel_crtc_state(crtc->state),
                                          to_intel_plane_state(plane->state));
        } else {
                trace_intel_disable_plane(plane, to_intel_crtc(crtc));
-               intel_plane->disable_plane(plane, crtc);
+               intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc));
        }
  
        intel_cleanup_plane_fb(plane, new_plane_state);
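Both cursor hunks above replace the open-coded "IS_I830(dev_priv) ? 16 * 1024 : 256" alignment with a call to intel_cursor_alignment(). A hedged sketch of what that helper amounts to, based only on the expression it replaces (the real helper is defined elsewhere in intel_display.c and may differ):

	/* Hedged sketch mirroring the expression removed above. */
	static u32 intel_cursor_alignment(const struct drm_i915_private *dev_priv)
	{
		return IS_I830(dev_priv) ? 16 * 1024 : 256;
	}
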
@@@ -13607,22 -13803,22 +13797,22 @@@ intel_primary_plane_create(struct drm_i
  
        if (INTEL_GEN(dev_priv) >= 9) {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_90 |
 -                      DRM_ROTATE_180 | DRM_ROTATE_270;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 +                      DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
        } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_180 |
 -                      DRM_REFLECT_X;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 +                      DRM_MODE_REFLECT_X;
        } else if (INTEL_GEN(dev_priv) >= 4) {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_180;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        } else {
 -              supported_rotations = DRM_ROTATE_0;
 +              supported_rotations = DRM_MODE_ROTATE_0;
        }
  
        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&primary->base,
 -                                                 DRM_ROTATE_0,
 +                                                 DRM_MODE_ROTATE_0,
                                                   supported_rotations);
  
        drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
        return ERR_PTR(ret);
  }
  
- static int
- intel_check_cursor_plane(struct drm_plane *plane,
-                        struct intel_crtc_state *crtc_state,
-                        struct intel_plane_state *state)
- {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct drm_framebuffer *fb = state->base.fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-       enum pipe pipe = to_intel_plane(plane)->pipe;
-       unsigned stride;
-       int ret;
-       ret = drm_plane_helper_check_state(&state->base,
-                                          &state->clip,
-                                          DRM_PLANE_HELPER_NO_SCALING,
-                                          DRM_PLANE_HELPER_NO_SCALING,
-                                          true, true);
-       if (ret)
-               return ret;
-       /* if we want to turn off the cursor ignore width and height */
-       if (!obj)
-               return 0;
-       /* Check for which cursor types we support */
-       if (!cursor_size_ok(dev_priv, state->base.crtc_w,
-                           state->base.crtc_h)) {
-               DRM_DEBUG("Cursor dimension %dx%d not supported\n",
-                         state->base.crtc_w, state->base.crtc_h);
-               return -EINVAL;
-       }
-       stride = roundup_pow_of_two(state->base.crtc_w) * 4;
-       if (obj->base.size < stride * state->base.crtc_h) {
-               DRM_DEBUG_KMS("buffer is too small\n");
-               return -ENOMEM;
-       }
-       if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
-               DRM_DEBUG_KMS("cursor cannot be tiled\n");
-               return -EINVAL;
-       }
-       /*
-        * There's something wrong with the cursor on CHV pipe C.
-        * If it straddles the left edge of the screen then
-        * moving it away from the edge or disabling it often
-        * results in a pipe underrun, and often that can lead to
-        * dead pipe (constant underrun reported, and it scans
-        * out just a solid color). To recover from that, the
-        * display power well must be turned off and on again.
-        * Refuse the put the cursor into that compromised position.
-        */
-       if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
-           state->base.visible && state->base.crtc_x < 0) {
-               DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
-               return -EINVAL;
-       }
-       if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
-               state->ctl = i845_cursor_ctl(crtc_state, state);
-       else
-               state->ctl = i9xx_cursor_ctl(crtc_state, state);
-       return 0;
- }
- static void
- intel_disable_cursor_plane(struct drm_plane *plane,
-                          struct drm_crtc *crtc)
- {
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       intel_crtc->cursor_addr = 0;
-       intel_crtc_update_cursor(crtc, NULL);
- }
- static void
- intel_update_cursor_plane(struct drm_plane *plane,
-                         const struct intel_crtc_state *crtc_state,
-                         const struct intel_plane_state *state)
- {
-       struct drm_crtc *crtc = crtc_state->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
-       uint32_t addr;
-       if (!obj)
-               addr = 0;
-       else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
-               addr = intel_plane_ggtt_offset(state);
-       else
-               addr = obj->phys_handle->busaddr;
-       intel_crtc->cursor_addr = addr;
-       intel_crtc_update_cursor(crtc, state);
- }
  static struct intel_plane *
- intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
+                         enum pipe pipe)
  {
        struct intel_plane *cursor = NULL;
        struct intel_plane_state *state = NULL;
        cursor->plane = pipe;
        cursor->id = PLANE_CURSOR;
        cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
-       cursor->check_plane = intel_check_cursor_plane;
-       cursor->update_plane = intel_update_cursor_plane;
-       cursor->disable_plane = intel_disable_cursor_plane;
+       if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
+               cursor->update_plane = i845_update_cursor;
+               cursor->disable_plane = i845_disable_cursor;
+               cursor->check_plane = i845_check_cursor;
+       } else {
+               cursor->update_plane = i9xx_update_cursor;
+               cursor->disable_plane = i9xx_disable_cursor;
+               cursor->check_plane = i9xx_check_cursor;
+       }
+       cursor->cursor.base = ~0;
+       cursor->cursor.cntl = ~0;
+       if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
+               cursor->cursor.size = ~0;
  
        ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
                                       0, &intel_cursor_plane_funcs,
  
        if (INTEL_GEN(dev_priv) >= 4)
                drm_plane_create_rotation_property(&cursor->base,
 -                                                 DRM_ROTATE_0,
 -                                                 DRM_ROTATE_0 |
 -                                                 DRM_ROTATE_180);
 +                                                 DRM_MODE_ROTATE_0,
 +                                                 DRM_MODE_ROTATE_0 |
 +                                                 DRM_MODE_ROTATE_180);
  
        if (INTEL_GEN(dev_priv) >= 9)
                state->scaler_id = -1;
@@@ -13873,10 -13984,6 +13978,6 @@@ static int intel_crtc_init(struct drm_i
        intel_crtc->pipe = pipe;
        intel_crtc->plane = primary->plane;
  
-       intel_crtc->cursor_base = ~0;
-       intel_crtc->cursor_cntl = ~0;
-       intel_crtc->cursor_size = ~0;
        /* initialize shared scalers */
        intel_crtc_init_scalers(intel_crtc, crtc_state);
  
@@@ -14416,7 -14523,7 +14517,7 @@@ static int intel_framebuffer_init(struc
        case DRM_FORMAT_UYVY:
        case DRM_FORMAT_YVYU:
        case DRM_FORMAT_VYUY:
-               if (INTEL_GEN(dev_priv) < 5) {
+               if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
                        DRM_DEBUG_KMS("unsupported pixel format: %s\n",
                                      drm_get_format_name(mode_cmd->pixel_format, &format_name));
                        goto err;
@@@ -14928,6 -15035,7 +15029,7 @@@ int intel_modeset_init(struct drm_devic
  
        dev->mode_config.funcs = &intel_mode_funcs;
  
+       init_llist_head(&dev_priv->atomic_helper.free_list);
        INIT_WORK(&dev_priv->atomic_helper.free_work,
                  intel_atomic_helper_free_state_worker);
  
@@@ -15149,7 -15257,7 +15251,7 @@@ static void intel_sanitize_crtc(struct 
                                continue;
  
                        trace_intel_disable_plane(&plane->base, crtc);
-                       plane->disable_plane(&plane->base, &crtc->base);
+                       plane->disable_plane(plane, crtc);
                }
        }
  
@@@ -15419,6 -15527,8 +15521,6 @@@ static void intel_modeset_readout_hw_st
                        to_intel_crtc_state(crtc->base.state);
                int pixclk = 0;
  
 -              crtc->base.hwmode = crtc_state->base.adjusted_mode;
 -
                memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
                if (crtc_state->base.active) {
                        intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
                        if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
                                pixclk = DIV_ROUND_UP(pixclk * 100, 95);
  
 -                      drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
 +                      drm_calc_timestamping_constants(&crtc->base,
 +                                                      &crtc_state->base.adjusted_mode);
                        update_scanline_offset(crtc);
                }
  
@@@ -15520,7 -15629,10 +15622,10 @@@ intel_modeset_setup_hw_state(struct drm
                pll->on = false;
        }
  
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+       if (IS_G4X(dev_priv)) {
+               g4x_wm_get_hw_state(dev);
+               g4x_wm_sanitize(dev_priv);
+       } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                vlv_wm_get_hw_state(dev);
                vlv_wm_sanitize(dev_priv);
        } else if (IS_GEN9(dev_priv)) {
@@@ -15554,13 -15666,6 +15659,6 @@@ void intel_display_resume(struct drm_de
        if (state)
                state->acquire_ctx = &ctx;
  
-       /*
-        * This is a cludge because with real atomic modeset mode_config.mutex
-        * won't be taken. Unfortunately some probed state like
-        * audio_codec_enable is still protected by mode_config.mutex, so lock
-        * it here for now.
-        */
-       mutex_lock(&dev->mode_config.mutex);
        drm_modeset_acquire_init(&ctx, 0);
  
        while (1) {
  
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);
-       mutex_unlock(&dev->mode_config.mutex);
  
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
index 1dee9933005fdbc9860907d5a248d7063de1e482,5af22a7c11bfae042334a8ac8f938e3eab971a68..3715386e427270bc7409224cd2312c7de9a28896
@@@ -39,7 -39,7 +39,7 @@@ static bool intel_dp_mst_compute_config
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct intel_connector *connector =
                to_intel_connector(conn_state->connector);
 -      struct drm_atomic_state *state;
 +      struct drm_atomic_state *state = pipe_config->base.state;
        int bpp;
        int lane_count, slots;
        const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
         * for MST we always configure max link bw - the spec doesn't
         * seem to suggest we should do otherwise.
         */
-       lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+       lane_count = intel_dp_max_lane_count(intel_dp);
        pipe_config->lane_count = lane_count;
  
        pipe_config->pipe_bpp = bpp;
 -      pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
  
 -      state = pipe_config->base.state;
 +      pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
  
        if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, connector->port))
                pipe_config->has_audio = true;
 -      mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
  
 +      mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
        pipe_config->pbn = mst_pbn;
 -      slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn);
 +
 +      slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
 +                                            connector->port, mst_pbn);
 +      if (slots < 0) {
 +              DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
 +              return false;
 +      }
  
        intel_link_compute_m_n(bpp, lane_count,
                               adjusted_mode->crtc_clock,
        pipe_config->dp_m_n.tu = slots;
  
        return true;
 +}
  
 +static int intel_dp_mst_atomic_check(struct drm_connector *connector,
 +              struct drm_connector_state *new_conn_state)
 +{
 +      struct drm_atomic_state *state = new_conn_state->state;
 +      struct drm_connector_state *old_conn_state;
 +      struct drm_crtc *old_crtc;
 +      struct drm_crtc_state *crtc_state;
 +      int slots, ret = 0;
 +
 +      old_conn_state = drm_atomic_get_old_connector_state(state, connector);
 +      old_crtc = old_conn_state->crtc;
 +      if (!old_crtc)
 +              return ret;
 +
 +      crtc_state = drm_atomic_get_new_crtc_state(state, old_crtc);
 +      slots = to_intel_crtc_state(crtc_state)->dp_m_n.tu;
 +      if (drm_atomic_crtc_needs_modeset(crtc_state) && slots > 0) {
 +              struct drm_dp_mst_topology_mgr *mgr;
 +              struct drm_encoder *old_encoder;
 +
 +              old_encoder = old_conn_state->best_encoder;
 +              mgr = &enc_to_mst(old_encoder)->primary->dp.mst_mgr;
 +
 +              ret = drm_dp_atomic_release_vcpi_slots(state, mgr, slots);
 +              if (ret)
 +                      DRM_DEBUG_KMS("failed releasing %d vcpi slots:%d\n", slots, ret);
 +              else
 +                      to_intel_crtc_state(crtc_state)->dp_m_n.tu = 0;
 +      }
 +      return ret;
  }
  
  static void intel_mst_disable_dp(struct intel_encoder *encoder,
@@@ -329,14 -294,6 +330,6 @@@ intel_dp_mst_detect(struct drm_connecto
        return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
  }
  
- static int
- intel_dp_mst_set_property(struct drm_connector *connector,
-                         struct drm_property *property,
-                         uint64_t val)
- {
-       return 0;
- }
  static void
  intel_dp_mst_connector_destroy(struct drm_connector *connector)
  {
@@@ -353,8 -310,7 +346,7 @@@ static const struct drm_connector_func
        .dpms = drm_atomic_helper_connector_dpms,
        .detect = intel_dp_mst_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
-       .set_property = intel_dp_mst_set_property,
-       .atomic_get_property = intel_connector_atomic_get_property,
+       .set_property = drm_atomic_helper_connector_set_property,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_dp_mst_connector_destroy,
@@@ -378,7 -334,7 +370,7 @@@ intel_dp_mst_mode_valid(struct drm_conn
        int max_rate, mode_rate, max_lanes, max_link_clock;
  
        max_link_clock = intel_dp_max_link_rate(intel_dp);
-       max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
+       max_lanes = intel_dp_max_lane_count(intel_dp);
  
        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(mode->clock, bpp);
@@@ -422,7 -378,6 +414,7 @@@ static const struct drm_connector_helpe
        .mode_valid = intel_dp_mst_mode_valid,
        .atomic_best_encoder = intel_mst_atomic_best_encoder,
        .best_encoder = intel_mst_best_encoder,
 +      .atomic_check = intel_dp_mst_atomic_check,
  };
  
  static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder)
@@@ -495,7 -450,6 +487,6 @@@ static struct drm_connector *intel_dp_a
                drm_mode_connector_attach_encoder(&intel_connector->base,
                                                  &intel_dp->mst_encoders[i]->base.base);
        }
-       intel_dp_add_properties(intel_dp, connector);
  
        drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
        drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
index 48ea8d9d49fe27960ff4b8447fe3c6aa1a48250b,cc1370686193e8663d437393d00637566e564f48..bd500977b3fc63bb02053b3ed7efcb037c0c06e4
@@@ -88,7 -88,6 +88,6 @@@
        int cpu, ret, timeout = (US) * 1000; \
        u64 base; \
        _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
-       BUILD_BUG_ON((US) > 50000); \
        if (!(ATOMIC)) { \
                preempt_disable(); \
                cpu = smp_processor_id(); \
        ret__; \
  })
  
- #define wait_for_atomic(COND, MS)     _wait_for_atomic((COND), (MS) * 1000, 1)
- #define wait_for_atomic_us(COND, US)  _wait_for_atomic((COND), (US), 1)
+ #define wait_for_atomic_us(COND, US) \
+ ({ \
+       BUILD_BUG_ON(!__builtin_constant_p(US)); \
+       BUILD_BUG_ON((US) > 50000); \
+       _wait_for_atomic((COND), (US), 1); \
+ })
+ #define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
  
  #define KHz(x) (1000 * (x))
  #define MHz(x) KHz(1000 * (x))
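The rework above folds the BUILD_BUG_ON checks into wait_for_atomic_us() and adds a __builtin_constant_p() guard, so a non-constant or over-50000us timeout is now rejected at compile time rather than silently accepted. A hedged illustration (dev_priv, FAKE_REG and FAKE_READY_BIT are made-up placeholders, assumed in scope):

	/* Builds: constant 10 us timeout, well under the 50000 us cap. */
	err = wait_for_atomic_us(I915_READ_FW(FAKE_REG) & FAKE_READY_BIT, 10);

	/* Would no longer build: timeout_us is not a compile-time constant,
	 * so BUILD_BUG_ON(!__builtin_constant_p(US)) fires.
	 *
	 * err = wait_for_atomic_us(I915_READ_FW(FAKE_REG) & FAKE_READY_BIT, timeout_us);
	 */
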
@@@ -321,6 -326,9 +326,9 @@@ struct intel_connector 
        void *port; /* store this opaque as its illegal to dereference it */
  
        struct intel_dp *mst_port;
+       /* Work struct to schedule a uevent on link train failure */
+       struct work_struct modeset_retry_work;
  };
  
  struct dpll {
@@@ -504,8 -512,8 +512,8 @@@ enum vlv_wm_level 
  };
  
  struct vlv_wm_state {
-       struct vlv_pipe_wm wm[NUM_VLV_WM_LEVELS];
-       struct vlv_sr_wm sr[NUM_VLV_WM_LEVELS];
+       struct g4x_pipe_wm wm[NUM_VLV_WM_LEVELS];
+       struct g4x_sr_wm sr[NUM_VLV_WM_LEVELS];
        uint8_t num_levels;
        bool cxsr;
  };
@@@ -514,6 -522,22 +522,22 @@@ struct vlv_fifo_state 
        u16 plane[I915_MAX_PLANES];
  };
  
+ enum g4x_wm_level {
+       G4X_WM_LEVEL_NORMAL,
+       G4X_WM_LEVEL_SR,
+       G4X_WM_LEVEL_HPLL,
+       NUM_G4X_WM_LEVELS,
+ };
+ struct g4x_wm_state {
+       struct g4x_pipe_wm wm;
+       struct g4x_sr_wm sr;
+       struct g4x_sr_wm hpll;
+       bool cxsr;
+       bool hpll_en;
+       bool fbc_en;
+ };
  struct intel_crtc_wm_state {
        union {
                struct {
  
                struct {
                        /* "raw" watermarks (not inverted) */
-                       struct vlv_pipe_wm raw[NUM_VLV_WM_LEVELS];
+                       struct g4x_pipe_wm raw[NUM_VLV_WM_LEVELS];
                        /* intermediate watermarks (inverted) */
                        struct vlv_wm_state intermediate;
                        /* optimal watermarks (inverted) */
                        /* display FIFO split */
                        struct vlv_fifo_state fifo_state;
                } vlv;
+               struct {
+                       /* "raw" watermarks */
+                       struct g4x_pipe_wm raw[NUM_G4X_WM_LEVELS];
+                       /* intermediate watermarks */
+                       struct g4x_wm_state intermediate;
+                       /* optimal watermarks */
+                       struct g4x_wm_state optimal;
+               } g4x;
        };
  
        /*
@@@ -766,11 -799,6 +799,6 @@@ struct intel_crtc 
        int adjusted_x;
        int adjusted_y;
  
-       uint32_t cursor_addr;
-       uint32_t cursor_cntl;
-       uint32_t cursor_size;
-       uint32_t cursor_base;
        struct intel_crtc_state *config;
  
        /* global reset count when the last flip was submitted */
                union {
                        struct intel_pipe_wm ilk;
                        struct vlv_wm_state vlv;
+                       struct g4x_wm_state g4x;
                } active;
        } wm;
  
@@@ -811,18 -840,22 +840,22 @@@ struct intel_plane 
        int max_downscale;
        uint32_t frontbuffer_bit;
  
+       struct {
+               u32 base, cntl, size;
+       } cursor;
        /*
         * NOTE: Do not place new plane state fields here (e.g., when adding
         * new plane properties).  New runtime state should now be placed in
         * the intel_plane_state structure and accessed via plane_state.
         */
  
-       void (*update_plane)(struct drm_plane *plane,
+       void (*update_plane)(struct intel_plane *plane,
                             const struct intel_crtc_state *crtc_state,
                             const struct intel_plane_state *plane_state);
-       void (*disable_plane)(struct drm_plane *plane,
-                             struct drm_crtc *crtc);
-       int (*check_plane)(struct drm_plane *plane,
+       void (*disable_plane)(struct intel_plane *plane,
+                             struct intel_crtc *crtc);
+       int (*check_plane)(struct intel_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
  };
@@@ -869,6 -902,7 +902,6 @@@ struct intel_hdmi 
        bool has_audio;
        enum hdmi_force_audio force_audio;
        bool rgb_quant_range_selectable;
 -      enum hdmi_picture_aspect aspect_ratio;
        struct intel_connector *attached_connector;
        void (*write_infoframe)(struct drm_encoder *encoder,
                                const struct intel_crtc_state *crtc_state,
@@@ -948,13 -982,20 +981,20 @@@ struct intel_dp 
        uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
        uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
        uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
-       /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
-       uint8_t num_sink_rates;
+       /* source rates */
+       int num_source_rates;
+       const int *source_rates;
+       /* sink rates as reported by DP_MAX_LINK_RATE/DP_SUPPORTED_LINK_RATES */
+       int num_sink_rates;
        int sink_rates[DP_MAX_SUPPORTED_RATES];
-       /* Max lane count for the sink as per DPCD registers */
-       uint8_t max_sink_lane_count;
-       /* Max link BW for the sink as per DPCD registers */
-       int max_sink_link_bw;
+       bool use_rate_select;
+       /* intersection of source and sink rates */
+       int num_common_rates;
+       int common_rates[DP_MAX_SUPPORTED_RATES];
+       /* Max lane count for the current link */
+       int max_link_lane_count;
+       /* Max rate for the current link */
+       int max_link_rate;
        /* sink or branch descriptor */
        struct intel_dp_desc desc;
        struct drm_dp_aux aux;
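The common_rates[] field added above caches the intersection of the source and sink rate tables. A minimal sketch of one way to compute such an intersection, assuming both inputs are sorted ascending (this is not the driver's actual helper):

	static int example_intersect_rates(const int *a, int na,
					   const int *b, int nb, int *out)
	{
		int i = 0, j = 0, k = 0;

		while (i < na && j < nb) {
			if (a[i] == b[j]) {
				out[k++] = a[i++];
				j++;
			} else if (a[i] < b[j]) {
				i++;
			} else {
				j++;
			}
		}
		return k;	/* number of common rates written to out[] */
	}
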
@@@ -1491,10 -1532,10 +1531,10 @@@ void intel_edp_backlight_off(struct int
  void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
  void intel_edp_panel_on(struct intel_dp *intel_dp);
  void intel_edp_panel_off(struct intel_dp *intel_dp);
- void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
  void intel_dp_mst_suspend(struct drm_device *dev);
  void intel_dp_mst_resume(struct drm_device *dev);
  int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+ int intel_dp_max_lane_count(struct intel_dp *intel_dp);
  int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
  void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
  void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
@@@ -1825,6 -1866,7 +1865,7 @@@ void gen6_rps_boost(struct drm_i915_pri
                    struct intel_rps_client *rps,
                    unsigned long submitted);
  void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
+ void g4x_wm_get_hw_state(struct drm_device *dev);
  void vlv_wm_get_hw_state(struct drm_device *dev);
  void ilk_wm_get_hw_state(struct drm_device *dev);
  void skl_wm_get_hw_state(struct drm_device *dev);
@@@ -1832,6 -1874,7 +1873,7 @@@ void skl_ddb_get_hw_state(struct drm_i9
                          struct skl_ddb_allocation *ddb /* out */);
  void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
                              struct skl_pipe_wm *out);
+ void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
  void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
  bool intel_can_enable_sagv(struct drm_atomic_state *state);
  int intel_enable_sagv(struct drm_i915_private *dev_priv);
index db7f8f0a1f36521dbb30dbe1019637230898df60,64e44d9fa6a53adfc8de4288e0669a6971e54343..ff2fc5bc4af479b9c50855f47d8f55719865c007
@@@ -801,7 -801,7 +801,7 @@@ static bool intel_fbc_can_activate(stru
                return false;
        }
        if (INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
 -          cache->plane.rotation != DRM_ROTATE_0) {
 +          cache->plane.rotation != DRM_MODE_ROTATE_0) {
                fbc->no_fbc_reason = "rotation unsupported";
                return false;
        }
@@@ -1312,14 -1312,12 +1312,12 @@@ static int intel_sanitize_fbc_option(st
  
  static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
  {
- #ifdef CONFIG_INTEL_IOMMU
        /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
-       if (intel_iommu_gfx_mapped &&
+       if (intel_vtd_active() &&
            (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
                DRM_INFO("Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
                return true;
        }
- #endif
  
        return false;
  }
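The hunk above drops the CONFIG_INTEL_IOMMU #ifdef in favour of intel_vtd_active(). Judging purely from the lines it replaces, the helper presumably wraps the same intel_iommu_gfx_mapped check; a hedged sketch (the real definition lives in i915_drv.h and may cover more cases):

	static inline bool intel_vtd_active(void)
	{
	#ifdef CONFIG_INTEL_IOMMU
		if (intel_iommu_gfx_mapped)
			return true;
	#endif
		return false;
	}
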
index c6b8207724fadf64d7c58e7ed8ec8fb3c8284c70,52f0b2d5fad269e90f65e34381efb2dfbdb6b774..58d690393b294b3f2b71ef6a004b14819dfe5b91
@@@ -1327,6 -1327,11 +1327,11 @@@ static bool hdmi_12bpc_possible(struct 
                        return false;
        }
  
+       /* Display Wa #1139 */
+       if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
+           crtc_state->base.adjusted_mode.htotal > 5460)
+               return false;
        return true;
  }
  
@@@ -1392,7 -1397,7 +1397,7 @@@ bool intel_hdmi_compute_config(struct i
        }
  
        if (!pipe_config->bw_constrained) {
-               DRM_DEBUG_KMS("forcing pipe bpc to %i for HDMI\n", desired_bpp);
+               DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp);
                pipe_config->pipe_bpp = desired_bpp;
        }
  
        }
  
        /* Set user selected PAR to incoming mode's member */
 -      adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
 +      adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
  
        pipe_config->lane_count = 4;
  
@@@ -1649,7 -1654,19 +1654,7 @@@ intel_hdmi_set_property(struct drm_conn
        }
  
        if (property == connector->dev->mode_config.aspect_ratio_property) {
 -              switch (val) {
 -              case DRM_MODE_PICTURE_ASPECT_NONE:
 -                      intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 -                      break;
 -              case DRM_MODE_PICTURE_ASPECT_4_3:
 -                      intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
 -                      break;
 -              case DRM_MODE_PICTURE_ASPECT_16_9:
 -                      intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
 -                      break;
 -              default:
 -                      return -EINVAL;
 -              }
 +              connector->state->picture_aspect_ratio = val;
                goto done;
        }
  
@@@ -1811,7 -1828,7 +1816,7 @@@ intel_hdmi_add_properties(struct intel_
        intel_attach_broadcast_rgb_property(connector);
        intel_hdmi->color_range_auto = true;
        intel_attach_aspect_ratio_property(connector);
 -      intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 +      connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
  }
  
  /*
index ef6fa87b2f8aa85c96b55b33de90afbae06e2a6b,496b24c03222e942dfad0b6e16118e897b725c94..6cc181203135e09f4f90eb0f8bef8a169ea6fd79
@@@ -106,6 -106,11 +106,6 @@@ struct intel_sdvo 
        uint32_t color_range;
        bool color_range_auto;
  
 -      /**
 -       * HDMI user specified aspect ratio
 -       */
 -      enum hdmi_picture_aspect aspect_ratio;
 -
        /**
         * This is set if we're going to treat the device as TV-out.
         *
@@@ -1181,7 -1186,7 +1181,7 @@@ static bool intel_sdvo_compute_config(s
  
        /* Set user selected PAR to incoming mode's member */
        if (intel_sdvo->is_hdmi)
 -              adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio;
 +              adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
  
        return true;
  }
@@@ -2062,7 -2067,19 +2062,7 @@@ intel_sdvo_set_property(struct drm_conn
        }
  
        if (property == connector->dev->mode_config.aspect_ratio_property) {
 -              switch (val) {
 -              case DRM_MODE_PICTURE_ASPECT_NONE:
 -                      intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 -                      break;
 -              case DRM_MODE_PICTURE_ASPECT_4_3:
 -                      intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
 -                      break;
 -              case DRM_MODE_PICTURE_ASPECT_16_9:
 -                      intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
 -                      break;
 -              default:
 -                      return -EINVAL;
 -              }
 +              connector->state->picture_aspect_ratio = val;
                goto done;
        }
  
@@@ -2401,7 -2418,7 +2401,7 @@@ intel_sdvo_add_hdmi_properties(struct i
                intel_sdvo->color_range_auto = true;
        }
        intel_attach_aspect_ratio_property(&connector->base.base);
 -      intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 +      connector->base.base.state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
  }
  
  static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
@@@ -2875,11 -2892,10 +2875,10 @@@ static bool intel_sdvo_create_enhance_p
  
        BUILD_BUG_ON(sizeof(enhancements) != 2);
  
-       enhancements.response = 0;
-       intel_sdvo_get_value(intel_sdvo,
-                            SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
-                            &enhancements, sizeof(enhancements));
-       if (enhancements.response == 0) {
+       if (!intel_sdvo_get_value(intel_sdvo,
+                                 SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+                                 &enhancements, sizeof(enhancements)) ||
+           enhancements.response == 0) {
                DRM_DEBUG_KMS("No enhancement is supported\n");
                return true;
        }
index 191e14ddde0c621bc502737a29201106e7fe4063,9dfd5b343497cdd50f96130149893463ab3ef7e8..c4bf19364e490c9358780b7d8872d681773aed69
@@@ -198,28 -198,23 +198,26 @@@ void intel_pipe_update_end(struct intel
                          ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
                          crtc->debug.min_vbl, crtc->debug.max_vbl,
                          crtc->debug.scanline_start, scanline_end);
 -      } else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
 -                 VBLANK_EVASION_TIME_US)
 +      }
 +#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
 +      else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
 +               VBLANK_EVASION_TIME_US)
                DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
                         pipe_name(pipe),
                         ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
                         VBLANK_EVASION_TIME_US);
 +#endif
  }
  
  static void
- skl_update_plane(struct drm_plane *drm_plane,
+ skl_update_plane(struct intel_plane *plane,
                 const struct intel_crtc_state *crtc_state,
                 const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = drm_plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(drm_plane);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       enum plane_id plane_id = intel_plane->id;
-       enum pipe pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
        u32 plane_ctl = plane_state->ctl;
        const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 surf_addr = plane_state->main.offset;
  }
  
  static void
- skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+ skl_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
  {
-       struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(dplane);
-       enum plane_id plane_id = intel_plane->id;
-       enum pipe pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
+       enum pipe pipe = plane->pipe;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  }
  
  static void
- chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
+ chv_update_csc(struct intel_plane *plane, uint32_t format)
  {
-       struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
-       enum plane_id plane_id = intel_plane->id;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum plane_id plane_id = plane->id;
  
        /* Seems RGB data bypasses the CSC always */
        if (!format_is_yuv(format))
@@@ -398,10 -391,10 +394,10 @@@ static u32 vlv_sprite_ctl(const struct 
        if (fb->modifier == I915_FORMAT_MOD_X_TILED)
                sprctl |= SP_TILED;
  
 -      if (rotation & DRM_ROTATE_180)
 +      if (rotation & DRM_MODE_ROTATE_180)
                sprctl |= SP_ROTATE_180;
  
 -      if (rotation & DRM_REFLECT_X)
 +      if (rotation & DRM_MODE_REFLECT_X)
                sprctl |= SP_MIRROR;
  
        if (key->flags & I915_SET_COLORKEY_SOURCE)
  }
  
  static void
- vlv_update_plane(struct drm_plane *dplane,
+ vlv_update_plane(struct intel_plane *plane,
                 const struct intel_crtc_state *crtc_state,
                 const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(dplane);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       enum pipe pipe = intel_plane->pipe;
-       enum plane_id plane_id = intel_plane->id;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
        u32 sprctl = plane_state->ctl;
        u32 sprsurf_offset = plane_state->main.offset;
        u32 linear_offset;
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
        if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
-               chv_update_csc(intel_plane, fb->format->format);
+               chv_update_csc(plane, fb->format->format);
  
        if (key->flags) {
                I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
  }
  
  static void
- vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+ vlv_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
  {
-       struct drm_device *dev = dplane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(dplane);
-       enum pipe pipe = intel_plane->pipe;
-       enum plane_id plane_id = intel_plane->id;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
+       enum plane_id plane_id = plane->id;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@@ -533,7 -522,7 +525,7 @@@ static u32 ivb_sprite_ctl(const struct 
        if (fb->modifier == I915_FORMAT_MOD_X_TILED)
                sprctl |= SPRITE_TILED;
  
 -      if (rotation & DRM_ROTATE_180)
 +      if (rotation & DRM_MODE_ROTATE_180)
                sprctl |= SPRITE_ROTATE_180;
  
        if (key->flags & I915_SET_COLORKEY_DESTINATION)
  }
  
  static void
- ivb_update_plane(struct drm_plane *plane,
+ ivb_update_plane(struct intel_plane *plane,
                 const struct intel_crtc_state *crtc_state,
                 const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       enum pipe pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
        u32 sprctl = plane_state->ctl, sprscale = 0;
        u32 sprsurf_offset = plane_state->main.offset;
        u32 linear_offset;
                I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
  
        I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
-       if (intel_plane->can_scale)
+       if (plane->can_scale)
                I915_WRITE_FW(SPRSCALE(pipe), sprscale);
        I915_WRITE_FW(SPRCTL(pipe), sprctl);
        I915_WRITE_FW(SPRSURF(pipe),
  }
  
  static void
- ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ ivb_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
  {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       int pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  
        I915_WRITE_FW(SPRCTL(pipe), 0);
        /* Can't leave the scaler enabled... */
-       if (intel_plane->can_scale)
+       if (plane->can_scale)
                I915_WRITE_FW(SPRSCALE(pipe), 0);
  
        I915_WRITE_FW(SPRSURF(pipe), 0);
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
  }
  
- static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
                          const struct intel_plane_state *plane_state)
  {
        struct drm_i915_private *dev_priv =
        if (fb->modifier == I915_FORMAT_MOD_X_TILED)
                dvscntr |= DVS_TILED;
  
 -      if (rotation & DRM_ROTATE_180)
 +      if (rotation & DRM_MODE_ROTATE_180)
                dvscntr |= DVS_ROTATE_180;
  
        if (key->flags & I915_SET_COLORKEY_DESTINATION)
  }
  
  static void
- ilk_update_plane(struct drm_plane *plane,
+ g4x_update_plane(struct intel_plane *plane,
                 const struct intel_crtc_state *crtc_state,
                 const struct intel_plane_state *plane_state)
  {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct drm_framebuffer *fb = plane_state->base.fb;
-       int pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       const struct drm_framebuffer *fb = plane_state->base.fb;
+       enum pipe pipe = plane->pipe;
        u32 dvscntr = plane_state->ctl, dvsscale = 0;
        u32 dvssurf_offset = plane_state->main.offset;
        u32 linear_offset;
  }
  
  static void
- ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ g4x_disable_plane(struct intel_plane *plane, struct intel_crtc *crtc)
  {
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       int pipe = intel_plane->pipe;
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       enum pipe pipe = plane->pipe;
        unsigned long irqflags;
  
        spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
  }
  
  static int
- intel_check_sprite_plane(struct drm_plane *plane,
+ intel_check_sprite_plane(struct intel_plane *plane,
                         struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
  {
-       struct drm_i915_private *dev_priv = to_i915(plane->dev);
-       struct drm_crtc *crtc = state->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        struct drm_framebuffer *fb = state->base.fb;
        int crtc_x, crtc_y;
        unsigned int crtc_w, crtc_h;
        }
  
        /* Don't modify another pipe's plane */
-       if (intel_plane->pipe != intel_crtc->pipe) {
+       if (plane->pipe != crtc->pipe) {
                DRM_DEBUG_KMS("Wrong plane <-> crtc mapping\n");
                return -EINVAL;
        }
                if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
                        can_scale = 1;
                        min_scale = 1;
-                       max_scale = skl_max_scale(intel_crtc, crtc_state);
+                       max_scale = skl_max_scale(crtc, crtc_state);
                } else {
                        can_scale = 0;
                        min_scale = DRM_PLANE_HELPER_NO_SCALING;
                        max_scale = DRM_PLANE_HELPER_NO_SCALING;
                }
        } else {
-               can_scale = intel_plane->can_scale;
-               max_scale = intel_plane->max_downscale << 16;
-               min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+               can_scale = plane->can_scale;
+               max_scale = plane->max_downscale << 16;
+               min_scale = plane->can_scale ? 1 : (1 << 16);
        }
  
        /*
                if (ret)
                        return ret;
  
-               state->ctl = ilk_sprite_ctl(crtc_state, state);
+               state->ctl = g4x_sprite_ctl(crtc_state, state);
        }
  
        return 0;
@@@ -1027,7 -1006,7 +1009,7 @@@ out
        return ret;
  }
  
- static const uint32_t ilk_plane_formats[] = {
+ static const uint32_t g4x_plane_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_YUYV,
        DRM_FORMAT_YVYU,
@@@ -1131,29 -1110,29 +1113,29 @@@ intel_sprite_plane_create(struct drm_i9
                intel_plane->can_scale = true;
                intel_plane->max_downscale = 16;
  
-               intel_plane->update_plane = ilk_update_plane;
-               intel_plane->disable_plane = ilk_disable_plane;
+               intel_plane->update_plane = g4x_update_plane;
+               intel_plane->disable_plane = g4x_disable_plane;
  
                if (IS_GEN6(dev_priv)) {
                        plane_formats = snb_plane_formats;
                        num_plane_formats = ARRAY_SIZE(snb_plane_formats);
                } else {
-                       plane_formats = ilk_plane_formats;
-                       num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
+                       plane_formats = g4x_plane_formats;
+                       num_plane_formats = ARRAY_SIZE(g4x_plane_formats);
                }
        }
  
        if (INTEL_GEN(dev_priv) >= 9) {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_90 |
 -                      DRM_ROTATE_180 | DRM_ROTATE_270;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 +                      DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
        } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_180 |
 -                      DRM_REFLECT_X;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
 +                      DRM_MODE_REFLECT_X;
        } else {
                supported_rotations =
 -                      DRM_ROTATE_0 | DRM_ROTATE_180;
 +                      DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
        }
  
        intel_plane->pipe = pipe;
                goto fail;
  
        drm_plane_create_rotation_property(&intel_plane->base,
 -                                         DRM_ROTATE_0,
 +                                         DRM_MODE_ROTATE_0,
                                           supported_rotations);
  
        drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);
index 9f24c5da3f8d3c62b32343e870564f73a740cfd2,4af00000ce9147873fc2a632b4e39bee2232c8a1..627e2aa097665f7667f2cf6a834112dd28ac1a3a
@@@ -30,6 -30,7 +30,7 @@@
  #include "mock_gem_device.h"
  #include "mock_gem_object.h"
  #include "mock_gtt.h"
+ #include "mock_uncore.h"
  
  void mock_device_flush(struct drm_i915_private *i915)
  {
@@@ -73,6 -74,7 +74,7 @@@ static void mock_device_release(struct 
  
        destroy_workqueue(i915->wq);
  
+       kmem_cache_destroy(i915->priorities);
        kmem_cache_destroy(i915->dependencies);
        kmem_cache_destroy(i915->requests);
        kmem_cache_destroy(i915->vmas);
@@@ -119,6 -121,7 +121,7 @@@ struct drm_i915_private *mock_gem_devic
                goto err;
  
        device_initialize(&pdev->dev);
+       pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
        pdev->dev.release = release_dev;
        dev_set_name(&pdev->dev, "mock");
        dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        mkwrite_device_info(i915)->gen = -1;
  
        spin_lock_init(&i915->mm.object_stat_lock);
+       mock_uncore_init(i915);
  
        init_waitqueue_head(&i915->gpu_error.wait_queue);
        init_waitqueue_head(&i915->gpu_error.reset_queue);
        i915->requests = KMEM_CACHE(mock_request,
                                    SLAB_HWCACHE_ALIGN |
                                    SLAB_RECLAIM_ACCOUNT |
 -                                  SLAB_DESTROY_BY_RCU);
 +                                  SLAB_TYPESAFE_BY_RCU);
        if (!i915->requests)
                goto err_vmas;
  
        if (!i915->dependencies)
                goto err_requests;
  
+       i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+       if (!i915->priorities)
+               goto err_dependencies;
        mutex_lock(&i915->drm.struct_mutex);
        INIT_LIST_HEAD(&i915->gt.timelines);
        err = i915_gem_timeline_init__global(i915);
        if (err) {
                mutex_unlock(&i915->drm.struct_mutex);
-               goto err_dependencies;
+               goto err_priorities;
        }
  
        mock_init_ggtt(i915);
  err_engine:
        for_each_engine(engine, i915, id)
                mock_engine_free(engine);
+ err_priorities:
+       kmem_cache_destroy(i915->priorities);
  err_dependencies:
        kmem_cache_destroy(i915->dependencies);
  err_requests:
index f7007e544f29b5558ebf055e424a55f6ab965854,23837f5dc5e1b858cf0f717897cab2b533282fe2..20eb5ca285942653dfa362374eee8fa36c05b1c8
  
  #define DP_GUID                                   0x030   /* 1.2 */
  
 +#define DP_DSC_SUPPORT                      0x060   /* DP 1.4 */
 +# define DP_DSC_DECOMPRESSION_IS_SUPPORTED  (1 << 0)
 +
 +#define DP_DSC_REV                          0x061
 +# define DP_DSC_MAJOR_MASK                  (0xf << 0)
 +# define DP_DSC_MINOR_MASK                  (0xf << 4)
 +# define DP_DSC_MAJOR_SHIFT                 0
 +# define DP_DSC_MINOR_SHIFT                 4
 +
 +#define DP_DSC_RC_BUF_BLK_SIZE              0x062
 +# define DP_DSC_RC_BUF_BLK_SIZE_1           0x0
 +# define DP_DSC_RC_BUF_BLK_SIZE_4           0x1
 +# define DP_DSC_RC_BUF_BLK_SIZE_16          0x2
 +# define DP_DSC_RC_BUF_BLK_SIZE_64          0x3
 +
 +#define DP_DSC_RC_BUF_SIZE                  0x063
 +
 +#define DP_DSC_SLICE_CAP_1                  0x064
 +# define DP_DSC_1_PER_DP_DSC_SINK           (1 << 0)
 +# define DP_DSC_2_PER_DP_DSC_SINK           (1 << 1)
 +# define DP_DSC_4_PER_DP_DSC_SINK           (1 << 3)
 +# define DP_DSC_6_PER_DP_DSC_SINK           (1 << 4)
 +# define DP_DSC_8_PER_DP_DSC_SINK           (1 << 5)
 +# define DP_DSC_10_PER_DP_DSC_SINK          (1 << 6)
 +# define DP_DSC_12_PER_DP_DSC_SINK          (1 << 7)
 +
 +#define DP_DSC_LINE_BUF_BIT_DEPTH           0x065
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_MASK     (0xf << 0)
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_9        0x0
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_10       0x1
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_11       0x2
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_12       0x3
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_13       0x4
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_14       0x5
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_15       0x6
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_16       0x7
 +# define DP_DSC_LINE_BUF_BIT_DEPTH_8        0x8
 +
 +#define DP_DSC_BLK_PREDICTION_SUPPORT       0x066
 +# define DP_DSC_BLK_PREDICTION_IS_SUPPORTED (1 << 0)
 +
 +#define DP_DSC_MAX_BITS_PER_PIXEL_LOW       0x067   /* eDP 1.4 */
 +
 +#define DP_DSC_MAX_BITS_PER_PIXEL_HI        0x068   /* eDP 1.4 */
 +
 +#define DP_DSC_DEC_COLOR_FORMAT_CAP         0x069
 +# define DP_DSC_RGB                         (1 << 0)
 +# define DP_DSC_YCbCr444                    (1 << 1)
 +# define DP_DSC_YCbCr422_Simple             (1 << 2)
 +# define DP_DSC_YCbCr422_Native             (1 << 3)
 +# define DP_DSC_YCbCr420_Native             (1 << 4)
 +
 +#define DP_DSC_DEC_COLOR_DEPTH_CAP          0x06A
 +# define DP_DSC_8_BPC                       (1 << 1)
 +# define DP_DSC_10_BPC                      (1 << 2)
 +# define DP_DSC_12_BPC                      (1 << 3)
 +
 +#define DP_DSC_PEAK_THROUGHPUT              0x06B
 +# define DP_DSC_THROUGHPUT_MODE_0_MASK      (0xf << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_SHIFT     0
 +# define DP_DSC_THROUGHPUT_MODE_0_340       (1 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_400       (2 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_450       (3 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_500       (4 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_550       (5 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_600       (6 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_650       (7 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_700       (8 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_750       (9 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_800       (10 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_850       (11 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_900       (12 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_950       (13 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_0_1000      (14 << 0)
 +# define DP_DSC_THROUGHPUT_MODE_1_MASK      (0xf << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_SHIFT     4
 +# define DP_DSC_THROUGHPUT_MODE_1_340       (1 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_400       (2 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_450       (3 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_500       (4 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_550       (5 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_600       (6 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_650       (7 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_700       (8 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_750       (9 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_800       (10 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_850       (11 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_900       (12 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_950       (13 << 4)
 +# define DP_DSC_THROUGHPUT_MODE_1_1000      (14 << 4)
 +
 +#define DP_DSC_MAX_SLICE_WIDTH              0x06C
 +
 +#define DP_DSC_SLICE_CAP_2                  0x06D
 +# define DP_DSC_16_PER_DP_DSC_SINK          (1 << 0)
 +# define DP_DSC_20_PER_DP_DSC_SINK          (1 << 1)
 +# define DP_DSC_24_PER_DP_DSC_SINK          (1 << 2)
 +
 +#define DP_DSC_BITS_PER_PIXEL_INC           0x06F
 +# define DP_DSC_BITS_PER_PIXEL_1_16         0x0
 +# define DP_DSC_BITS_PER_PIXEL_1_8          0x1
 +# define DP_DSC_BITS_PER_PIXEL_1_4          0x2
 +# define DP_DSC_BITS_PER_PIXEL_1_2          0x3
 +# define DP_DSC_BITS_PER_PIXEL_1            0x4
 +
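The DSC capability registers added above are mostly packed bitfields. As a worked decode of one of them (drm_dp_dpcd_readb() is the existing DRM helper; the wrapper function is illustrative only):

	static void example_read_dsc_rev(struct drm_dp_aux *aux)
	{
		u8 rev = 0;

		if (drm_dp_dpcd_readb(aux, DP_DSC_REV, &rev) == 1) {
			u8 major = (rev & DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
			u8 minor = (rev & DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT;

			/* e.g. rev == 0x11 decodes as DSC 1.1 */
			DRM_DEBUG_KMS("sink DSC rev %u.%u\n", major, minor);
		}
	}
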
  #define DP_PSR_SUPPORT                      0x070   /* XXX 1.2? */
  # define DP_PSR_IS_SUPPORTED                1
  # define DP_PSR2_IS_SUPPORTED             2       /* eDP 1.4 */
  #define DP_AUX_FRAME_SYNC_VALUE                   0x15c   /* eDP 1.4 */
  # define DP_AUX_FRAME_SYNC_VALID          (1 << 0)
  
 +#define DP_DSC_ENABLE                       0x160   /* DP 1.4 */
 +
  #define DP_PSR_EN_CFG                     0x170   /* XXX 1.2? */
  # define DP_PSR_ENABLE                            (1 << 0)
  # define DP_PSR_MAIN_LINK_ACTIVE          (1 << 1)
  #define DP_EDP_PWMGEN_BIT_COUNT             0x724
  #define DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN     0x725
  #define DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX     0x726
+ # define DP_EDP_PWMGEN_BIT_COUNT_MASK       (0x1f << 0)
  
  #define DP_EDP_BACKLIGHT_CONTROL_STATUS     0x727
  
  #define DP_EDP_BACKLIGHT_FREQ_SET           0x728
+ # define DP_EDP_BACKLIGHT_FREQ_BASE_KHZ     27000
  
  #define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MSB   0x72a
  #define DP_EDP_BACKLIGHT_FREQ_CAP_MIN_MID   0x72b
  #define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0   0x2003   /* 1.2 */
  
  #define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1   0x2004   /* 1.2 */
 +# define DP_RX_GTC_MSTR_REQ_STATUS_CHANGE    (1 << 0)
 +# define DP_LOCK_ACQUISITION_REQUEST         (1 << 1)
 +# define DP_CEC_IRQ                          (1 << 2)
  
  #define DP_LINK_SERVICE_IRQ_VECTOR_ESI0     0x2005   /* 1.2 */
  
  # define DP_VSC_EXT_CEA_SDP_SUPPORTED                 (1 << 6)  /* DP 1.4 */
  # define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED                (1 << 7)  /* DP 1.4 */
  
 +/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
 +#define DP_CEC_TUNNELING_CAPABILITY            0x3000
 +# define DP_CEC_TUNNELING_CAPABLE               (1 << 0)
 +# define DP_CEC_SNOOPING_CAPABLE                (1 << 1)
 +# define DP_CEC_MULTIPLE_LA_CAPABLE             (1 << 2)
 +
 +#define DP_CEC_TUNNELING_CONTROL               0x3001
 +# define DP_CEC_TUNNELING_ENABLE                (1 << 0)
 +# define DP_CEC_SNOOPING_ENABLE                 (1 << 1)
 +
 +#define DP_CEC_RX_MESSAGE_INFO                 0x3002
 +# define DP_CEC_RX_MESSAGE_LEN_MASK             (0xf << 0)
 +# define DP_CEC_RX_MESSAGE_LEN_SHIFT            0
 +# define DP_CEC_RX_MESSAGE_HPD_STATE            (1 << 4)
 +# define DP_CEC_RX_MESSAGE_HPD_LOST             (1 << 5)
 +# define DP_CEC_RX_MESSAGE_ACKED                (1 << 6)
 +# define DP_CEC_RX_MESSAGE_ENDED                (1 << 7)
 +
 +#define DP_CEC_TX_MESSAGE_INFO                 0x3003
 +# define DP_CEC_TX_MESSAGE_LEN_MASK             (0xf << 0)
 +# define DP_CEC_TX_MESSAGE_LEN_SHIFT            0
 +# define DP_CEC_TX_RETRY_COUNT_MASK             (0x7 << 4)
 +# define DP_CEC_TX_RETRY_COUNT_SHIFT            4
 +# define DP_CEC_TX_MESSAGE_SEND                 (1 << 7)
 +
 +#define DP_CEC_TUNNELING_IRQ_FLAGS             0x3004
 +# define DP_CEC_RX_MESSAGE_INFO_VALID           (1 << 0)
 +# define DP_CEC_RX_MESSAGE_OVERFLOW             (1 << 1)
 +# define DP_CEC_TX_MESSAGE_SENT                 (1 << 4)
 +# define DP_CEC_TX_LINE_ERROR                   (1 << 5)
 +# define DP_CEC_TX_ADDRESS_NACK_ERROR           (1 << 6)
 +# define DP_CEC_TX_DATA_NACK_ERROR              (1 << 7)
 +
 +#define DP_CEC_LOGICAL_ADDRESS_MASK            0x300E /* 0x300F word */
 +# define DP_CEC_LOGICAL_ADDRESS_0               (1 << 0)
 +# define DP_CEC_LOGICAL_ADDRESS_1               (1 << 1)
 +# define DP_CEC_LOGICAL_ADDRESS_2               (1 << 2)
 +# define DP_CEC_LOGICAL_ADDRESS_3               (1 << 3)
 +# define DP_CEC_LOGICAL_ADDRESS_4               (1 << 4)
 +# define DP_CEC_LOGICAL_ADDRESS_5               (1 << 5)
 +# define DP_CEC_LOGICAL_ADDRESS_6               (1 << 6)
 +# define DP_CEC_LOGICAL_ADDRESS_7               (1 << 7)
 +#define DP_CEC_LOGICAL_ADDRESS_MASK_2          0x300F /* 0x300E word */
 +# define DP_CEC_LOGICAL_ADDRESS_8               (1 << 0)
 +# define DP_CEC_LOGICAL_ADDRESS_9               (1 << 1)
 +# define DP_CEC_LOGICAL_ADDRESS_10              (1 << 2)
 +# define DP_CEC_LOGICAL_ADDRESS_11              (1 << 3)
 +# define DP_CEC_LOGICAL_ADDRESS_12              (1 << 4)
 +# define DP_CEC_LOGICAL_ADDRESS_13              (1 << 5)
 +# define DP_CEC_LOGICAL_ADDRESS_14              (1 << 6)
 +# define DP_CEC_LOGICAL_ADDRESS_15              (1 << 7)
 +
 +#define DP_CEC_RX_MESSAGE_BUFFER               0x3010
 +#define DP_CEC_TX_MESSAGE_BUFFER               0x3020
 +#define DP_CEC_MESSAGE_BUFFER_LENGTH             0x10
 +
  /* DP 1.2 Sideband message defines */
  /* peer device type - DP 1.2a Table 2-92 */
  #define DP_PEER_DEVICE_NONE           0x0
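
The CEC-over-AUX block above only adds register definitions; a hypothetical consumer would gate everything on the capability bit before enabling tunneling. A minimal sketch, with "aux" again assumed to be the port's struct drm_dp_aux:

	u8 cec_cap;

	if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cec_cap) == 1 &&
	    (cec_cap & DP_CEC_TUNNELING_CAPABLE))
		drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL,
				   DP_CEC_TUNNELING_ENABLE);
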
index 664b7fe206d65457cf3e7b1486a05b7e92493cb8,909391d5270cb66cb8e34ca9a20b6282f483ec3c..c19efc9708d7f2fb30e0ac64f4ee34c0023e2186
@@@ -30,7 -30,7 +30,7 @@@
  #include <linux/pm_runtime.h>
  #include <linux/dma-mapping.h>
  #include <linux/delay.h>
 -#include <asm/cacheflush.h>
 +#include <asm/set_memory.h>
  #include <sound/core.h>
  #include <sound/asoundef.h>
  #include <sound/pcm.h>
  #include <drm/intel_lpe_audio.h>
  #include "intel_hdmi_audio.h"
  
+ #define for_each_pipe(card_ctx, pipe) \
+       for ((pipe) = 0; (pipe) < (card_ctx)->num_pipes; (pipe)++)
+ #define for_each_port(card_ctx, port) \
+       for ((port) = 0; (port) < (card_ctx)->num_ports; (port)++)
  /*standard module options for ALSA. This module supports only one card*/
  static int hdmi_card_index = SNDRV_DEFAULT_IDX1;
  static char *hdmi_card_id = SNDRV_DEFAULT_STR1;
@@@ -189,15 -194,30 +194,30 @@@ static void had_substream_put(struct sn
        spin_unlock_irqrestore(&intelhaddata->had_spinlock, flags);
  }
  
+ static u32 had_config_offset(int pipe)
+ {
+       switch (pipe) {
+       default:
+       case 0:
+               return AUDIO_HDMI_CONFIG_A;
+       case 1:
+               return AUDIO_HDMI_CONFIG_B;
+       case 2:
+               return AUDIO_HDMI_CONFIG_C;
+       }
+ }
  /* Register access functions */
- static u32 had_read_register_raw(struct snd_intelhad *ctx, u32 reg)
+ static u32 had_read_register_raw(struct snd_intelhad_card *card_ctx,
+                                int pipe, u32 reg)
  {
-       return ioread32(ctx->mmio_start + ctx->had_config_offset + reg);
+       return ioread32(card_ctx->mmio_start + had_config_offset(pipe) + reg);
  }
  
- static void had_write_register_raw(struct snd_intelhad *ctx, u32 reg, u32 val)
+ static void had_write_register_raw(struct snd_intelhad_card *card_ctx,
+                                  int pipe, u32 reg, u32 val)
  {
-       iowrite32(val, ctx->mmio_start + ctx->had_config_offset + reg);
+       iowrite32(val, card_ctx->mmio_start + had_config_offset(pipe) + reg);
  }
  
  static void had_read_register(struct snd_intelhad *ctx, u32 reg, u32 *val)
        if (!ctx->connected)
                *val = 0;
        else
-               *val = had_read_register_raw(ctx, reg);
+               *val = had_read_register_raw(ctx->card_ctx, ctx->pipe, reg);
  }
  
  static void had_write_register(struct snd_intelhad *ctx, u32 reg, u32 val)
  {
        if (ctx->connected)
-               had_write_register_raw(ctx, reg, val);
+               had_write_register_raw(ctx->card_ctx, ctx->pipe, reg, val);
  }
  
  /*
@@@ -1358,6 -1378,9 +1378,9 @@@ static void had_process_hot_plug(struc
                return;
        }
  
+       /* Disable Audio */
+       had_enable_audio(intelhaddata, false);
        intelhaddata->connected = true;
        dev_dbg(intelhaddata->dev,
                "%s @ %d:DEBUG PLUG/UNPLUG : HAD_DRV_CONNECTED\n",
@@@ -1519,22 -1542,32 +1542,32 @@@ static const struct snd_kcontrol_new ha
   */
  static irqreturn_t display_pipe_interrupt_handler(int irq, void *dev_id)
  {
-       struct snd_intelhad *ctx = dev_id;
-       u32 audio_stat;
+       struct snd_intelhad_card *card_ctx = dev_id;
+       u32 audio_stat[3] = {};
+       int pipe, port;
+       for_each_pipe(card_ctx, pipe) {
+               /* use raw register access to ack IRQs even while disconnected */
+               audio_stat[pipe] = had_read_register_raw(card_ctx, pipe,
+                                                        AUD_HDMI_STATUS) &
+                       (HDMI_AUDIO_UNDERRUN | HDMI_AUDIO_BUFFER_DONE);
+               if (audio_stat[pipe])
+                       had_write_register_raw(card_ctx, pipe,
+                                              AUD_HDMI_STATUS, audio_stat[pipe]);
+       }
  
-       /* use raw register access to ack IRQs even while disconnected */
-       audio_stat = had_read_register_raw(ctx, AUD_HDMI_STATUS);
+       for_each_port(card_ctx, port) {
+               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+               int pipe = ctx->pipe;
  
-       if (audio_stat & HDMI_AUDIO_UNDERRUN) {
-               had_write_register_raw(ctx, AUD_HDMI_STATUS,
-                                      HDMI_AUDIO_UNDERRUN);
-               had_process_buffer_underrun(ctx);
-       }
+               if (pipe < 0)
+                       continue;
  
-       if (audio_stat & HDMI_AUDIO_BUFFER_DONE) {
-               had_write_register_raw(ctx, AUD_HDMI_STATUS,
-                                      HDMI_AUDIO_BUFFER_DONE);
-               had_process_buffer_done(ctx);
+               if (audio_stat[pipe] & HDMI_AUDIO_BUFFER_DONE)
+                       had_process_buffer_done(ctx);
+               if (audio_stat[pipe] & HDMI_AUDIO_UNDERRUN)
+                       had_process_buffer_underrun(ctx);
        }
  
        return IRQ_HANDLED;
  /*
   * monitor plug/unplug notification from i915; just kick off the work
   */
- static void notify_audio_lpe(struct platform_device *pdev)
+ static void notify_audio_lpe(struct platform_device *pdev, int port)
  {
-       struct snd_intelhad *ctx = platform_get_drvdata(pdev);
+       struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev);
+       struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
  
        schedule_work(&ctx->hdmi_audio_wq);
  }
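
The callback now identifies which port changed; the i915 LPE audio bridge is expected to invoke it under pdata->lpe_audio_slock, roughly as below (a sketch based on the fields used elsewhere in this diff; "pdev" and "port" come from the caller):

	unsigned long flags;

	spin_lock_irqsave(&pdata->lpe_audio_slock, flags);
	if (pdata->notify_audio_lpe)
		pdata->notify_audio_lpe(pdev, port);
	spin_unlock_irqrestore(&pdata->lpe_audio_slock, flags);
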
@@@ -1556,47 -1590,51 +1590,51 @@@ static void had_audio_wq(struct work_st
        struct snd_intelhad *ctx =
                container_of(work, struct snd_intelhad, hdmi_audio_wq);
        struct intel_hdmi_lpe_audio_pdata *pdata = ctx->dev->platform_data;
+       struct intel_hdmi_lpe_audio_port_pdata *ppdata = &pdata->port[ctx->port];
  
        pm_runtime_get_sync(ctx->dev);
        mutex_lock(&ctx->mutex);
-       if (!pdata->hdmi_connected) {
-               dev_dbg(ctx->dev, "%s: Event: HAD_NOTIFY_HOT_UNPLUG\n",
-                       __func__);
+       if (ppdata->pipe < 0) {
+               dev_dbg(ctx->dev, "%s: Event: HAD_NOTIFY_HOT_UNPLUG : port = %d\n",
+                       __func__, ctx->port);
                memset(ctx->eld, 0, sizeof(ctx->eld)); /* clear the old ELD */
+               ctx->dp_output = false;
+               ctx->tmds_clock_speed = 0;
+               ctx->link_rate = 0;
+               /* Shut down the stream */
                had_process_hot_unplug(ctx);
-       } else {
-               struct intel_hdmi_lpe_audio_eld *eld = &pdata->eld;
  
+               ctx->pipe = -1;
+       } else {
                dev_dbg(ctx->dev, "%s: HAD_NOTIFY_ELD : port = %d, tmds = %d\n",
-                       __func__, eld->port_id, pdata->tmds_clock_speed);
+                       __func__, ctx->port, ppdata->ls_clock);
  
-               switch (eld->pipe_id) {
-               case 0:
-                       ctx->had_config_offset = AUDIO_HDMI_CONFIG_A;
-                       break;
-               case 1:
-                       ctx->had_config_offset = AUDIO_HDMI_CONFIG_B;
-                       break;
-               case 2:
-                       ctx->had_config_offset = AUDIO_HDMI_CONFIG_C;
-                       break;
-               default:
-                       dev_dbg(ctx->dev, "Invalid pipe %d\n",
-                               eld->pipe_id);
-                       break;
-               }
-               memcpy(ctx->eld, eld->eld_data, sizeof(ctx->eld));
+               memcpy(ctx->eld, ppdata->eld, sizeof(ctx->eld));
  
-               ctx->dp_output = pdata->dp_output;
-               ctx->tmds_clock_speed = pdata->tmds_clock_speed;
-               ctx->link_rate = pdata->link_rate;
+               ctx->dp_output = ppdata->dp_output;
+               if (ctx->dp_output) {
+                       ctx->tmds_clock_speed = 0;
+                       ctx->link_rate = ppdata->ls_clock;
+               } else {
+                       ctx->tmds_clock_speed = ppdata->ls_clock;
+                       ctx->link_rate = 0;
+               }
  
+               /*
+                * Shut down the stream before we change
+                * the pipe assignment for this pcm device
+                */
                had_process_hot_plug(ctx);
  
-               /* Process mode change if stream is active */
+               ctx->pipe = ppdata->pipe;
+               /* Restart the stream if necessary */
                had_process_mode_change(ctx);
        }
        mutex_unlock(&ctx->mutex);
        pm_runtime_mark_last_busy(ctx->dev);
        pm_runtime_put_autosuspend(ctx->dev);
  /*
   * Jack interface
   */
- static int had_create_jack(struct snd_intelhad *ctx)
+ static int had_create_jack(struct snd_intelhad *ctx,
+                          struct snd_pcm *pcm)
  {
+       char hdmi_str[32];
        int err;
  
-       err = snd_jack_new(ctx->card, "HDMI/DP", SND_JACK_AVOUT, &ctx->jack,
+       snprintf(hdmi_str, sizeof(hdmi_str),
+                "HDMI/DP,pcm=%d", pcm->device);
+       err = snd_jack_new(ctx->card_ctx->card, hdmi_str,
+                          SND_JACK_AVOUT, &ctx->jack,
                           true, false);
        if (err < 0)
                return err;
  
  static int hdmi_lpe_audio_runtime_suspend(struct device *dev)
  {
-       struct snd_intelhad *ctx = dev_get_drvdata(dev);
-       struct snd_pcm_substream *substream;
+       struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
+       int port;
  
-       substream = had_substream_get(ctx);
-       if (substream) {
-               snd_pcm_suspend(substream);
-               had_substream_put(ctx);
+       for_each_port(card_ctx, port) {
+               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+               struct snd_pcm_substream *substream;
+               substream = had_substream_get(ctx);
+               if (substream) {
+                       snd_pcm_suspend(substream);
+                       had_substream_put(ctx);
+               }
        }
  
        return 0;
  
  static int __maybe_unused hdmi_lpe_audio_suspend(struct device *dev)
  {
-       struct snd_intelhad *ctx = dev_get_drvdata(dev);
+       struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
        int err;
  
        err = hdmi_lpe_audio_runtime_suspend(dev);
        if (!err)
-               snd_power_change_state(ctx->card, SNDRV_CTL_POWER_D3hot);
+               snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D3hot);
        return err;
  }
  
@@@ -1654,24 -1703,34 +1703,34 @@@ static int hdmi_lpe_audio_runtime_resum
  
  static int __maybe_unused hdmi_lpe_audio_resume(struct device *dev)
  {
-       struct snd_intelhad *ctx = dev_get_drvdata(dev);
+       struct snd_intelhad_card *card_ctx = dev_get_drvdata(dev);
  
        hdmi_lpe_audio_runtime_resume(dev);
-       snd_power_change_state(ctx->card, SNDRV_CTL_POWER_D0);
+       snd_power_change_state(card_ctx->card, SNDRV_CTL_POWER_D0);
        return 0;
  }
  
  /* release resources */
  static void hdmi_lpe_audio_free(struct snd_card *card)
  {
-       struct snd_intelhad *ctx = card->private_data;
+       struct snd_intelhad_card *card_ctx = card->private_data;
+       struct intel_hdmi_lpe_audio_pdata *pdata = card_ctx->dev->platform_data;
+       int port;
  
-       cancel_work_sync(&ctx->hdmi_audio_wq);
+       spin_lock_irq(&pdata->lpe_audio_slock);
+       pdata->notify_audio_lpe = NULL;
+       spin_unlock_irq(&pdata->lpe_audio_slock);
  
-       if (ctx->mmio_start)
-               iounmap(ctx->mmio_start);
-       if (ctx->irq >= 0)
-               free_irq(ctx->irq, ctx);
+       for_each_port(card_ctx, port) {
+               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+               cancel_work_sync(&ctx->hdmi_audio_wq);
+       }
+       if (card_ctx->mmio_start)
+               iounmap(card_ctx->mmio_start);
+       if (card_ctx->irq >= 0)
+               free_irq(card_ctx->irq, card_ctx);
  }
  
  /*
  static int hdmi_lpe_audio_probe(struct platform_device *pdev)
  {
        struct snd_card *card;
-       struct snd_intelhad *ctx;
+       struct snd_intelhad_card *card_ctx;
        struct snd_pcm *pcm;
        struct intel_hdmi_lpe_audio_pdata *pdata;
        int irq;
        struct resource *res_mmio;
-       int i, ret;
+       int port, ret;
  
        pdata = pdev->dev.platform_data;
        if (!pdata) {
  
        /* create a card instance with ALSA framework */
        ret = snd_card_new(&pdev->dev, hdmi_card_index, hdmi_card_id,
-                          THIS_MODULE, sizeof(*ctx), &card);
+                          THIS_MODULE, sizeof(*card_ctx), &card);
        if (ret)
                return ret;
  
-       ctx = card->private_data;
-       spin_lock_init(&ctx->had_spinlock);
-       mutex_init(&ctx->mutex);
-       ctx->connected = false;
-       ctx->dev = &pdev->dev;
-       ctx->card = card;
-       ctx->aes_bits = SNDRV_PCM_DEFAULT_CON_SPDIF;
+       card_ctx = card->private_data;
+       card_ctx->dev = &pdev->dev;
+       card_ctx->card = card;
        strcpy(card->driver, INTEL_HAD);
        strcpy(card->shortname, "Intel HDMI/DP LPE Audio");
        strcpy(card->longname, "Intel HDMI/DP LPE Audio");
  
-       ctx->irq = -1;
-       ctx->tmds_clock_speed = DIS_SAMPLE_RATE_148_5;
-       INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+       card_ctx->irq = -1;
  
        card->private_free = hdmi_lpe_audio_free;
  
-       /* assume pipe A as default */
-       ctx->had_config_offset = AUDIO_HDMI_CONFIG_A;
-       platform_set_drvdata(pdev, ctx);
+       platform_set_drvdata(pdev, card_ctx);
  
        dev_dbg(&pdev->dev, "%s: mmio_start = 0x%x, mmio_end = 0x%x\n",
                __func__, (unsigned int)res_mmio->start,
                (unsigned int)res_mmio->end);
  
-       ctx->mmio_start = ioremap_nocache(res_mmio->start,
-                                         (size_t)(resource_size(res_mmio)));
-       if (!ctx->mmio_start) {
+       card_ctx->mmio_start = ioremap_nocache(res_mmio->start,
+                                              (size_t)(resource_size(res_mmio)));
+       if (!card_ctx->mmio_start) {
                dev_err(&pdev->dev, "Could not get ioremap\n");
                ret = -EACCES;
                goto err;
  
        /* setup interrupt handler */
        ret = request_irq(irq, display_pipe_interrupt_handler, 0,
-                         pdev->name, ctx);
+                         pdev->name, card_ctx);
        if (ret < 0) {
                dev_err(&pdev->dev, "request_irq failed\n");
                goto err;
        }
  
-       ctx->irq = irq;
-       ret = snd_pcm_new(card, INTEL_HAD, PCM_INDEX, MAX_PB_STREAMS,
-                         MAX_CAP_STREAMS, &pcm);
-       if (ret)
-               goto err;
-       /* setup private data which can be retrieved when required */
-       pcm->private_data = ctx;
-       pcm->info_flags = 0;
-       strncpy(pcm->name, card->shortname, strlen(card->shortname));
-       /* setup the ops for playabck */
-       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &had_pcm_ops);
+       card_ctx->irq = irq;
  
        /* only 32bit addressable */
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
  
-       /* allocate dma pages;
-        * try to allocate 600k buffer as default which is large enough
-        */
-       snd_pcm_lib_preallocate_pages_for_all(pcm,
-                       SNDRV_DMA_TYPE_DEV, NULL,
-                       HAD_DEFAULT_BUFFER, HAD_MAX_BUFFER);
+       init_channel_allocations();
  
-       /* create controls */
-       for (i = 0; i < ARRAY_SIZE(had_controls); i++) {
-               ret = snd_ctl_add(card, snd_ctl_new1(&had_controls[i], ctx));
-               if (ret < 0)
+       card_ctx->num_pipes = pdata->num_pipes;
+       card_ctx->num_ports = pdata->num_ports;
+       for_each_port(card_ctx, port) {
+               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+               int i;
+               ctx->card_ctx = card_ctx;
+               ctx->dev = card_ctx->dev;
+               ctx->port = port;
+               ctx->pipe = -1;
+               INIT_WORK(&ctx->hdmi_audio_wq, had_audio_wq);
+               ret = snd_pcm_new(card, INTEL_HAD, port, MAX_PB_STREAMS,
+                                 MAX_CAP_STREAMS, &pcm);
+               if (ret)
                        goto err;
-       }
  
-       init_channel_allocations();
+               /* setup private data which can be retrieved when required */
+               pcm->private_data = ctx;
+               pcm->info_flags = 0;
+               strncpy(pcm->name, card->shortname, strlen(card->shortname));
+               /* setup the ops for playabck */
+               /* setup the ops for playback */
  
-       /* Register channel map controls */
-       ret = had_register_chmap_ctls(ctx, pcm);
-       if (ret < 0)
-               goto err;
+               /* allocate dma pages;
+                * try to allocate 600k buffer as default which is large enough
+                */
+               snd_pcm_lib_preallocate_pages_for_all(pcm,
+                                                     SNDRV_DMA_TYPE_DEV, NULL,
+                                                     HAD_DEFAULT_BUFFER, HAD_MAX_BUFFER);
+               /* create controls */
+               for (i = 0; i < ARRAY_SIZE(had_controls); i++) {
+                       struct snd_kcontrol *kctl;
+                       kctl = snd_ctl_new1(&had_controls[i], ctx);
+                       if (!kctl) {
+                               ret = -ENOMEM;
+                               goto err;
+                       }
  
-       ret = had_create_jack(ctx);
-       if (ret < 0)
-               goto err;
+                       kctl->id.device = pcm->device;
+                       ret = snd_ctl_add(card, kctl);
+                       if (ret < 0)
+                               goto err;
+               }
+               /* Register channel map controls */
+               ret = had_register_chmap_ctls(ctx, pcm);
+               if (ret < 0)
+                       goto err;
+               ret = had_create_jack(ctx, pcm);
+               if (ret < 0)
+                       goto err;
+       }
  
        ret = snd_card_register(card);
        if (ret)
  
        spin_lock_irq(&pdata->lpe_audio_slock);
        pdata->notify_audio_lpe = notify_audio_lpe;
-       pdata->notify_pending = false;
        spin_unlock_irq(&pdata->lpe_audio_slock);
  
-       /* runtime PM isn't enabled as default, since it won't save much on
-        * BYT/CHT devices; user who want the runtime PM should adjust the
-        * power/ontrol and power/autosuspend_delay_ms sysfs entries instead
-        */
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_mark_last_busy(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
  
        dev_dbg(&pdev->dev, "%s: handle pending notification\n", __func__);
-       schedule_work(&ctx->hdmi_audio_wq);
+       for_each_port(card_ctx, port) {
+               struct snd_intelhad *ctx = &card_ctx->pcm_ctx[port];
+               schedule_work(&ctx->hdmi_audio_wq);
+       }
  
        return 0;
  
@@@ -1834,9 -1908,9 +1908,9 @@@ err
   */
  static int hdmi_lpe_audio_remove(struct platform_device *pdev)
  {
-       struct snd_intelhad *ctx = platform_get_drvdata(pdev);
+       struct snd_intelhad_card *card_ctx = platform_get_drvdata(pdev);
  
-       snd_card_free(ctx->card);
+       snd_card_free(card_ctx->card);
        return 0;
  }
  