linux.git/commitdiff
Merge tag 'v4.10-rc2' into drm-intel-next-queued
author    Daniel Vetter <[email protected]>
          Wed, 4 Jan 2017 10:34:01 +0000 (11:34 +0100)
committer Daniel Vetter <[email protected]>
          Wed, 4 Jan 2017 10:35:18 +0000 (11:35 +0100)
Backmerge Linux 4.10-rc2 to resync with our -fixes cherry-picks. I've
done the backmerge directly because Dave is on vacation.

Signed-off-by: Daniel Vetter <[email protected]>
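
For reference, a backmerge of this kind is usually done along the following lines (a minimal sketch; the remote used to fetch the tag is an assumption, only the branch and tag names come from this commit):

    git fetch <linus-remote> v4.10-rc2        # assumed remote for Linus' tree
    git checkout drm-intel-next-queued
    git merge v4.10-rc2                       # resolve conflicts against the -fixes cherry-picks
    git commit                                # conclude the merge; add rationale and Signed-off-by

The combined diff below lists only the paths whose merged result differs from both parents, i.e. the files that needed manual resolution.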
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_request.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_uncore.c

index c61c8a73820228b9724ae546effc151e8a236542,4db24225023520b879b12fce1a924cc236384bd5..fd2b026f7ecde98a6da7c97ce855d972939a27b9
@@@ -177,8 -177,8 +177,8 @@@ static int dispatch_workload(struct int
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
                gvt_err("fail to allocate gem request\n");
-               workload->status = PTR_ERR(rq);
-               return workload->status;
+               ret = PTR_ERR(rq);
+               goto out;
        }
  
        gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);
@@@ -212,7 -212,8 +212,8 @@@ out
        if (ret)
                workload->status = ret;
  
-       i915_add_request_no_flush(rq);
+       if (!IS_ERR_OR_NULL(rq))
+               i915_add_request_no_flush(rq);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        return ret;
  }
@@@ -460,7 -461,8 +461,8 @@@ complete
  
                complete_current_workload(gvt, ring_id);
  
-               i915_gem_request_put(fetch_and_zero(&workload->req));
+               if (workload->req)
+                       i915_gem_request_put(fetch_and_zero(&workload->req));
  
                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
@@@ -547,10 -549,18 +549,10 @@@ err
  
  void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
  {
 -      struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 -
        atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
                        &vgpu->shadow_ctx_notifier_block);
  
 -      mutex_lock(&dev_priv->drm.struct_mutex);
 -
 -      /* a little hacky to mark as ctx closed */
 -      vgpu->shadow_ctx->closed = true;
 -      i915_gem_context_put(vgpu->shadow_ctx);
 -
 -      mutex_unlock(&dev_priv->drm.struct_mutex);
 +      i915_gem_context_put_unlocked(vgpu->shadow_ctx);
  }
  
  int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
index d70ea316d83a63e7fb3c0b8247bca26d9f2cbe85,4a31b7a891ecaf3e2732c9f2d6ce62fa633419f6..d8760acf800183b6b8edb383977327944bd24d47
@@@ -38,7 -38,6 +38,7 @@@
  #include <linux/reservation.h>
  #include <linux/shmem_fs.h>
  #include <linux/slab.h>
 +#include <linux/stop_machine.h>
  #include <linux/swap.h>
  #include <linux/pci.h>
  #include <linux/dma-buf.h>
@@@ -70,8 -69,7 +70,8 @@@ insert_mappable_node(struct i915_ggtt *
  {
        memset(node, 0, sizeof(*node));
        return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
 -                                                 size, 0, -1,
 +                                                 size, 0,
 +                                                 I915_COLOR_UNEVICTABLE,
                                                   0, ggtt->mappable_end,
                                                   DRM_MM_SEARCH_DEFAULT,
                                                   DRM_MM_CREATE_DEFAULT);
@@@ -246,16 -244,14 +246,16 @@@ err_phys
  
  static void
  __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 -                              struct sg_table *pages)
 +                              struct sg_table *pages,
 +                              bool needs_clflush)
  {
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
  
        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;
  
 -      if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
 +      if (needs_clflush &&
 +          (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                drm_clflush_sg(pages);
  
@@@ -267,7 -263,7 +267,7 @@@ static voi
  i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
  {
 -      __i915_gem_object_release_shmem(obj, pages);
 +      __i915_gem_object_release_shmem(obj, pages, false);
  
        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
@@@ -640,8 -636,9 +640,8 @@@ out
        return ret;
  }
  
 -void *i915_gem_object_alloc(struct drm_device *dev)
 +void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
  }
  
@@@ -653,7 -650,7 +653,7 @@@ void i915_gem_object_free(struct drm_i9
  
  static int
  i915_gem_create(struct drm_file *file,
 -              struct drm_device *dev,
 +              struct drm_i915_private *dev_priv,
                uint64_t size,
                uint32_t *handle_p)
  {
                return -EINVAL;
  
        /* Allocate the new object */
 -      obj = i915_gem_object_create(dev, size);
 +      obj = i915_gem_object_create(dev_priv, size);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
  
@@@ -688,7 -685,7 +688,7 @@@ i915_gem_dumb_create(struct drm_file *f
        /* have to work out size/pitch and return them */
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
 -      return i915_gem_create(file, dev,
 +      return i915_gem_create(file, to_i915(dev),
                               args->size, &args->handle);
  }
  
@@@ -702,12 -699,11 +702,12 @@@ in
  i915_gem_create_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
  {
 +      struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_create *args = data;
  
 -      i915_gem_flush_free_objects(to_i915(dev));
 +      i915_gem_flush_free_objects(dev_priv);
  
 -      return i915_gem_create(file, dev,
 +      return i915_gem_create(file, dev_priv,
                               args->size, &args->handle);
  }
  
@@@ -1142,7 -1138,8 +1142,7 @@@ i915_gem_pread_ioctl(struct drm_device 
                return -ENOENT;
  
        /* Bounds check source.  */
 -      if (args->offset > obj->base.size ||
 -          args->size > obj->base.size - args->offset) {
 +      if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto out;
        }
@@@ -1455,7 -1452,8 +1455,7 @@@ i915_gem_pwrite_ioctl(struct drm_devic
                return -ENOENT;
  
        /* Bounds check destination. */
 -      if (args->offset > obj->base.size ||
 -          args->size > obj->base.size - args->offset) {
 +      if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
                ret = -EINVAL;
                goto err;
        }
@@@ -1517,7 -1515,7 +1517,7 @@@ static void i915_gem_object_bump_inacti
  
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!i915_vma_is_ggtt(vma))
 -                      continue;
 +                      break;
  
                if (i915_vma_is_active(vma))
                        continue;
@@@ -1813,8 -1811,7 +1813,7 @@@ int i915_gem_fault(struct vm_area_struc
        int ret;
  
        /* We don't use vmf->pgoff since that has the fake offset */
-       page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
-               PAGE_SHIFT;
+       page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
  
        trace_i915_gem_object_fault(obj, page_offset, true, write);
  
@@@ -2099,8 -2096,7 +2098,8 @@@ u64 i915_gem_get_ggtt_alignment(struct 
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
 -      if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
 +      if (INTEL_GEN(dev_priv) >= 4 ||
 +          (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
            tiling_mode == I915_TILING_NONE)
                return 4096;
  
@@@ -2235,7 -2231,7 +2234,7 @@@ i915_gem_object_put_pages_gtt(struct dr
        struct sgt_iter sgt_iter;
        struct page *page;
  
 -      __i915_gem_object_release_shmem(obj, pages);
 +      __i915_gem_object_release_shmem(obj, pages, true);
  
        i915_gem_gtt_finish_pages(obj, pages);
  
@@@ -2326,7 -2322,7 +2325,7 @@@ static void i915_sg_trim(struct sg_tabl
        if (orig_st->nents == orig_st->orig_nents)
                return;
  
 -      if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
 +      if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
                return;
  
        new_sg = new_st.sgl;
                /* called before being DMA mapped, no need to copy sg->dma_* */
                new_sg = sg_next(new_sg);
        }
 +      GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
  
        sg_free_table(orig_st);
  
@@@ -2657,34 -2652,35 +2656,34 @@@ err_unlock
        goto out_unlock;
  }
  
 -static bool i915_context_is_banned(const struct i915_gem_context *ctx)
 +static bool ban_context(const struct i915_gem_context *ctx)
  {
 -      unsigned long elapsed;
 +      return (i915_gem_context_is_bannable(ctx) &&
 +              ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
 +}
  
 -      if (ctx->hang_stats.banned)
 -              return true;
 +static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 +{
 +      ctx->guilty_count++;
 +      ctx->ban_score += CONTEXT_SCORE_GUILTY;
 +      if (ban_context(ctx))
 +              i915_gem_context_set_banned(ctx);
  
 -      elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
 -      if (ctx->hang_stats.ban_period_seconds &&
 -          elapsed <= ctx->hang_stats.ban_period_seconds) {
 -              DRM_DEBUG("context hanging too fast, banning!\n");
 -              return true;
 -      }
 +      DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
 +                       ctx->name, ctx->ban_score,
 +                       yesno(i915_gem_context_is_banned(ctx)));
 +
 +      if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
 +              return;
  
 -      return false;
 +      ctx->file_priv->context_bans++;
 +      DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
 +                       ctx->name, ctx->file_priv->context_bans);
  }
  
 -static void i915_set_reset_status(struct i915_gem_context *ctx,
 -                                const bool guilty)
 +static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
  {
 -      struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
 -
 -      if (guilty) {
 -              hs->banned = i915_context_is_banned(ctx);
 -              hs->batch_active++;
 -              hs->guilty_ts = get_seconds();
 -      } else {
 -              hs->batch_pending++;
 -      }
 +      ctx->active_count++;
  }
  
  struct drm_i915_gem_request *
@@@ -2732,7 -2728,6 +2731,7 @@@ static void i915_gem_reset_engine(struc
        struct drm_i915_gem_request *request;
        struct i915_gem_context *incomplete_ctx;
        struct intel_timeline *timeline;
 +      unsigned long flags;
        bool ring_hung;
  
        if (engine->irq_seqno_barrier)
        if (!request)
                return;
  
 -      ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 -      if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
 +      ring_hung = engine->hangcheck.stalled;
 +      if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
 +              DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
 +                               engine->name,
 +                               yesno(ring_hung));
                ring_hung = false;
 +      }
 +
 +      if (ring_hung)
 +              i915_gem_context_mark_guilty(request->ctx);
 +      else
 +              i915_gem_context_mark_innocent(request->ctx);
  
 -      i915_set_reset_status(request->ctx, ring_hung);
        if (!ring_hung)
                return;
  
        if (i915_gem_context_is_default(incomplete_ctx))
                return;
  
 +      timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
 +
 +      spin_lock_irqsave(&engine->timeline->lock, flags);
 +      spin_lock(&timeline->lock);
 +
        list_for_each_entry_continue(request, &engine->timeline->requests, link)
                if (request->ctx == incomplete_ctx)
                        reset_request(request);
  
 -      timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
        list_for_each_entry(request, &timeline->requests, link)
                reset_request(request);
 +
 +      spin_unlock(&timeline->lock);
 +      spin_unlock_irqrestore(&engine->timeline->lock, flags);
  }
  
  void i915_gem_reset(struct drm_i915_private *dev_priv)
@@@ -2822,12 -2802,6 +2821,12 @@@ static void nop_submit_request(struct d
  
  static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
  {
 +      /* We need to be sure that no thread is running the old callback as
 +       * we install the nop handler (otherwise we would submit a request
 +       * to hardware that will never complete). In order to prevent this
 +       * race, we wait until the machine is idle before making the swap
 +       * (using stop_machine()).
 +       */
        engine->submit_request = nop_submit_request;
  
        /* Mark all pending requests as complete so that any concurrent
        }
  }
  
 -void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 +static int __i915_gem_set_wedged_BKL(void *data)
  {
 +      struct drm_i915_private *i915 = data;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
  
 +      for_each_engine(engine, i915, id)
 +              i915_gem_cleanup_engine(engine);
 +
 +      return 0;
 +}
 +
 +void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
 +{
        lockdep_assert_held(&dev_priv->drm.struct_mutex);
        set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
  
 -      i915_gem_context_lost(dev_priv);
 -      for_each_engine(engine, dev_priv, id)
 -              i915_gem_cleanup_engine(engine);
 -      mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 +      stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
  
 +      i915_gem_context_lost(dev_priv);
        i915_gem_retire_requests(dev_priv);
 +
 +      mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
  }
  
  static void
@@@ -3566,7 -3531,7 +3565,7 @@@ err_unpin_display
  void
  i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
  {
 -      lockdep_assert_held(&vma->vm->dev->struct_mutex);
 +      lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
  
        if (WARN_ON(vma->obj->pin_display == 0))
                return;
@@@ -4005,8 -3970,9 +4004,8 @@@ static const struct drm_i915_gem_object
        (sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
  
  struct drm_i915_gem_object *
 -i915_gem_object_create(struct drm_device *dev, u64 size)
 +i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);
  
 -      obj = i915_gem_object_alloc(dev);
 +      obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);
  
 -      ret = drm_gem_object_init(dev, &obj->base, size);
 +      ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
        if (ret)
                goto fail;
  
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
 -      if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
 +      if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
@@@ -4225,12 -4191,12 +4224,12 @@@ static void assert_kernel_context_is_cu
        enum intel_engine_id id;
  
        for_each_engine(engine, dev_priv, id)
 -              GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
 +              GEM_BUG_ON(engine->last_retired_context != dev_priv->kernel_context);
  }
  
 -int i915_gem_suspend(struct drm_device *dev)
 +int i915_gem_suspend(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct drm_device *dev = &dev_priv->drm;
        int ret;
  
        intel_suspend_gt_powersave(dev_priv);
  
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        cancel_delayed_work_sync(&dev_priv->gt.retire_work);
 -      flush_delayed_work(&dev_priv->gt.idle_work);
 -      flush_work(&dev_priv->mm.free_work);
 +
 +      /* As the idle_work is rearming if it detects a race, play safe and
 +       * repeat the flush until it is definitely idle.
 +       */
 +      while (flush_delayed_work(&dev_priv->gt.idle_work))
 +              ;
 +
 +      i915_gem_drain_freed_objects(dev_priv);
  
        /* Assert that we sucessfully flushed all the work and
         * reset the GPU back to its idle, low power state.
@@@ -4310,9 -4270,9 +4309,9 @@@ err
        return ret;
  }
  
 -void i915_gem_resume(struct drm_device *dev)
 +void i915_gem_resume(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct drm_device *dev = &dev_priv->drm;
  
        WARN_ON(dev_priv->gt.awake);
  
@@@ -4377,8 -4337,9 +4376,8 @@@ static void init_unused_rings(struct dr
  }
  
  int
 -i915_gem_init_hw(struct drm_device *dev)
 +i915_gem_init_hw(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        int ret;
                        goto out;
        }
  
 -      intel_mocs_init_l3cc_table(dev);
 +      intel_mocs_init_l3cc_table(dev_priv);
  
        /* We can't enable contexts until all firmware is loaded */
 -      ret = intel_guc_setup(dev);
 +      ret = intel_guc_setup(dev_priv);
        if (ret)
                goto out;
  
@@@ -4465,11 -4426,12 +4464,11 @@@ bool intel_sanitize_semaphores(struct d
        return true;
  }
  
 -int i915_gem_init(struct drm_device *dev)
 +int i915_gem_init(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
  
 -      mutex_lock(&dev->struct_mutex);
 +      mutex_lock(&dev_priv->drm.struct_mutex);
  
        if (!i915.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
        if (ret)
                goto out_unlock;
  
 -      ret = i915_gem_context_init(dev);
 +      ret = i915_gem_context_init(dev_priv);
        if (ret)
                goto out_unlock;
  
 -      ret = intel_engines_init(dev);
 +      ret = intel_engines_init(dev_priv);
        if (ret)
                goto out_unlock;
  
 -      ret = i915_gem_init_hw(dev);
 +      ret = i915_gem_init_hw(dev_priv);
        if (ret == -EIO) {
                /* Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
  
  out_unlock:
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 -      mutex_unlock(&dev->struct_mutex);
 +      mutex_unlock(&dev_priv->drm.struct_mutex);
  
        return ret;
  }
  
  void
 -i915_gem_cleanup_engines(struct drm_device *dev)
 +i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
  
@@@ -4537,9 -4500,8 +4536,9 @@@ i915_gem_load_init_fences(struct drm_i9
        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
                dev_priv->num_fence_regs = 32;
 -      else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
 -               IS_I945GM(dev_priv) || IS_G33(dev_priv))
 +      else if (INTEL_INFO(dev_priv)->gen >= 4 ||
 +               IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
 +               IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
                dev_priv->num_fence_regs = 16;
        else
                dev_priv->num_fence_regs = 8;
  }
  
  int
 -i915_gem_load_init(struct drm_device *dev)
 +i915_gem_load_init(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        int err = -ENOMEM;
  
        dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
@@@ -4632,8 -4595,10 +4631,8 @@@ err_out
        return err;
  }
  
 -void i915_gem_load_cleanup(struct drm_device *dev)
 +void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
  {
 -      struct drm_i915_private *dev_priv = to_i915(dev);
 -
        WARN_ON(!llist_empty(&dev_priv->mm.free_list));
  
        mutex_lock(&dev_priv->drm.struct_mutex);
@@@ -4784,7 -4749,7 +4783,7 @@@ void i915_gem_track_fb(struct drm_i915_
  
  /* Allocate a new GEM object and fill it with the supplied data */
  struct drm_i915_gem_object *
 -i915_gem_object_create_from_data(struct drm_device *dev,
 +i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
                                 const void *data, size_t size)
  {
        struct drm_i915_gem_object *obj;
        size_t bytes;
        int ret;
  
 -      obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
 +      obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;
  
index 7427aac749236d3c86e0b62a82c6f57b5fd74354,b8f403faadbb867af596e1f3d967e1d411ed443e..99056b948edabe5d1945573a192727fd56af5454
@@@ -62,15 -62,6 +62,15 @@@ static void i915_fence_release(struct d
  {
        struct drm_i915_gem_request *req = to_request(fence);
  
 +      /* The request is put onto a RCU freelist (i.e. the address
 +       * is immediately reused), mark the fences as being freed now.
 +       * Otherwise the debugobjects for the fences are only marked as
 +       * freed when the slab cache itself is freed, and so we would get
 +       * caught trying to reuse dead objects.
 +       */
 +      i915_sw_fence_fini(&req->submit);
 +      i915_sw_fence_fini(&req->execute);
 +
        kmem_cache_free(req->i915->requests, req);
  }
  
@@@ -206,7 -197,6 +206,7 @@@ void i915_gem_retire_noop(struct i915_g
  
  static void i915_gem_request_retire(struct drm_i915_gem_request *request)
  {
 +      struct intel_engine_cs *engine = request->engine;
        struct i915_gem_active *active, *next;
  
        lockdep_assert_held(&request->i915->drm.struct_mutex);
  
        trace_i915_gem_request_retire(request);
  
 -      spin_lock_irq(&request->engine->timeline->lock);
 +      spin_lock_irq(&engine->timeline->lock);
        list_del_init(&request->link);
 -      spin_unlock_irq(&request->engine->timeline->lock);
 +      spin_unlock_irq(&engine->timeline->lock);
  
        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
  
        i915_gem_request_remove_from_client(request);
  
 -      if (request->previous_context) {
 -              if (i915.enable_execlists)
 -                      intel_lr_context_unpin(request->previous_context,
 -                                             request->engine);
 -      }
 +      /* Retirement decays the ban score as it is a sign of ctx progress */
 +      if (request->ctx->ban_score > 0)
 +              request->ctx->ban_score--;
  
 -      i915_gem_context_put(request->ctx);
 +      /* The backing object for the context is done after switching to the
 +       * *next* context. Therefore we cannot retire the previous context until
 +       * the next context has already started running. However, since we
 +       * cannot take the required locks at i915_gem_request_submit() we
 +       * defer the unpinning of the active context to now, retirement of
 +       * the subsequent request.
 +       */
 +      if (engine->last_retired_context)
 +              engine->context_unpin(engine, engine->last_retired_context);
 +      engine->last_retired_context = request->ctx;
  
        dma_fence_signal(&request->fence);
  
@@@ -294,8 -277,6 +294,8 @@@ void i915_gem_request_retire_upto(struc
        struct drm_i915_gem_request *tmp;
  
        lockdep_assert_held(&req->i915->drm.struct_mutex);
 +      GEM_BUG_ON(!i915_gem_request_completed(req));
 +
        if (list_empty(&req->link))
                return;
  
@@@ -345,11 -326,11 +345,11 @@@ static int i915_gem_init_global_seqno(s
        GEM_BUG_ON(i915->gt.active_requests > 1);
  
        /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
 -      if (!i915_seqno_passed(seqno, atomic_read(&timeline->next_seqno))) {
 +      if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
                while (intel_breadcrumbs_busy(i915))
                        cond_resched(); /* spin until threads are complete */
        }
 -      atomic_set(&timeline->next_seqno, seqno);
 +      atomic_set(&timeline->seqno, seqno);
  
        /* Finally reset hw state */
        for_each_engine(engine, i915, id)
@@@ -384,11 -365,11 +384,11 @@@ int i915_gem_set_global_seqno(struct dr
  static int reserve_global_seqno(struct drm_i915_private *i915)
  {
        u32 active_requests = ++i915->gt.active_requests;
 -      u32 next_seqno = atomic_read(&i915->gt.global_timeline.next_seqno);
 +      u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
        int ret;
  
        /* Reservation is fine until we need to wrap around */
 -      if (likely(next_seqno + active_requests > next_seqno))
 +      if (likely(seqno + active_requests > seqno))
                return 0;
  
        ret = i915_gem_init_global_seqno(i915, 0);
  
  static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
  {
 -      /* next_seqno only incremented under a mutex */
 -      return ++tl->next_seqno.counter;
 +      /* seqno only incremented under a mutex */
 +      return ++tl->seqno.counter;
  }
  
  static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
  {
 -      return atomic_inc_return(&tl->next_seqno);
 +      return atomic_inc_return(&tl->seqno);
  }
  
  void __i915_gem_request_submit(struct drm_i915_gem_request *request)
@@@ -528,18 -509,10 +528,18 @@@ i915_gem_request_alloc(struct intel_eng
        if (ret)
                return ERR_PTR(ret);
  
 -      ret = reserve_global_seqno(dev_priv);
 +      /* Pinning the contexts may generate requests in order to acquire
 +       * GGTT space, so do this first before we reserve a seqno for
 +       * ourselves.
 +       */
 +      ret = engine->context_pin(engine, ctx);
        if (ret)
                return ERR_PTR(ret);
  
 +      ret = reserve_global_seqno(dev_priv);
 +      if (ret)
 +              goto err_unpin;
 +
        /* Move the oldest request to the slab-cache (if not in use!) */
        req = list_first_entry_or_null(&engine->timeline->requests,
                                       typeof(*req), link);
        INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
 -      req->ctx = i915_gem_context_get(ctx);
 +      req->ctx = ctx;
  
        /* No zalloc, must clear what we need by hand */
        req->global_seqno = 0;
 -      req->previous_context = NULL;
        req->file_priv = NULL;
        req->batch = NULL;
  
        req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
        GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
  
 -      if (i915.enable_execlists)
 -              ret = intel_logical_ring_alloc_request_extras(req);
 -      else
 -              ret = intel_ring_alloc_request_extras(req);
 +      ret = engine->request_alloc(req);
        if (ret)
                goto err_ctx;
  
        return req;
  
  err_ctx:
 -      i915_gem_context_put(ctx);
 +      /* Make sure we didn't add ourselves to external state before freeing */
 +      GEM_BUG_ON(!list_empty(&req->active_list));
 +      GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
 +      GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
 +
        kmem_cache_free(dev_priv->requests, req);
  err_unreserve:
        dev_priv->gt.active_requests--;
 +err_unpin:
 +      engine->context_unpin(engine, ctx);
        return ERR_PTR(ret);
  }
  
@@@ -1001,7 -972,7 +1001,7 @@@ bool __i915_spin_request(const struct d
                if (busywait_stop(timeout_us, cpu))
                        break;
  
-               cpu_relax_lowlatency();
+               cpu_relax();
        } while (!need_resched());
  
        return false;
index 48963a20992fcf2b069129a31459ba0ef13db2da,d068af2ec3a3a9597d5e9fb8553ecc7cf7596d86..6a8fa085b74edd91e696c167fff1bd8747042d6f
@@@ -515,7 -515,7 +515,7 @@@ __i915_gem_userptr_get_pages_worker(str
                                         obj->userptr.ptr + pinned * PAGE_SIZE,
                                         npages - pinned,
                                         flags,
-                                        pvec + pinned, NULL);
+                                        pvec + pinned, NULL, NULL);
                                if (ret < 0)
                                        break;
  
@@@ -784,7 -784,7 +784,7 @@@ i915_gem_userptr_ioctl(struct drm_devic
                return -ENODEV;
        }
  
 -      obj = i915_gem_object_alloc(dev);
 +      obj = i915_gem_object_alloc(dev_priv);
        if (obj == NULL)
                return -ENOMEM;
  
index 2070adac60de5dcf585768790ca7355950d4316f,5b72c50d6f768f56a5619c42fb20a7cbfd0db64d..16732e7bc08eed9709244ccfe07a421365e184a1
@@@ -340,7 -340,7 +340,7 @@@ static bool intel_dsi_compute_config(st
        /* DSI uses short packets for sync events, so clear mode flags for DSI */
        adjusted_mode->flags = 0;
  
 -      if (IS_BROXTON(dev_priv)) {
 +      if (IS_GEN9_LP(dev_priv)) {
                /* Dual link goes to DSI transcoder A. */
                if (intel_dsi->ports == BIT(PORT_C))
                        pipe_config->cpu_transcoder = TRANSCODER_DSI_C;
@@@ -379,8 -379,7 +379,8 @@@ static void bxt_dsi_device_ready(struc
                val &= ~ULPS_STATE_MASK;
                val |= (ULPS_STATE_ENTER | DEVICE_READY);
                I915_WRITE(MIPI_DEVICE_READY(port), val);
 -              usleep_range(2, 3);
 +              /* at least 2us - relaxed for hrtimer subsystem optimization */
 +              usleep_range(10, 50);
  
                /* 3. Exit ULPS */
                val = I915_READ(MIPI_DEVICE_READY(port));
@@@ -442,7 -441,7 +442,7 @@@ static void intel_dsi_device_ready(stru
  
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
                vlv_dsi_device_ready(encoder);
 -      else if (IS_BROXTON(dev_priv))
 +      else if (IS_GEN9_LP(dev_priv))
                bxt_dsi_device_ready(encoder);
  }
  
@@@ -465,7 -464,7 +465,7 @@@ static void intel_dsi_port_enable(struc
        }
  
        for_each_dsi_port(port, intel_dsi->ports) {
 -              i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 +              i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
                        BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
                u32 temp;
  
                if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) {
                        temp |= (intel_dsi->dual_link - 1)
                                                << DUAL_LINK_MODE_SHIFT;
 -                      temp |= intel_crtc->pipe ?
 +                      if (IS_BROXTON(dev_priv))
 +                              temp |= LANE_CONFIGURATION_DUAL_LINK_A;
 +                      else
 +                              temp |= intel_crtc->pipe ?
                                        LANE_CONFIGURATION_DUAL_LINK_B :
                                        LANE_CONFIGURATION_DUAL_LINK_A;
                }
@@@ -498,7 -494,7 +498,7 @@@ static void intel_dsi_port_disable(stru
        enum port port;
  
        for_each_dsi_port(port, intel_dsi->ports) {
 -              i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 +              i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
                        BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
                u32 temp;
  
@@@ -667,7 -663,7 +667,7 @@@ static void intel_dsi_clear_device_read
        DRM_DEBUG_KMS("\n");
        for_each_dsi_port(port, intel_dsi->ports) {
                /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
 -              i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
 +              i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
                        BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
                u32 val;
  
                I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
                usleep_range(2000, 2500);
        }
 -
 -      intel_disable_dsi_pll(encoder);
  }
  
  static void intel_dsi_post_disable(struct intel_encoder *encoder,
  
        intel_dsi_clear_device_ready(encoder);
  
 +      intel_disable_dsi_pll(encoder);
 +
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 val;
  
@@@ -759,12 -755,12 +759,12 @@@ static bool intel_dsi_get_hw_state(stru
         * configuration, otherwise accessing DSI registers will hang the
         * machine. See BSpec North Display Engine registers/MIPI[BXT].
         */
 -      if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
 +      if (IS_GEN9_LP(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv))
                goto out_put_power;
  
        /* XXX: this only works for one DSI output */
        for_each_dsi_port(port, intel_dsi->ports) {
 -              i915_reg_t ctrl_reg = IS_BROXTON(dev_priv) ?
 +              i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
                        BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
                bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
  
                if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
                        continue;
  
 -              if (IS_BROXTON(dev_priv)) {
 +              if (IS_GEN9_LP(dev_priv)) {
                        u32 tmp = I915_READ(MIPI_CTRL(port));
                        tmp &= BXT_PIPE_SELECT_MASK;
                        tmp >>= BXT_PIPE_SELECT_SHIFT;
@@@ -977,7 -973,7 +977,7 @@@ static void intel_dsi_get_config(struc
        u32 pclk;
        DRM_DEBUG_KMS("\n");
  
 -      if (IS_BROXTON(dev_priv))
 +      if (IS_GEN9_LP(dev_priv))
                bxt_dsi_get_pipe_config(encoder, pipe_config);
  
        pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp,
@@@ -1069,7 -1065,7 +1069,7 @@@ static void set_dsi_timings(struct drm_
        hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
  
        for_each_dsi_port(port, intel_dsi->ports) {
 -              if (IS_BROXTON(dev_priv)) {
 +              if (IS_GEN9_LP(dev_priv)) {
                        /*
                         * Program hdisplay and vdisplay on MIPI transcoder.
                         * This is different from calculated hactive and
@@@ -1156,7 -1152,7 +1156,7 @@@ static void intel_dsi_prepare(struct in
                        tmp &= ~READ_REQUEST_PRIORITY_MASK;
                        I915_WRITE(MIPI_CTRL(port), tmp |
                                        READ_REQUEST_PRIORITY_HIGH);
 -              } else if (IS_BROXTON(dev_priv)) {
 +              } else if (IS_GEN9_LP(dev_priv)) {
                        enum pipe pipe = intel_crtc->pipe;
  
                        tmp = I915_READ(MIPI_CTRL(port));
        if (intel_dsi->clock_stop)
                tmp |= CLOCKSTOP;
  
 -      if (IS_BROXTON(dev_priv)) {
 +      if (IS_GEN9_LP(dev_priv)) {
                tmp |= BXT_DPHY_DEFEATURE_EN;
                if (!is_cmd_mode(intel_dsi))
                        tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
                I915_WRITE(MIPI_INIT_COUNT(port),
                                txclkesc(intel_dsi->escape_clk_div, 100));
  
 -              if (IS_BROXTON(dev_priv) && (!intel_dsi->dual_link)) {
 +              if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
                        /*
                         * BXT spec says write MIPI_INIT_COUNT for
                         * both the ports, even if only one is
        }
  }
  
- static enum drm_connector_status
- intel_dsi_detect(struct drm_connector *connector, bool force)
- {
-       return connector_status_connected;
- }
  static int intel_dsi_get_modes(struct drm_connector *connector)
  {
        struct intel_connector *intel_connector = to_intel_connector(connector);
@@@ -1411,7 -1401,6 +1405,6 @@@ static const struct drm_connector_helpe
  
  static const struct drm_connector_funcs intel_dsi_connector_funcs = {
        .dpms = drm_atomic_helper_connector_dpms,
-       .detect = intel_dsi_detect,
        .late_register = intel_connector_register,
        .early_unregister = intel_connector_unregister,
        .destroy = intel_dsi_connector_destroy,
@@@ -1435,15 -1424,15 +1428,15 @@@ static void intel_dsi_add_properties(st
        }
  }
  
 -void intel_dsi_init(struct drm_device *dev)
 +void intel_dsi_init(struct drm_i915_private *dev_priv)
  {
 +      struct drm_device *dev = &dev_priv->drm;
        struct intel_dsi *intel_dsi;
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        struct intel_connector *intel_connector;
        struct drm_connector *connector;
        struct drm_display_mode *scan, *fixed_mode = NULL;
 -      struct drm_i915_private *dev_priv = to_i915(dev);
        enum port port;
        unsigned int i;
  
  
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                dev_priv->mipi_mmio_base = VLV_MIPI_BASE;
 -      } else if (IS_BROXTON(dev_priv)) {
 +      } else if (IS_GEN9_LP(dev_priv)) {
                dev_priv->mipi_mmio_base = BXT_MIPI_BASE;
        } else {
                DRM_ERROR("Unsupported Mipi device to reg base");
         * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
         * port C. BXT isn't limited like this.
         */
 -      if (IS_BROXTON(dev_priv))
 +      if (IS_GEN9_LP(dev_priv))
                intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C);
        else if (port == PORT_A)
                intel_encoder->crtc_mask = BIT(PIPE_A);
index 8fc5f29e79a8661fb0d5b0757d0f7d410172e1de,0bffd3f0c15d796627d5d89a5775e5da7a0bf572..abe08885a5ba4ef1726d67809544534cf35a57df
@@@ -62,7 -62,7 +62,7 @@@ fw_domain_arm_timer(struct intel_uncore
  {
        d->wake_count++;
        hrtimer_start_range_ns(&d->timer,
-                              ktime_set(0, NSEC_PER_MSEC),
+                              NSEC_PER_MSEC,
                               NSEC_PER_MSEC,
                               HRTIMER_MODE_REL);
  }
@@@ -421,7 -421,8 +421,7 @@@ static void __intel_uncore_early_saniti
                                   GT_FIFO_CTL_RC6_POLICY_STALL);
        }
  
 -      /* Enable Decoupled MMIO only on BXT C stepping onwards */
 -      if (!IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
 +      if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST))
                info->has_decoupled_mmio = false;
  
        intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
@@@ -625,14 -626,7 +625,14 @@@ find_fw_domain(struct drm_i915_private 
                        dev_priv->uncore.fw_domains_table_entries,
                        fw_range_cmp);
  
 -      return entry ? entry->domains : 0;
 +      if (!entry)
 +              return 0;
 +
 +      WARN(entry->domains & ~dev_priv->uncore.fw_domains,
 +           "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
 +           entry->domains & ~dev_priv->uncore.fw_domains, offset);
 +
 +      return entry->domains;
  }
  
  static void
@@@ -1819,7 -1813,7 +1819,7 @@@ static reset_func intel_get_gpu_reset(s
                return ironlake_do_reset;
        else if (IS_G4X(dev_priv))
                return g4x_do_reset;
 -      else if (IS_G33(dev_priv))
 +      else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
                return g33_do_reset;
        else if (INTEL_INFO(dev_priv)->gen >= 3)
                return i915_do_reset;