Git Repo - linux.git/commitdiff
Merge drm/drm-next into drm-intel-next
author Jani Nikula <[email protected]>
Wed, 25 Jan 2023 09:41:16 +0000 (11:41 +0200)
committer Jani Nikula <[email protected]>
Wed, 25 Jan 2023 09:41:16 +0000 (11:41 +0200)
Backmerge to get the EDID handling changes.

Signed-off-by: Jani Nikula <[email protected]>
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/intel_fbdev.c
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_ggtt_gmch.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/selftests/i915_gem.c

index 05c17565cf31ff7c268752b8cc8987e229b35440,f47f00b162a484cc1deebd44812b2a7fc288015e..2184bc5b2be710d2d42e3c1e32da4fb7ff7d949a
@@@ -191,9 -191,9 +191,9 @@@ i915-y += 
          i915_vma_resource.o
  
  # general-purpose microcontroller (GuC) support
- i915-y += gt/uc/intel_uc.o \
-         gt/uc/intel_uc_debugfs.o \
-         gt/uc/intel_uc_fw.o \
+ i915-y += \
+         gt/uc/intel_gsc_fw.o \
+         gt/uc/intel_gsc_uc.o \
          gt/uc/intel_guc.o \
          gt/uc/intel_guc_ads.o \
          gt/uc/intel_guc_capture.o \
          gt/uc/intel_guc_submission.o \
          gt/uc/intel_huc.o \
          gt/uc/intel_huc_debugfs.o \
-         gt/uc/intel_huc_fw.o
+         gt/uc/intel_huc_fw.o \
+         gt/uc/intel_uc.o \
+         gt/uc/intel_uc_debugfs.o \
+         gt/uc/intel_uc_fw.o
  
  # graphics system controller (GSC) support
  i915-y += gt/intel_gsc.o
@@@ -263,7 -266,6 +266,7 @@@ i915-y += 
        display/intel_quirks.o \
        display/intel_sprite.o \
        display/intel_tc.o \
 +      display/intel_vblank.o \
        display/intel_vga.o \
        display/i9xx_plane.o \
        display/skl_scaler.o \
index f88ccc1bb3acc3edeaddab2c1dcaebec9ed50dc6,bbdb98d7c96e01ec45e004be48e3625fdb4459d5..19f3b5d92a5542c021ee4c472447da4b58801667
@@@ -170,7 -170,7 +170,7 @@@ static int intelfb_alloc(struct drm_fb_
                 * important and we should probably use that space with FBC or other
                 * features.
                 */
 -              if (size * 2 < dev_priv->stolen_usable_size)
 +              if (size * 2 < dev_priv->dsm.usable_size)
                        obj = i915_gem_object_create_stolen(dev_priv, size);
                if (IS_ERR(obj))
                        obj = i915_gem_object_create_shmem(dev_priv, size);
@@@ -267,26 -267,19 +267,19 @@@ static int intelfb_create(struct drm_fb
  
        info->fbops = &intelfb_ops;
  
-       /* setup aperture base/size for vesafb takeover */
        obj = intel_fb_obj(&intel_fb->base);
        if (i915_gem_object_is_lmem(obj)) {
                struct intel_memory_region *mem = obj->mm.region;
  
-               info->apertures->ranges[0].base = mem->io_start;
-               info->apertures->ranges[0].size = mem->io_size;
                /* Use fbdev's framebuffer from lmem for discrete */
                info->fix.smem_start =
                        (unsigned long)(mem->io_start +
                                        i915_gem_object_get_dma_address(obj, 0));
                info->fix.smem_len = obj->base.size;
        } else {
-               info->apertures->ranges[0].base = ggtt->gmadr.start;
-               info->apertures->ranges[0].size = ggtt->mappable_end;
                /* Our framebuffer is the entirety of fbdev's system memory */
                info->fix.smem_start =
-                       (unsigned long)(ggtt->gmadr.start + vma->node.start);
+                       (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
                info->fix.smem_len = vma->size;
        }
  
index 7d07fa3123ece747c046176f585a7091869fb15e,4b79c2d2d6177a47c23339632fffc72c9524e36d..9b172a1e90deb7d2bc8cda12ee66ba5bcbd5f5a6
@@@ -1627,7 -1627,7 +1627,7 @@@ static int skl_check_main_surface(struc
        u32 offset;
        int ret;
  
 -      if (w > max_width || w < min_width || h > max_height) {
 +      if (w > max_width || w < min_width || h > max_height || h < 1) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
                            w, h, min_width, max_width, max_height);
@@@ -1848,7 -1848,7 +1848,7 @@@ static bool bo_has_valid_encryption(str
  {
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
  
-       return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+       return intel_pxp_key_check(i915->pxp, obj, false) == 0;
  }
  
  static bool pxp_is_borked(struct drm_i915_gem_object *obj)
index 2e75e3c10403e9de0e0226e2ba8156221e84c3e9,0c7fe360f873313c4ca1b2aec41d64c15447f2ad..fe64c13fd3b4aa2980daad8908d5d97423143292
@@@ -8,6 -8,7 +8,7 @@@
  #include <linux/types.h>
  #include <linux/stop_machine.h>
  
+ #include <drm/drm_managed.h>
  #include <drm/i915_drm.h>
  #include <drm/intel-gtt.h>
  
  #include "intel_gtt.h"
  #include "gen8_ppgtt.h"
  
- static inline bool suspend_retains_ptes(struct i915_address_space *vm)
- {
-       return GRAPHICS_VER(vm->i915) >= 8 &&
-               !HAS_LMEM(vm->i915) &&
-               vm->is_ggtt;
- }
  static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
                                   unsigned long color,
                                   u64 *start,
@@@ -104,23 -98,6 +98,6 @@@ int i915_ggtt_init_hw(struct drm_i915_p
        return 0;
  }
  
- /*
-  * Return the value of the last GGTT pte cast to an u64, if
-  * the system is supposed to retain ptes across resume. 0 otherwise.
-  */
- static u64 read_last_pte(struct i915_address_space *vm)
- {
-       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-       gen8_pte_t __iomem *ptep;
-       if (!suspend_retains_ptes(vm))
-               return 0;
-       GEM_BUG_ON(GRAPHICS_VER(vm->i915) < 8);
-       ptep = (typeof(ptep))ggtt->gsm + (ggtt_total_entries(ggtt) - 1);
-       return readq(ptep);
- }
  /**
   * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
   * @vm: The VM to suspend the mappings for
@@@ -184,10 -161,7 +161,7 @@@ retry
                i915_gem_object_unlock(obj);
        }
  
-       if (!suspend_retains_ptes(vm))
-               vm->clear_range(vm, 0, vm->total);
-       else
-               i915_vm_to_ggtt(vm)->probed_pte = read_last_pte(vm);
+       vm->clear_range(vm, 0, vm->total);
  
        vm->skip_pte_rewrite = save_skip_rewrite;
  
  
  void i915_ggtt_suspend(struct i915_ggtt *ggtt)
  {
+       struct intel_gt *gt;
        i915_ggtt_suspend_vm(&ggtt->vm);
        ggtt->invalidate(ggtt);
  
-       intel_gt_check_and_clear_faults(ggtt->vm.gt);
+       list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+               intel_gt_check_and_clear_faults(gt);
  }
  
  void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@@ -225,16 -202,21 +202,21 @@@ static void gen8_ggtt_invalidate(struc
  
  static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
  {
-       struct intel_uncore *uncore = ggtt->vm.gt->uncore;
        struct drm_i915_private *i915 = ggtt->vm.i915;
  
        gen8_ggtt_invalidate(ggtt);
  
-       if (GRAPHICS_VER(i915) >= 12)
-               intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
-                                     GEN12_GUC_TLB_INV_CR_INVALIDATE);
-       else
-               intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+       if (GRAPHICS_VER(i915) >= 12) {
+               struct intel_gt *gt;
+               list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+                       intel_uncore_write_fw(gt->uncore,
+                                             GEN12_GUC_TLB_INV_CR,
+                                             GEN12_GUC_TLB_INV_CR_INVALIDATE);
+       } else {
+               intel_uncore_write_fw(ggtt->vm.gt->uncore,
+                                     GEN8_GTCR, GEN8_GTCR_INVALIDATE);
+       }
  }
  
  u64 gen8_ggtt_pte_encode(dma_addr_t addr,
@@@ -287,8 -269,11 +269,11 @@@ static void gen8_ggtt_insert_entries(st
         */
  
        gte = (gen8_pte_t __iomem *)ggtt->gsm;
-       gte += vma_res->start / I915_GTT_PAGE_SIZE;
-       end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+       gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
+       end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+       while (gte < end)
+               gen8_set_pte(gte++, vm->scratch[0]->encode);
+       end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
  
        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                gen8_set_pte(gte++, pte_encode | addr);
@@@ -338,9 -323,12 +323,12 @@@ static void gen6_ggtt_insert_entries(st
        dma_addr_t addr;
  
        gte = (gen6_pte_t __iomem *)ggtt->gsm;
-       gte += vma_res->start / I915_GTT_PAGE_SIZE;
-       end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;
+       gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
  
+       end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
+       while (gte < end)
+               iowrite32(vm->scratch[0]->encode, gte++);
+       end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
        for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
                iowrite32(vm->pte_encode(addr, level, flags), gte++);
        GEM_BUG_ON(gte > end);
@@@ -361,27 -349,6 +349,6 @@@ static void nop_clear_range(struct i915
  {
  }
  
- static void gen8_ggtt_clear_range(struct i915_address_space *vm,
-                                 u64 start, u64 length)
- {
-       struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
-       unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
-       unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
-       const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
-       gen8_pte_t __iomem *gtt_base =
-               (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
-       const int max_entries = ggtt_total_entries(ggtt) - first_entry;
-       int i;
-       if (WARN(num_entries > max_entries,
-                "First entry = %d; Num entries = %d (max=%d)\n",
-                first_entry, num_entries, max_entries))
-               num_entries = max_entries;
-       for (i = 0; i < num_entries; i++)
-               gen8_set_pte(&gtt_base[i], scratch_pte);
- }
  static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
  {
        /*
@@@ -551,8 -518,6 +518,6 @@@ static int init_ggtt(struct i915_ggtt *
        struct drm_mm_node *entry;
        int ret;
  
-       ggtt->pte_lost = true;
        /*
         * GuC requires all resources that we're sharing with it to be placed in
         * non-WOPCM memory. If GuC is not present or not in use we still need a
@@@ -920,8 -885,8 +885,8 @@@ static void gen6_gmch_remove(struct i91
  
  static struct resource pci_resource(struct pci_dev *pdev, int bar)
  {
 -      return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
 -                                             pci_resource_len(pdev, bar));
 +      return DEFINE_RES_MEM(pci_resource_start(pdev, bar),
 +                            pci_resource_len(pdev, bar));
  }
  
  static int gen8_gmch_probe(struct i915_ggtt *ggtt)
        ggtt->vm.cleanup = gen6_gmch_remove;
        ggtt->vm.insert_page = gen8_ggtt_insert_page;
        ggtt->vm.clear_range = nop_clear_range;
-       if (intel_scanout_needs_vtd_wa(i915))
-               ggtt->vm.clear_range = gen8_ggtt_clear_range;
  
        ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
  
                        I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
        }
  
-       ggtt->invalidate = gen8_ggtt_invalidate;
+       if (intel_uc_wants_guc(&ggtt->vm.gt->uc))
+               ggtt->invalidate = guc_ggtt_invalidate;
+       else
+               ggtt->invalidate = gen8_ggtt_invalidate;
  
        ggtt->vm.vma_ops.bind_vma    = intel_ggtt_bind_vma;
        ggtt->vm.vma_ops.unbind_vma  = intel_ggtt_unbind_vma;
  
        ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
  
-       setup_private_pat(ggtt->vm.gt);
        return ggtt_probe_common(ggtt, size);
  }
  
@@@ -1115,7 -1079,7 +1079,7 @@@ static int gen6_gmch_probe(struct i915_
        ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
  
        ggtt->vm.clear_range = nop_clear_range;
-       if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
+       if (!HAS_FULL_PPGTT(i915))
                ggtt->vm.clear_range = gen6_ggtt_clear_range;
        ggtt->vm.insert_page = gen6_ggtt_insert_page;
        ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
@@@ -1196,7 -1160,14 +1160,14 @@@ static int ggtt_probe_hw(struct i915_gg
   */
  int i915_ggtt_probe_hw(struct drm_i915_private *i915)
  {
-       int ret;
+       struct intel_gt *gt;
+       int ret, i;
+       for_each_gt(gt, i915, i) {
+               ret = intel_gt_assign_ggtt(gt);
+               if (ret)
+                       return ret;
+       }
  
        ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
        if (ret)
        return 0;
  }
  
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
struct i915_ggtt *i915_ggtt_create(struct drm_i915_private *i915)
  {
-       if (GRAPHICS_VER(i915) < 6)
-               return intel_ggtt_gmch_enable_hw(i915);
+       struct i915_ggtt *ggtt;
  
-       return 0;
- }
+       ggtt = drmm_kzalloc(&i915->drm, sizeof(*ggtt), GFP_KERNEL);
+       if (!ggtt)
+               return ERR_PTR(-ENOMEM);
  
- void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
- {
-       GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
+       INIT_LIST_HEAD(&ggtt->gt_list);
  
-       ggtt->invalidate = guc_ggtt_invalidate;
-       ggtt->invalidate(ggtt);
+       return ggtt;
  }
  
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
  {
-       /* XXX Temporary pardon for error unload */
-       if (ggtt->invalidate == gen8_ggtt_invalidate)
-               return;
-       /* We should only be called after i915_ggtt_enable_guc() */
-       GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
-       ggtt->invalidate = gen8_ggtt_invalidate;
+       if (GRAPHICS_VER(i915) < 6)
+               return intel_ggtt_gmch_enable_hw(i915);
  
-       ggtt->invalidate(ggtt);
+       return 0;
  }
  
  /**
@@@ -1253,20 -1214,11 +1214,11 @@@ bool i915_ggtt_resume_vm(struct i915_ad
  {
        struct i915_vma *vma;
        bool write_domain_objs = false;
-       bool retained_ptes;
  
        drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
  
-       /*
-        * First fill our portion of the GTT with scratch pages if
-        * they were not retained across suspend.
-        */
-       retained_ptes = suspend_retains_ptes(vm) &&
-               !i915_vm_to_ggtt(vm)->pte_lost &&
-               !GEM_WARN_ON(i915_vm_to_ggtt(vm)->probed_pte != read_last_pte(vm));
-       if (!retained_ptes)
-               vm->clear_range(vm, 0, vm->total);
+       /* First fill our portion of the GTT with scratch pages */
+       vm->clear_range(vm, 0, vm->total);
  
        /* clflush objects bound into the GGTT and rebind them. */
        list_for_each_entry(vma, &vm->bound_list, vm_link) {
                        atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
  
                GEM_BUG_ON(!was_bound);
-               if (!retained_ptes) {
-                       /*
-                        * Clear the bound flags of the vma resource to allow
-                        * ptes to be repopulated.
-                        */
-                       vma->resource->bound_flags = 0;
-                       vma->ops->bind_vma(vm, NULL, vma->resource,
-                                          obj ? obj->cache_level : 0,
-                                          was_bound);
-               }
+               /*
+                * Clear the bound flags of the vma resource to allow
+                * ptes to be repopulated.
+                */
+               vma->resource->bound_flags = 0;
+               vma->ops->bind_vma(vm, NULL, vma->resource,
+                                  obj ? obj->cache_level : 0,
+                                  was_bound);
                if (obj) { /* only used during resume => exclusive access */
                        write_domain_objs |= fetch_and_zero(&obj->write_domain);
                        obj->read_domains |= I915_GEM_DOMAIN_GTT;
  
  void i915_ggtt_resume(struct i915_ggtt *ggtt)
  {
+       struct intel_gt *gt;
        bool flush;
  
-       intel_gt_check_and_clear_faults(ggtt->vm.gt);
+       list_for_each_entry(gt, &ggtt->gt_list, ggtt_link)
+               intel_gt_check_and_clear_faults(gt);
  
        flush = i915_ggtt_resume_vm(&ggtt->vm);
  
        if (flush)
                wbinvd_on_all_cpus();
  
-       if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
-               setup_private_pat(ggtt->vm.gt);
        intel_ggtt_restore_fences(ggtt);
  }
- void i915_ggtt_mark_pte_lost(struct drm_i915_private *i915, bool val)
- {
-       to_gt(i915)->ggtt->pte_lost = val;
- }
index 4192d06df0f25da19e370a288c31d704d6f7de12,0e36301036936f7f62da5f81c289a4725235a431..1c492eaee7d984960527acce6421186d6b3d2d5f
@@@ -6,7 -6,6 +6,6 @@@
  #include "intel_ggtt_gmch.h"
  
  #include <drm/intel-gtt.h>
- #include <drm/i915_drm.h>
  
  #include <linux/agp_backend.h>
  
@@@ -89,7 -88,8 +88,7 @@@ int intel_ggtt_gmch_probe(struct i915_g
  
        intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
  
 -      ggtt->gmadr =
 -              (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
 +      ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
  
        ggtt->vm.alloc_pt_dma = alloc_pt_dma;
        ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
index 518c8fb8fd12c721d64b7a44cc358b6f6f0fa549,a356ca490159e3e8250330c48ecf73464c6559bf..45773ce1deac223d481dda41cad1ab22b5c5bad3
@@@ -183,7 -183,7 +183,7 @@@ i915_debugfs_describe_obj(struct seq_fi
  
                seq_printf(m, " (%s offset: %08llx, size: %08llx, pages: %s",
                           stringify_vma_type(vma),
-                          vma->node.start, vma->node.size,
+                          i915_vma_offset(vma), i915_vma_size(vma),
                           stringify_page_sizes(vma->resource->page_sizes_gtt,
                                                NULL, 0));
                if (i915_vma_is_ggtt(vma) || i915_vma_is_dpt(vma)) {
@@@ -648,14 -648,13 +648,14 @@@ i915_drop_caches_get(void *data, u64 *v
  
        return 0;
  }
 +
  static int
  gt_drop_caches(struct intel_gt *gt, u64 val)
  {
        int ret;
  
        if (val & DROP_RESET_ACTIVE &&
 -          wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT))
 +          wait_for(intel_engines_are_idle(gt), 200))
                intel_gt_set_wedged(gt);
  
        if (val & DROP_RETIRE)
@@@ -763,6 -762,7 +763,6 @@@ static const struct drm_info_list i915_
        {"i915_sseu_status", i915_sseu_status, 0},
        {"i915_rps_boost_info", i915_rps_boost_info, 0},
  };
 -#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
  
  static const struct i915_debugfs_files {
        const char *name;
@@@ -795,6 -795,6 +795,6 @@@ void i915_debugfs_register(struct drm_i
        }
  
        drm_debugfs_create_files(i915_debugfs_list,
 -                               I915_DEBUGFS_ENTRIES,
 +                               ARRAY_SIZE(i915_debugfs_list),
                                 minor->debugfs_root, minor);
  }
index ac4c3c6f55413eba1ca9941e4b90a4909130be95,48fd82722f1235cfab423de618189ec38196beec..2a6e212f882445a12b0ea1ed0f82195230ed430a
  #include "intel_uncore.h"
  
  struct drm_i915_clock_gating_funcs;
 -struct drm_i915_gem_object;
 -struct drm_i915_private;
 -struct intel_connector;
 -struct intel_dp;
 -struct intel_encoder;
 -struct intel_limit;
 -struct intel_overlay_error_state;
  struct vlv_s0ix_state;
+ struct intel_pxp;
  
 -#define I915_GEM_GPU_DOMAINS \
 -      (I915_GEM_DOMAIN_RENDER | \
 -       I915_GEM_DOMAIN_SAMPLER | \
 -       I915_GEM_DOMAIN_COMMAND | \
 -       I915_GEM_DOMAIN_INSTRUCTION | \
 -       I915_GEM_DOMAIN_VERTEX)
 +#define GEM_QUIRK_PIN_SWIZZLED_PAGES  BIT(0)
  
 -#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 +/* Data Stolen Memory (DSM) aka "i915 stolen memory" */
 +struct i915_dsm {
 +      /*
 +       * The start and end of DSM which we can optionally use to create GEM
 +       * objects backed by stolen memory.
 +       *
 +       * Note that usable_size tells us exactly how much of this we are
 +       * actually allowed to use, given that some portion of it is in fact
 +       * reserved for use by hardware functions.
 +       */
 +      struct resource stolen;
  
 -#define GEM_QUIRK_PIN_SWIZZLED_PAGES  BIT(0)
 +      /*
 +       * Reserved portion of DSM.
 +       */
 +      struct resource reserved;
 +
 +      /*
 +       * Total size minus reserved ranges.
 +       *
 +       * DSM is segmented in hardware with different portions offlimits to
 +       * certain functions.
 +       *
 +       * The drm_mm is initialised to the total accessible range, as found
 +       * from the PCI config. On Broadwell+, this is further restricted to
 +       * avoid the first page! The upper end of DSM is reserved for hardware
 +       * functions and similarly removed from the accessible range.
 +       */
 +      resource_size_t usable_size;
 +};
  
  struct i915_suspend_saved_registers {
        u32 saveDSPARB;
@@@ -177,6 -163,19 +178,6 @@@ struct i915_gem_mm 
        u32 shrink_count;
  };
  
 -#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */
 -
 -unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
 -                                       u64 context);
 -
 -static inline unsigned long
 -i915_fence_timeout(const struct drm_i915_private *i915)
 -{
 -      return i915_fence_context_timeout(i915, U64_MAX);
 -}
 -
 -#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
 -
  struct i915_virtual_gpu {
        struct mutex lock; /* serialises sending of g2v_notify command pkts */
        bool active;
@@@ -206,7 -205,29 +207,7 @@@ struct drm_i915_private 
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;
  
 -      /**
 -       * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
 -       * end of stolen which we can optionally use to create GEM objects
 -       * backed by stolen memory. Note that stolen_usable_size tells us
 -       * exactly how much of this we are actually allowed to use, given that
 -       * some portion of it is in fact reserved for use by hardware functions.
 -       */
 -      struct resource dsm;
 -      /**
 -       * Reseved portion of Data Stolen Memory
 -       */
 -      struct resource dsm_reserved;
 -
 -      /*
 -       * Stolen memory is segmented in hardware with different portions
 -       * offlimits to certain functions.
 -       *
 -       * The drm_mm is initialised to the total accessible range, as found
 -       * from the PCI config. On Broadwell+, this is further restricted to
 -       * avoid the first page! The upper end of stolen memory is reserved for
 -       * hardware functions and similarly removed from the accessible range.
 -       */
 -      resource_size_t stolen_usable_size;     /* Total size minus reserved ranges */
 +      struct i915_dsm dsm;
  
        struct intel_uncore uncore;
        struct intel_uncore_mmio_debug mmio_debug;
  
        struct i915_gpu_error gpu_error;
  
 -      /*
 -       * Shadows for CHV DPLL_MD regs to keep the state
 -       * checker somewhat working in the presence hardware
 -       * crappiness (can't read out DPLL_MD for pipes B & C).
 -       */
 -      u32 chv_dpll_md[I915_MAX_PIPES];
 -      u32 bxt_phy_grc;
 -
        u32 suspend_count;
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state *vlv_s0ix_state;
                struct file *mmap_singleton;
        } gem;
  
 -      u8 pch_ssc_use;
 -
+       struct intel_pxp *pxp;
        /* For i915gm/i945gm vblank irq workaround */
        u8 vblank_enabled;
  
        bool irq_enabled;
  
 -      /*
 -       * DG2: Mask of PHYs that were not calibrated by the firmware
 -       * and should not be used.
 -       */
 -      u8 snps_phy_failed_calibration;
 -
        struct i915_pmu pmu;
  
        struct i915_drm_clients clients;
@@@ -430,6 -469,9 +433,6 @@@ static inline struct intel_gt *to_gt(st
  
  #define INTEL_REVID(dev_priv) (to_pci_dev((dev_priv)->drm.dev)->revision)
  
 -#define HAS_DSB(dev_priv)     (INTEL_INFO(dev_priv)->display.has_dsb)
 -#define HAS_DSC(__i915)               (RUNTIME_INFO(__i915)->has_dsc)
 -
  #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
  #define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step)
  #define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step)
@@@ -841,9 -883,6 +844,9 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  #define HAS_RPS(dev_priv)     (INTEL_INFO(dev_priv)->has_rps)
  
  #define HAS_DMC(dev_priv)     (RUNTIME_INFO(dev_priv)->has_dmc)
 +#define HAS_DSB(dev_priv)     (INTEL_INFO(dev_priv)->display.has_dsb)
 +#define HAS_DSC(__i915)               (RUNTIME_INFO(__i915)->has_dsc)
 +#define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
  
  #define HAS_HECI_PXP(dev_priv) \
        (INTEL_INFO(dev_priv)->has_heci_pxp)
  
  #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)   (INTEL_INFO(dev_priv)->has_global_mocs)
  
- #define HAS_PXP(dev_priv)  ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
-                           INTEL_INFO(dev_priv)->has_pxp) && \
-                           VDBOX_MASK(to_gt(dev_priv)))
  #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
  
  #define HAS_GMD_ID(i915)      (INTEL_INFO(i915)->has_gmd_id)
  #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
                                 2 : HAS_L3_DPF(dev_priv))
  
 -#define GT_FREQUENCY_MULTIPLIER 50
 -#define GEN9_FREQ_SCALER 3
 -
  #define INTEL_NUM_PIPES(dev_priv) (hweight8(RUNTIME_INFO(dev_priv)->pipe_mask))
  
  #define HAS_DISPLAY(dev_priv) (RUNTIME_INFO(dev_priv)->pipe_mask != 0)
index 4c299800cfeddb24d8101a14c5497dbc7781ff95,243419783052333207bffa1b38be18e8e90a3b34..3d77679bf211bf82430a9aff1f90f5908403bf40
@@@ -18,8 -18,6 +18,8 @@@ struct drm_i915_gem_object
  struct i915_address_space;
  struct i915_gem_ww_ctx;
  
 +#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 +
  int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                                            struct sg_table *pages);
  void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
@@@ -46,7 -44,8 +46,8 @@@ int i915_gem_gtt_insert(struct i915_add
  #define PIN_HIGH              BIT_ULL(5)
  #define PIN_OFFSET_BIAS               BIT_ULL(6)
  #define PIN_OFFSET_FIXED      BIT_ULL(7)
- #define PIN_VALIDATE          BIT_ULL(8) /* validate placement only, no need to call unpin() */
+ #define PIN_OFFSET_GUARD      BIT_ULL(8)
+ #define PIN_VALIDATE          BIT_ULL(9) /* validate placement only, no need to call unpin() */
  
  #define PIN_GLOBAL            BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */
  #define PIN_USER              BIT_ULL(11) /* I915_VMA_LOCAL_BIND */
index e89236cf96dccda00fdb366378d50e82a359640b,8b2cf980f323d201154c8189d3d55211924eed08..bad36a67d873e85f0bc8da5c0bde63d4bb7a7a7e
  
  #define GU_CNTL                               _MMIO(0x101010)
  #define   LMEM_INIT                   REG_BIT(7)
+ #define   DRIVERFLR                   REG_BIT(31)
+ #define GU_DEBUG                      _MMIO(0x101018)
+ #define   DRIVERFLR_STATUS            REG_BIT(31)
  
  #define GEN6_STOLEN_RESERVED          _MMIO(0x1082C0)
  #define GEN6_STOLEN_RESERVED_ADDR_MASK        (0xFFF << 20)
  #define  RESET_PCH_HANDSHAKE_ENABLE   REG_BIT(4)
  
  #define GEN8_CHICKEN_DCPR_1                   _MMIO(0x46430)
 +#define   LATENCY_REPORTING_REMOVED_PIPE_D    REG_BIT(31)
  #define   SKL_SELECT_ALTERNATE_DC_EXIT                REG_BIT(30)
  #define   LATENCY_REPORTING_REMOVED_PIPE_C    REG_BIT(25)
  #define   LATENCY_REPORTING_REMOVED_PIPE_B    REG_BIT(24)
@@@ -8103,7 -8105,7 +8106,7 @@@ enum skl_power_gate 
  #define DSB_TAIL(pipe, id)            _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4)
  #define DSB_CTRL(pipe, id)            _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8)
  #define   DSB_ENABLE                  (1 << 31)
 -#define   DSB_STATUS                  (1 << 0)
 +#define   DSB_STATUS_BUSY             (1 << 0)
  
  #define CLKREQ_POLICY                 _MMIO(0x101038)
  #define  CLKREQ_POLICY_MEM_UP_OVRD    REG_BIT(1)
index 0917315a67de609f24f4e8a31bf67fc5c8dae8b6,2535b9684bd19343096edfa548b34775b06b32c0..d91d0ade8abddd100c5b4c7ef2be0a6e2d639dad
@@@ -44,7 -44,7 +44,7 @@@ static void trash_stolen(struct drm_i91
  {
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
        const u64 slot = ggtt->error_capture.start;
 -      const resource_size_t size = resource_size(&i915->dsm);
 +      const resource_size_t size = resource_size(&i915->dsm.stolen);
        unsigned long page;
        u32 prng = 0x12345678;
  
@@@ -53,7 -53,7 +53,7 @@@
                return;
  
        for (page = 0; page < size; page += PAGE_SIZE) {
 -              const dma_addr_t dma = i915->dsm.start + page;
 +              const dma_addr_t dma = i915->dsm.stolen.start + page;
                u32 __iomem *s;
                int x;
  
@@@ -127,6 -127,8 +127,8 @@@ static void igt_pm_resume(struct drm_i9
         */
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                i915_ggtt_resume(to_gt(i915)->ggtt);
+               if (GRAPHICS_VER(i915) >= 8)
+                       setup_private_pat(to_gt(i915));
                i915_gem_resume(i915);
        }
  }