Git Repo - linux.git/commitdiff
Merge tag 'drm-intel-gt-next-2020-11-12-1' of git://anongit.freedesktop.org/drm/drm...
authorDave Airlie <[email protected]>
Fri, 13 Nov 2020 05:01:13 +0000 (15:01 +1000)
committerDave Airlie <[email protected]>
Fri, 13 Nov 2020 05:01:57 +0000 (15:01 +1000)
Cross-subsystem Changes:
- DMA mapped scatterlist fixes in i915 to unblock merging of
  https://lkml.org/lkml/2020/9/27/70 (Tvrtko, Tom)

Driver Changes:

- Fix for user reported issue #2381 (Graphical output stops with "switching to inteldrmfb from simple"):
  Mark initial fb obj as WT on eLLC machines to avoid rcu lockup during fbdev init (Ville, Chris)
- Fix for Tigerlake (and earlier) to avoid spurious empty CSB events leading to hang (Chris, Bruce)
- Delay execlist processing for Tigerlake to avoid hang (Chris)
- Fix for Tigerlake RCS engine health check through heartbeat (Chris)
- Fix for Tigerlake reserved MOCS entries (Ayaz, Chris)
- Fix Media power gate sequence on Tigerlake (Rodrigo)
- Enable eLLC caching of display buffers for SKL+ (Ville)
- Support parsing of oversize batches on Gen9 (Matt, Chris)
- Exclude low pages (128KiB) of stolen from use to avoid thrashing during reset (Chris)
- Flush engines before Tigerlake breadcrumbs (Chris)

- Use the local HWSP offset during submission (Chris)
- Flush coherency domains on first set-domain-ioctl (Chris, Zbigniew)
- Use the active reference on the vma while capturing to avoid use-after-free (Chris)
- Fix MOCS PTE setting for gen9+ (Ville)
- Avoid NULL dereference on IPS driver callback while unbinding i915 (Chris)
- Avoid NULL dereference from PT/PD stash allocation error (Matt)
- Hold request reference for canceling an active context (Chris)
- Avoid infinite loop on x86-32 when mapping a lot of objects (Chris)
- Disallow WC mappings when processor doesn't support them (Chris)
- Return correct error in i915_gem_object_copy_blt() error path (Dan)
- Return correct error in intel_context_create_request() error path (Maarten)
- Tune down GuC communication enabled/disabled messages to debug (Jani)
- Fix rebased commit "Remove i915_request.lock requirement for execution callbacks" (Chris)
- Cancel outstanding work after disabling heartbeats on an engine (Chris)
- Signal cancelled requests (Chris)
- Retire cancelled requests on unload (Chris)
- Scrub HW state on driver remove (Chris)
- Undo forced context restores after trivial preemptions (Chris)
- Handle PCI unbind in PMU code (Tvrtko)
- Fix CPU hotplug with multiple GPUs in PMU code (Tvrtko)
- Correctly set SFC capability for video engines (Venkata)

- Update GuC code to use firmware v49.0.1 (John, Matthew B., Daniele, Oscar, Michel, Rodrigo, Michal)
- Improve GuC warnings on loading failure (John)
- Avoid ownership race in buffer pool by clearing age (Chris)
- Use MMIO to read CSB in case of failure (Chris, Mika)
- Show engine properties in engine state dump to indicate changes (Chris, Joonas)
- Break up error capture compression loops with cond_resched() (Chris)
- Reduce GPU error capture mutex hold time to avoid khungtaskd (Chris)
- Serialise debugfs i915_gem_objects with ctx->mutex (Chris)
- Always test execution status on closing the context and close if not persistent (Chris)
- Avoid mixing integer types during batch copies (Chris, Jared)
- Skip over MI_NOOP when parsing to avoid overhead (Chris)
- Hold onto an explicit ref to i915_vma_work.pinned (Chris)
- Perform all asynchronous waits prior to marking payload start (Chris)
- Pull phys pread/pwrite implementations to the backend (Matt)

- Improve record of hung engines in error state (Tvrtko)
- Allow backends to override pread implementation (Matt)
- Reinforce LRC poisoning checks to confirm context survives execution (Chris)
- Fix memory region max size calculation (Matt)
- Fix order when adding blocks to memory region (Matt)
- Eliminate unused intel_virtual_engine_get_sibling func (Chris)
- Cleanup kasan warning for on-stack (unsigned long) casting (Chris)
- Onion unwind for scratch page allocation failure (Chris)
- Poison stolen pages before use (Chris)
- Selftest improvements (Chris)

Signed-off-by: Dave Airlie <[email protected]>
From: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
20 files changed:
1  2 
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_mocs.c
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_scatterlist.h
drivers/gpu/drm/i915/intel_pm.c

index 3389ac972d16356b5843a4be81f79b95c0a7f322,ffeaf1b9b1bb4c3abab30aaac133a551242edebd..00d24000b5e8cddfceee8633f21840df0d167741
@@@ -39,18 -39,9 +39,18 @@@ static struct i915_global_object 
        struct kmem_cache *slab_objects;
  } global;
  
 +static const struct drm_gem_object_funcs i915_gem_object_funcs;
 +
  struct drm_i915_gem_object *i915_gem_object_alloc(void)
  {
 -      return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
 +      struct drm_i915_gem_object *obj;
 +
 +      obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
 +      if (!obj)
 +              return NULL;
 +      obj->base.funcs = &i915_gem_object_funcs;
 +
 +      return obj;
  }
  
  void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@@ -82,6 -73,8 +82,8 @@@ void i915_gem_object_init(struct drm_i9
        obj->mm.madv = I915_MADV_WILLNEED;
        INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->mm.get_page.lock);
+       INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
+       mutex_init(&obj->mm.get_dma_page.lock);
  
        if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
                i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
@@@ -110,7 -103,7 +112,7 @@@ void i915_gem_object_set_cache_coherenc
                !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
  }
  
 -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 +static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
  {
        struct drm_i915_gem_object *obj = to_intel_bo(gem);
        struct drm_i915_file_private *fpriv = file->driver_priv;
@@@ -273,7 -266,7 +275,7 @@@ static void __i915_gem_free_work(struc
        i915_gem_flush_free_objects(i915);
  }
  
 -void i915_gem_free_object(struct drm_gem_object *gem_obj)
 +static void i915_gem_free_object(struct drm_gem_object *gem_obj)
  {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@@ -412,12 -405,6 +414,12 @@@ int __init i915_global_objects_init(voi
        return 0;
  }
  
 +static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 +      .free = i915_gem_free_object,
 +      .close = i915_gem_close_object,
 +      .export = i915_gem_prime_export,
 +};
 +
  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  #include "selftests/huge_gem_object.c"
  #include "selftests/huge_pages.c"
index eaf3d4147be07d3538eddb8f465f9abe46882375,3cad6a07d0a64f1319502e015c1622d28015c180..be14486f63a7aa1e157625a7d34a3573112abbd7
@@@ -38,6 -38,9 +38,6 @@@ void __i915_gem_object_release_shmem(st
  
  int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
  
 -void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 -void i915_gem_free_object(struct drm_gem_object *obj);
 -
  void i915_gem_flush_free_objects(struct drm_i915_private *i915);
  
  struct sg_table *
@@@ -272,17 -275,31 +272,35 @@@ int i915_gem_object_set_tiling(struct d
                               unsigned int tiling, unsigned int stride);
  
  struct scatterlist *
+ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+                        struct i915_gem_object_page_iter *iter,
+                        unsigned int n,
+                        unsigned int *offset);
+ static inline struct scatterlist *
  i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-                      unsigned int n, unsigned int *offset);
+                      unsigned int n,
+                      unsigned int *offset)
+ {
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset);
+ }
+ static inline struct scatterlist *
+ i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
+                          unsigned int n,
+                          unsigned int *offset)
+ {
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset);
+ }
  
  struct page *
  i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);
  
 +struct page *
 +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
 +                             unsigned int n);
 +
  dma_addr_t
  i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
index f60ca6dc911f29d7d8881b566bc51f11e4554bb4,138020b5edf7563ac13e160f3656f6e1083cb9cb..e2c7b2a7895fff25d7c4d13a24112ce95939c3a3
@@@ -33,6 -33,8 +33,8 @@@ void __i915_gem_object_set_pages(struc
  
        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
+       obj->mm.get_dma_page.sg_pos = pages->sgl;
+       obj->mm.get_dma_page.sg_idx = 0;
  
        obj->mm.pages = pages;
  
@@@ -155,6 -157,8 +157,8 @@@ static void __i915_gem_object_reset_pag
        rcu_read_lock();
        radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
                radix_tree_delete(&obj->mm.get_page.radix, iter.index);
+       radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
+               radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
        rcu_read_unlock();
  }
  
@@@ -162,6 -166,8 +166,6 @@@ static void unmap_object(struct drm_i91
  {
        if (is_vmalloc_addr(ptr))
                vunmap(ptr);
 -      else
 -              kunmap(kmap_to_page(ptr));
  }
  
  struct sg_table *
@@@ -232,21 -238,34 +236,21 @@@ unlock
        return err;
  }
  
 -static inline pte_t iomap_pte(resource_size_t base,
 -                            dma_addr_t offset,
 -                            pgprot_t prot)
 -{
 -      return pte_mkspecial(pfn_pte((base + offset) >> PAGE_SHIFT, prot));
 -}
 -
  /* The 'mapping' part of i915_gem_object_pin_map() below */
 -static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 -                               enum i915_map_type type)
 +static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj,
 +              enum i915_map_type type)
  {
 -      unsigned long n_pte = obj->base.size >> PAGE_SHIFT;
 -      struct sg_table *sgt = obj->mm.pages;
 -      pte_t *stack[32], **mem;
 -      struct vm_struct *area;
 +      unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i;
 +      struct page *stack[32], **pages = stack, *page;
 +      struct sgt_iter iter;
        pgprot_t pgprot;
 +      void *vaddr;
  
 -      if (!i915_gem_object_has_struct_page(obj) && type != I915_MAP_WC)
 -              return NULL;
 -
 -      if (GEM_WARN_ON(type == I915_MAP_WC &&
 -                      !static_cpu_has(X86_FEATURE_PAT)))
 -              return NULL;
 -
 -      /* A single page can always be kmapped */
 -      if (n_pte == 1 && type == I915_MAP_WB) {
 -              struct page *page = sg_page(sgt->sgl);
 -
 +      switch (type) {
 +      default:
 +              MISSING_CASE(type);
 +              fallthrough;    /* to use PAGE_KERNEL anyway */
 +      case I915_MAP_WB:
                /*
                 * On 32b, highmem using a finite set of indirect PTE (i.e.
                 * vmap) to provide virtual mappings of the high pages.
                 * forever.
                 *
                 * So if the page is beyond the 32b boundary, make an explicit
 -               * vmap. On 64b, this check will be optimised away as we can
 -               * directly kmap any page on the system.
 +               * vmap.
                 */
 -              if (!PageHighMem(page))
 -                      return kmap(page);
 -      }
 -
 -      mem = stack;
 -      if (n_pte > ARRAY_SIZE(stack)) {
 -              /* Too big for stack -- allocate temporary array instead */
 -              mem = kvmalloc_array(n_pte, sizeof(*mem), GFP_KERNEL);
 -              if (!mem)
 -                      return NULL;
 -      }
 -
 -      area = alloc_vm_area(obj->base.size, mem);
 -      if (!area) {
 -              if (mem != stack)
 -                      kvfree(mem);
 -              return NULL;
 -      }
 -
 -      switch (type) {
 -      default:
 -              MISSING_CASE(type);
 -              /* fallthrough - to use PAGE_KERNEL anyway */
 -      case I915_MAP_WB:
 +              if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
 +                      return page_address(sg_page(obj->mm.pages->sgl));
                pgprot = PAGE_KERNEL;
                break;
        case I915_MAP_WC:
                break;
        }
  
 -      if (i915_gem_object_has_struct_page(obj)) {
 -              struct sgt_iter iter;
 -              struct page *page;
 -              pte_t **ptes = mem;
 +      if (n_pages > ARRAY_SIZE(stack)) {
 +              /* Too big for stack -- allocate temporary array instead */
 +              pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
 +              if (!pages)
 +                      return NULL;
 +      }
  
 -              for_each_sgt_page(page, iter, sgt)
 -                      **ptes++ = mk_pte(page, pgprot);
 -      } else {
 -              resource_size_t iomap;
 -              struct sgt_iter iter;
 -              pte_t **ptes = mem;
 -              dma_addr_t addr;
 +      i = 0;
 +      for_each_sgt_page(page, iter, obj->mm.pages)
 +              pages[i++] = page;
 +      vaddr = vmap(pages, n_pages, 0, pgprot);
 +      if (pages != stack)
 +              kvfree(pages);
 +      return vaddr;
 +}
  
 -              iomap = obj->mm.region->iomap.base;
 -              iomap -= obj->mm.region->region.start;
 +static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
 +              enum i915_map_type type)
 +{
 +      resource_size_t iomap = obj->mm.region->iomap.base -
 +              obj->mm.region->region.start;
 +      unsigned long n_pfn = obj->base.size >> PAGE_SHIFT;
 +      unsigned long stack[32], *pfns = stack, i;
 +      struct sgt_iter iter;
 +      dma_addr_t addr;
 +      void *vaddr;
 +
 +      if (type != I915_MAP_WC)
 +              return NULL;
  
 -              for_each_sgt_daddr(addr, iter, sgt)
 -                      **ptes++ = iomap_pte(iomap, addr, pgprot);
 +      if (n_pfn > ARRAY_SIZE(stack)) {
 +              /* Too big for stack -- allocate temporary array instead */
 +              pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL);
 +              if (!pfns)
 +                      return NULL;
        }
  
 -      if (mem != stack)
 -              kvfree(mem);
 -
 -      return area->addr;
 +      i = 0;
 +      for_each_sgt_daddr(addr, iter, obj->mm.pages)
 +              pfns[i++] = (iomap + addr) >> PAGE_SHIFT;
 +      vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO));
 +      if (pfns != stack)
 +              kvfree(pfns);
 +      return vaddr;
  }
  
  /* get, pin, and map the pages of the object into kernel space */
@@@ -368,13 -390,7 +372,13 @@@ void *i915_gem_object_pin_map(struct dr
        }
  
        if (!ptr) {
 -              ptr = i915_gem_object_map(obj, type);
 +              if (GEM_WARN_ON(type == I915_MAP_WC &&
 +                              !static_cpu_has(X86_FEATURE_PAT)))
 +                      ptr = NULL;
 +              else if (i915_gem_object_has_struct_page(obj))
 +                      ptr = i915_gem_object_map_page(obj, type);
 +              else
 +                      ptr = i915_gem_object_map_pfn(obj, type);
                if (!ptr) {
                        err = -ENOMEM;
                        goto err_unpin;
@@@ -438,11 -454,12 +442,12 @@@ void __i915_gem_object_release_map(stru
  }
  
  struct scatterlist *
- i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
-                      unsigned int n,
-                      unsigned int *offset)
+ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
+                        struct i915_gem_object_page_iter *iter,
+                        unsigned int n,
+                        unsigned int *offset)
  {
-       struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
+       const bool dma = iter == &obj->mm.get_dma_page;
        struct scatterlist *sg;
        unsigned int idx, count;
  
  
        sg = iter->sg_pos;
        idx = iter->sg_idx;
-       count = __sg_page_count(sg);
+       count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
  
        while (idx + count <= n) {
                void *entry;
  
                idx += count;
                sg = ____sg_next(sg);
-               count = __sg_page_count(sg);
+               count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }
  
  scan:
        while (idx + count <= n) {
                idx += count;
                sg = ____sg_next(sg);
-               count = __sg_page_count(sg);
+               count = dma ? __sg_dma_page_count(sg) : __sg_page_count(sg);
        }
  
        *offset = n - idx;
@@@ -562,20 -579,6 +567,20 @@@ i915_gem_object_get_page(struct drm_i91
        return nth_page(sg_page(sg), offset);
  }
  
 +/* Like i915_gem_object_get_page(), but mark the returned page dirty */
 +struct page *
 +i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
 +                             unsigned int n)
 +{
 +      struct page *page;
 +
 +      page = i915_gem_object_get_page(obj, n);
 +      if (!obj->mm.dirty)
 +              set_page_dirty(page);
 +
 +      return page;
 +}
 +
  dma_addr_t
  i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
        struct scatterlist *sg;
        unsigned int offset;
  
-       sg = i915_gem_object_get_sg(obj, n, &offset);
+       sg = i915_gem_object_get_sg_dma(obj, n, &offset);
  
        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
index 84b2707d8b17a16bebe815ccca0a56da11a56867,69a37a1fcff2db266e9d184cec00e38926b46f9d..29bffc6afcc151f4db43896577457f593239e167
@@@ -251,7 -251,7 +251,7 @@@ static void vlv_get_stolen_reserved(str
        switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
        default:
                MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
 -              /* fall through */
 +              fallthrough;
        case GEN7_STOLEN_RESERVED_1M:
                *size = 1024 * 1024;
                break;
@@@ -418,7 -418,7 +418,7 @@@ static int i915_gem_init_stolen(struct 
        case 4:
                if (!IS_G4X(i915))
                        break;
 -              /* fall through */
 +              fallthrough;
        case 5:
                g4x_get_stolen_reserved(i915, uncore,
                                        &reserved_base, &reserved_size);
                break;
        default:
                MISSING_CASE(INTEL_GEN(i915));
 -              /* fall-through */
 +              fallthrough;
        case 11:
        case 12:
                icl_get_stolen_reserved(i915, uncore,
        return 0;
  }
  
+ static void dbg_poison(struct i915_ggtt *ggtt,
+                      dma_addr_t addr, resource_size_t size,
+                      u8 x)
+ {
+ #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+       if (!drm_mm_node_allocated(&ggtt->error_capture))
+               return;
+       if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
+               return; /* beware stop_machine() inversion */
+       GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+       mutex_lock(&ggtt->error_mutex);
+       while (size) {
+               void __iomem *s;
+               ggtt->vm.insert_page(&ggtt->vm, addr,
+                                    ggtt->error_capture.start,
+                                    I915_CACHE_NONE, 0);
+               mb();
+               s = io_mapping_map_wc(&ggtt->iomap,
+                                     ggtt->error_capture.start,
+                                     PAGE_SIZE);
+               memset_io(s, x, PAGE_SIZE);
+               io_mapping_unmap(s);
+               addr += PAGE_SIZE;
+               size -= PAGE_SIZE;
+       }
+       mb();
+       ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
+       mutex_unlock(&ggtt->error_mutex);
+ #endif
+ }
  static struct sg_table *
  i915_pages_create_for_stolen(struct drm_device *dev,
                             resource_size_t offset, resource_size_t size)
@@@ -540,6 -577,11 +577,11 @@@ static int i915_gem_object_get_pages_st
        if (IS_ERR(pages))
                return PTR_ERR(pages);
  
+       dbg_poison(&to_i915(obj->base.dev)->ggtt,
+                  sg_dma_address(pages->sgl),
+                  sg_dma_len(pages->sgl),
+                  POISON_INUSE);
        __i915_gem_object_set_pages(obj, pages, obj->stolen->size);
  
        return 0;
@@@ -549,6 -591,12 +591,12 @@@ static void i915_gem_object_put_pages_s
                                             struct sg_table *pages)
  {
        /* Should only be called from i915_gem_object_release_stolen() */
+       dbg_poison(&to_i915(obj->base.dev)->ggtt,
+                  sg_dma_address(pages->sgl),
+                  sg_dma_len(pages->sgl),
+                  POISON_FREE);
        sg_free_table(pages);
        kfree(pages);
  }
index 5bfb5f7ed02c9aa1e78b0a7a0d06b6a0bc8447ab,9018582e7229ea3faa61b649008ae82507ab900f..0b31670343f5a2a4b04e10a4ed81c0e37be257a9
@@@ -214,7 -214,7 +214,7 @@@ u32 intel_engine_context_size(struct in
                break;
        default:
                MISSING_CASE(class);
 -              /* fall through */
 +              fallthrough;
        case VIDEO_DECODE_CLASS:
        case VIDEO_ENHANCEMENT_CLASS:
        case COPY_ENGINE_CLASS:
@@@ -305,8 -305,9 +305,9 @@@ static int intel_engine_setup(struct in
        engine->i915 = i915;
        engine->gt = gt;
        engine->uncore = gt->uncore;
-       engine->hw_id = engine->guc_id = info->hw_id;
        engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
+       engine->hw_id = info->hw_id;
+       engine->guc_id = MAKE_GUC_ID(info->class, info->instance);
  
        engine->class = info->class;
        engine->instance = info->instance;
@@@ -371,7 -372,8 +372,8 @@@ static void __setup_engine_capabilities
                 * instances.
                 */
                if ((INTEL_GEN(i915) >= 11 &&
-                    engine->gt->info.vdbox_sfc_access & engine->mask) ||
+                    (engine->gt->info.vdbox_sfc_access &
+                     BIT(engine->instance))) ||
                    (INTEL_GEN(i915) >= 9 && engine->instance == 0))
                        engine->uabi_capabilities |=
                                I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
@@@ -1599,6 -1601,41 +1601,41 @@@ static unsigned long list_count(struct 
        return count;
  }
  
+ static unsigned long read_ul(void *p, size_t x)
+ {
+       return *(unsigned long *)(p + x);
+ }
+ static void print_properties(struct intel_engine_cs *engine,
+                            struct drm_printer *m)
+ {
+       static const struct pmap {
+               size_t offset;
+               const char *name;
+       } props[] = {
+ #define P(x) { \
+       .offset = offsetof(typeof(engine->props), x), \
+       .name = #x \
+ }
+               P(heartbeat_interval_ms),
+               P(max_busywait_duration_ns),
+               P(preempt_timeout_ms),
+               P(stop_timeout_ms),
+               P(timeslice_duration_ms),
+               {},
+ #undef P
+       };
+       const struct pmap *p;
+       drm_printf(m, "\tProperties:\n");
+       for (p = props; p->name; p++)
+               drm_printf(m, "\t\t%s: %lu [default %lu]\n",
+                          p->name,
+                          read_ul(&engine->props, p->offset),
+                          read_ul(&engine->defaults, p->offset));
+ }
  void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...)
        drm_printf(m, "\tReset count: %d (global %d)\n",
                   i915_reset_engine_count(error, engine),
                   i915_reset_count(error));
+       print_properties(engine, m);
  
        drm_printf(m, "\tRequests:\n");
  
index 188a5f70177dfa8089e197ebe9662ca7998b47cf,5c8aab1f9beff3a1a1567284150403ae5f7cf498..cf94525be2c18c5f19abe623c6143810b83d5182
@@@ -835,7 -835,7 +835,7 @@@ static int gen8_gmch_probe(struct i915_
        u16 snb_gmch_ctl;
  
        /* TODO: We're not aware of mappable constraints on gen8 yet */
 -      if (!IS_DGFX(i915)) {
 +      if (!HAS_LMEM(i915)) {
                ggtt->gmadr = pci_resource(pdev, 2);
                ggtt->mappable_end = resource_size(&ggtt->gmadr);
        }
@@@ -1383,7 -1383,7 +1383,7 @@@ intel_partial_pages(const struct i915_g
        if (ret)
                goto err_sg_alloc;
  
-       iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
+       iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
        GEM_BUG_ON(!iter);
  
        sg = st->sgl;
        do {
                unsigned int len;
  
-               len = min(iter->length - (offset << PAGE_SHIFT),
+               len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
                          count << PAGE_SHIFT);
                sg_set_page(sg, NULL, len, 0);
                sg_dma_address(sg) =
@@@ -1434,7 -1434,7 +1434,7 @@@ i915_get_ggtt_vma_pages(struct i915_vm
        switch (vma->ggtt_view.type) {
        default:
                GEM_BUG_ON(vma->ggtt_view.type);
 -              /* fall through */
 +              fallthrough;
        case I915_GGTT_VIEW_NORMAL:
                vma->pages = vma->obj->mm.pages;
                return 0;
index 39179a3eee98455bc3f4ae70c3e51e172f9a7844,1ade9583c3c1d4291df1fc99082bc2d3c29c0b45..254873e1646e09ece1e658d167cd973fd9ab89c9
@@@ -109,7 -109,7 +109,7 @@@ struct drm_i915_mocs_table 
   * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
   * PTE and will be initialized to an invalid value.
   *
 - * The last two entries are reserved by the hardware. For ICL+ they
 + * The last few entries are reserved by the hardware. For ICL+ they
   * should be initialized according to bspec and never used, for older
   * platforms they should never be written to.
   *
                   LE_1_UC | LE_TC_2_LLC_ELLC, \
                   L3_1_UC), \
        MOCS_ENTRY(I915_MOCS_PTE, \
-                  LE_0_PAGETABLE | LE_TC_2_LLC_ELLC | LE_LRUM(3), \
+                  LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \
                   L3_3_WB)
  
  static const struct drm_i915_mocs_entry skl_mocs_table[] = {
@@@ -243,8 -243,9 +243,9 @@@ static const struct drm_i915_mocs_entr
         * only, __init_mocs_table() take care to program unused index with
         * this entry.
         */
-       MOCS_ENTRY(1, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
-                  L3_3_WB),
+       MOCS_ENTRY(I915_MOCS_PTE,
+                  LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
+                  L3_1_UC),
        GEN11_MOCS_ENTRIES,
  
        /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
@@@ -280,45 -281,12 +281,45 @@@ static const struct drm_i915_mocs_entr
                   L3_1_UC),
        /* Base - L3 + LeCC:PAT (Deprecated) */
        MOCS_ENTRY(I915_MOCS_PTE,
-                  LE_0_PAGETABLE | LE_TC_1_LLC,
+                  LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
                   L3_3_WB),
  
        GEN11_MOCS_ENTRIES
  };
  
 +static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
 +      /* Error */
 +      MOCS_ENTRY(0, 0, L3_0_DIRECT),
 +
 +      /* UC */
 +      MOCS_ENTRY(1, 0, L3_1_UC),
 +
 +      /* Reserved */
 +      MOCS_ENTRY(2, 0, L3_0_DIRECT),
 +      MOCS_ENTRY(3, 0, L3_0_DIRECT),
 +      MOCS_ENTRY(4, 0, L3_0_DIRECT),
 +
 +      /* WB - L3 */
 +      MOCS_ENTRY(5, 0, L3_3_WB),
 +      /* WB - L3 50% */
 +      MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
 +      /* WB - L3 25% */
 +      MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
 +      /* WB - L3 12.5% */
 +      MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),
 +
 +      /* HDC:L1 + L3 */
 +      MOCS_ENTRY(48, 0, L3_3_WB),
 +      /* HDC:L1 */
 +      MOCS_ENTRY(49, 0, L3_1_UC),
 +
 +      /* HW Reserved */
 +      MOCS_ENTRY(60, 0, L3_1_UC),
 +      MOCS_ENTRY(61, 0, L3_1_UC),
 +      MOCS_ENTRY(62, 0, L3_1_UC),
 +      MOCS_ENTRY(63, 0, L3_1_UC),
 +};
 +
  enum {
        HAS_GLOBAL_MOCS = BIT(0),
        HAS_ENGINE_MOCS = BIT(1),
@@@ -345,11 -313,7 +346,11 @@@ static unsigned int get_mocs_settings(c
  {
        unsigned int flags;
  
 -      if (INTEL_GEN(i915) >= 12) {
 +      if (IS_DG1(i915)) {
 +              table->size = ARRAY_SIZE(dg1_mocs_table);
 +              table->table = dg1_mocs_table;
 +              table->n_entries = GEN11_NUM_MOCS_ENTRIES;
 +      } else if (INTEL_GEN(i915) >= 12) {
                table->size  = ARRAY_SIZE(tgl_mocs_table);
                table->table = tgl_mocs_table;
                table->n_entries = GEN11_NUM_MOCS_ENTRIES;
index 16b48e72c36910e846a8239dbaf8ee9ef8ef965a,9ecf9520fa46a41368d195877e71940b99843920..a41b43f445b8ae3fee0f545a7d3a29e5ef10a3c9
@@@ -101,7 -101,7 +101,7 @@@ static void set_hwsp(struct intel_engin
                 */
                default:
                        GEM_BUG_ON(engine->id);
 -                      /* fallthrough */
 +                      fallthrough;
                case RCS0:
                        hwsp = RENDER_HWS_PGA_GEN7;
                        break;
@@@ -444,6 -444,7 +444,7 @@@ static void reset_cancel(struct intel_e
                i915_request_set_error_once(request, -EIO);
                i915_request_mark_complete(request);
        }
+       intel_engine_signal_breadcrumbs(engine);
  
        /* Remaining _unready_ requests will be nop'ed when submitted */
  
index 466ec671b3794741d5f6a3eb6e57cd5b85d5486c,a53928363b863bc0e1876a1fcf9c4faa92acdf79..0d88f17799ff5f1811678ae0b180e8b65c600d51
@@@ -390,16 -390,6 +390,16 @@@ static void gen5_rps_update(struct inte
        spin_unlock_irq(&mchdev_lock);
  }
  
 +static unsigned int gen5_invert_freq(struct intel_rps *rps,
 +                                   unsigned int val)
 +{
 +      /* Invert the frequency bin into an ips delay */
 +      val = rps->max_freq - val;
 +      val = rps->min_freq + val;
 +
 +      return val;
 +}
 +
  static bool gen5_rps_set(struct intel_rps *rps, u8 val)
  {
        struct intel_uncore *uncore = rps_to_uncore(rps);
        }
  
        /* Invert the frequency bin into an ips delay */
 -      val = rps->max_freq - val;
 -      val = rps->min_freq + val;
 +      val = gen5_invert_freq(rps, val);
  
        rgvswctl =
                (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
@@@ -509,7 -500,6 +509,7 @@@ static unsigned int init_emon(struct in
  
  static bool gen5_rps_enable(struct intel_rps *rps)
  {
 +      struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u8 fstart, vstart;
        u32 rgvmodectl;
        rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC);
        rps->ips.last_time2 = ktime_get_raw_ns();
  
 +      spin_lock(&i915->irq_lock);
 +      ilk_enable_display_irq(i915, DE_PCU_EVENT);
 +      spin_unlock(&i915->irq_lock);
 +
        spin_unlock_irq(&mchdev_lock);
  
        rps->ips.corr = init_emon(uncore);
  
  static void gen5_rps_disable(struct intel_rps *rps)
  {
 +      struct drm_i915_private *i915 = rps_to_i915(rps);
        struct intel_uncore *uncore = rps_to_uncore(rps);
        u16 rgvswctl;
  
        spin_lock_irq(&mchdev_lock);
  
 +      spin_lock(&i915->irq_lock);
 +      ilk_disable_display_irq(i915, DE_PCU_EVENT);
 +      spin_unlock(&i915->irq_lock);
 +
        rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
  
        /* Ack interrupts, disable EFC interrupt */
                           intel_uncore_read(uncore, MEMINTREN) &
                           ~MEMINT_EVAL_CHG_EN);
        intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
 -      intel_uncore_write(uncore, DEIER,
 -                         intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT);
 -      intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT);
 -      intel_uncore_write(uncore, DEIMR,
 -                         intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT);
  
        /* Go back to the starting frequency */
        gen5_rps_set(rps, rps->idle_freq);
@@@ -1286,9 -1272,8 +1286,9 @@@ static unsigned long __ips_gfx_val(stru
  {
        struct intel_rps *rps = container_of(ips, typeof(*rps), ips);
        struct intel_uncore *uncore = rps_to_uncore(rps);
 -      unsigned long t, corr, state1, corr2, state2;
 +      unsigned int t, state1, state2;
        u32 pxvid, ext_v;
 +      u64 corr, corr2;
  
        lockdep_assert_held(&mchdev_lock);
  
        else /* < 50 */
                corr = t * 301 + 1004;
  
 -      corr = corr * 150142 * state1 / 10000 - 78642;
 -      corr /= 100000;
 -      corr2 = corr * ips->corr;
 +      corr = div_u64(corr * 150142 * state1, 10000) - 78642;
 +      corr2 = div_u64(corr, 100000) * ips->corr;
  
 -      state2 = corr2 * state1 / 10000;
 +      state2 = div_u64(corr2 * state1, 10000);
        state2 /= 100; /* convert to mW */
  
        __gen5_ips_update(ips);
@@@ -1446,10 -1432,8 +1446,10 @@@ int intel_gpu_freq(struct intel_rps *rp
                return chv_gpu_freq(rps, val);
        else if (IS_VALLEYVIEW(i915))
                return byt_gpu_freq(rps, val);
 -      else
 +      else if (INTEL_GEN(i915) >= 6)
                return val * GT_FREQUENCY_MULTIPLIER;
 +      else
 +              return val;
  }
  
  int intel_freq_opcode(struct intel_rps *rps, int val)
                return chv_freq_opcode(rps, val);
        else if (IS_VALLEYVIEW(i915))
                return byt_freq_opcode(rps, val);
 -      else
 +      else if (INTEL_GEN(i915) >= 6)
                return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 +      else
 +              return val;
  }
  
  static void vlv_init_gpll_ref_freq(struct intel_rps *rps)
@@@ -1882,11 -1864,8 +1882,11 @@@ u32 intel_rps_get_cagf(struct intel_rp
                cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
        else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
                cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
 -      else
 +      else if (INTEL_GEN(i915) >= 6)
                cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
 +      else
 +              cagf = gen5_invert_freq(rps, (rpstat & MEMSTAT_PSTATE_MASK) >>
 +                                      MEMSTAT_PSTATE_SHIFT);
  
        return cagf;
  }
  static u32 read_cagf(struct intel_rps *rps)
  {
        struct drm_i915_private *i915 = rps_to_i915(rps);
 +      struct intel_uncore *uncore = rps_to_uncore(rps);
        u32 freq;
  
        if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
                vlv_punit_get(i915);
                freq = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
                vlv_punit_put(i915);
 +      } else if (INTEL_GEN(i915) >= 6) {
 +              freq = intel_uncore_read(uncore, GEN6_RPSTAT1);
        } else {
 -              freq = intel_uncore_read(rps_to_uncore(rps), GEN6_RPSTAT1);
 +              freq = intel_uncore_read(uncore, MEMSTAT_ILK);
        }
  
        return intel_rps_get_cagf(rps, freq);
@@@ -1973,7 -1949,7 +1973,7 @@@ static struct drm_i915_private *mchdev_
  
        rcu_read_lock();
        i915 = rcu_dereference(ips_mchdev);
-       if (!kref_get_unless_zero(&i915->drm.ref))
+       if (i915 && !kref_get_unless_zero(&i915->drm.ref))
                i915 = NULL;
        rcu_read_unlock();
  
index 19c2cb166e7c298fc19a4ce1bb80f27833792ed7,d5ea6a91bc9e11f5bac1f37c9b7c61b85c90c9f3..2edf2b15885f7c72e99ef5e71f457bdedbb54158
@@@ -17,8 -17,9 +17,9 @@@
  #include "../selftests/i915_random.h"
  #include "../i915_selftest.h"
  
- #include "../selftests/igt_flush_test.h"
- #include "../selftests/mock_gem_device.h"
+ #include "selftests/igt_flush_test.h"
+ #include "selftests/lib_sw_fence.h"
+ #include "selftests/mock_gem_device.h"
  #include "selftests/mock_timeline.h"
  
  static struct page *hwsp_page(struct intel_timeline *tl)
@@@ -158,7 -159,7 +159,7 @@@ out
                __mock_hwsp_record(&state, na, NULL);
        kfree(state.history);
  err_put:
 -      drm_dev_put(&i915->drm);
 +      mock_destroy_device(i915);
        return err;
  }
  
@@@ -755,6 -756,378 +756,378 @@@ out_free
        return err;
  }
  
+ static int emit_read_hwsp(struct i915_request *rq,
+                         u32 seqno, u32 hwsp,
+                         u32 *addr)
+ {
+       const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
+       u32 *cs;
+       cs = intel_ring_begin(rq, 12);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+       *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+       *cs++ = *addr;
+       *cs++ = 0;
+       *cs++ = seqno;
+       *addr += 4;
+       *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+       *cs++ = gpr;
+       *cs++ = hwsp;
+       *cs++ = 0;
+       *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+       *cs++ = gpr;
+       *cs++ = *addr;
+       *cs++ = 0;
+       *addr += 4;
+       intel_ring_advance(rq, cs);
+       return 0;
+ }
+ struct hwsp_watcher {
+       struct i915_vma *vma;
+       struct i915_request *rq;
+       u32 addr;
+       u32 *map;
+ };
+ static bool cmp_lt(u32 a, u32 b)
+ {
+       return a < b;
+ }
+ static bool cmp_gte(u32 a, u32 b)
+ {
+       return a >= b;
+ }
+ static int setup_watcher(struct hwsp_watcher *w, struct intel_gt *gt)
+ {
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       obj = i915_gem_object_create_internal(gt->i915, SZ_2M);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+       w->map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(w->map)) {
+               i915_gem_object_put(obj);
+               return PTR_ERR(w->map);
+       }
+       vma = i915_gem_object_ggtt_pin_ww(obj, NULL, NULL, 0, 0, 0);
+       if (IS_ERR(vma)) {
+               i915_gem_object_put(obj);
+               return PTR_ERR(vma);
+       }
+       w->vma = vma;
+       w->addr = i915_ggtt_offset(vma);
+       return 0;
+ }
+ static int create_watcher(struct hwsp_watcher *w,
+                         struct intel_engine_cs *engine,
+                         int ringsz)
+ {
+       struct intel_context *ce;
+       struct intel_timeline *tl;
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+       ce->ring = __intel_context_ring_size(ringsz);
+       w->rq = intel_context_create_request(ce);
+       intel_context_put(ce);
+       if (IS_ERR(w->rq))
+               return PTR_ERR(w->rq);
+       w->addr = i915_ggtt_offset(w->vma);
+       tl = w->rq->context->timeline;
+       /* some light mutex juggling required; think co-routines */
+       lockdep_unpin_lock(&tl->mutex, w->rq->cookie);
+       mutex_unlock(&tl->mutex);
+       return 0;
+ }
+ static int check_watcher(struct hwsp_watcher *w, const char *name,
+                        bool (*op)(u32 hwsp, u32 seqno))
+ {
+       struct i915_request *rq = fetch_and_zero(&w->rq);
+       struct intel_timeline *tl = rq->context->timeline;
+       u32 offset, end;
+       int err;
+       GEM_BUG_ON(w->addr - i915_ggtt_offset(w->vma) > w->vma->size);
+       i915_request_get(rq);
+       mutex_lock(&tl->mutex);
+       rq->cookie = lockdep_pin_lock(&tl->mutex);
+       i915_request_add(rq);
+       if (i915_request_wait(rq, 0, HZ) < 0) {
+               err = -ETIME;
+               goto out;
+       }
+       err = 0;
+       offset = 0;
+       end = (w->addr - i915_ggtt_offset(w->vma)) / sizeof(*w->map);
+       while (offset < end) {
+               if (!op(w->map[offset + 1], w->map[offset])) {
+                       pr_err("Watcher '%s' found HWSP value %x for seqno %x\n",
+                              name, w->map[offset + 1], w->map[offset]);
+                       err = -EINVAL;
+               }
+               offset += 2;
+       }
+ out:
+       i915_request_put(rq);
+       return err;
+ }
+ static void cleanup_watcher(struct hwsp_watcher *w)
+ {
+       if (w->rq) {
+               struct intel_timeline *tl = w->rq->context->timeline;
+               mutex_lock(&tl->mutex);
+               w->rq->cookie = lockdep_pin_lock(&tl->mutex);
+               i915_request_add(w->rq);
+       }
+       i915_vma_unpin_and_release(&w->vma, I915_VMA_RELEASE_MAP);
+ }
+ static bool retire_requests(struct intel_timeline *tl)
+ {
+       struct i915_request *rq, *rn;
+       mutex_lock(&tl->mutex);
+       list_for_each_entry_safe(rq, rn, &tl->requests, link)
+               if (!i915_request_retire(rq))
+                       break;
+       mutex_unlock(&tl->mutex);
+       return !i915_active_fence_isset(&tl->last_request);
+ }
+ static struct i915_request *wrap_timeline(struct i915_request *rq)
+ {
+       struct intel_context *ce = rq->context;
+       struct intel_timeline *tl = ce->timeline;
+       u32 seqno = rq->fence.seqno;
+       while (tl->seqno >= seqno) { /* Cause a wrap */
+               i915_request_put(rq);
+               rq = intel_context_create_request(ce);
+               if (IS_ERR(rq))
+                       return rq;
+               i915_request_get(rq);
+               i915_request_add(rq);
+       }
+       i915_request_put(rq);
+       rq = intel_context_create_request(ce);
+       if (IS_ERR(rq))
+               return rq;
+       i915_request_get(rq);
+       i915_request_add(rq);
+       return rq;
+ }
+ static int live_hwsp_read(void *arg)
+ {
+       struct intel_gt *gt = arg;
+       struct hwsp_watcher watcher[2] = {};
+       struct intel_engine_cs *engine;
+       struct intel_timeline *tl;
+       enum intel_engine_id id;
+       int err = 0;
+       int i;
+       /*
+        * If we take a reference to the HWSP for reading on the GPU, that
+        * read may be arbitrarily delayed (either by foreign fence or
+        * priority saturation) and a wrap can happen within 30 minutes.
+        * When the GPU read is finally submitted it should be correct,
+        * even across multiple wraps.
+        */
+       if (INTEL_GEN(gt->i915) < 8) /* CS convenience [SRM/LRM] */
+               return 0;
+       tl = intel_timeline_create(gt);
+       if (IS_ERR(tl))
+               return PTR_ERR(tl);
+       if (!tl->hwsp_cacheline)
+               goto out_free;
+       for (i = 0; i < ARRAY_SIZE(watcher); i++) {
+               err = setup_watcher(&watcher[i], gt);
+               if (err)
+                       goto out;
+       }
+       for_each_engine(engine, gt, id) {
+               struct intel_context *ce;
+               unsigned long count = 0;
+               IGT_TIMEOUT(end_time);
+               /* Create a request we can use for remote reading of the HWSP */
+               err = create_watcher(&watcher[1], engine, SZ_512K);
+               if (err)
+                       goto out;
+               do {
+                       struct i915_sw_fence *submit;
+                       struct i915_request *rq;
+                       u32 hwsp;
+                       submit = heap_fence_create(GFP_KERNEL);
+                       if (!submit) {
+                               err = -ENOMEM;
+                               goto out;
+                       }
+                       err = create_watcher(&watcher[0], engine, SZ_4K);
+                       if (err)
+                               goto out;
+                       ce = intel_context_create(engine);
+                       if (IS_ERR(ce)) {
+                               err = PTR_ERR(ce);
+                               goto out;
+                       }
+                       /* Skip to the end, saving 30 minutes of nops */
+                       tl->seqno = -10u + 2 * (count & 3);
+                       WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+                       ce->timeline = intel_timeline_get(tl);
+                       rq = intel_context_create_request(ce);
+                       if (IS_ERR(rq)) {
+                               err = PTR_ERR(rq);
+                               intel_context_put(ce);
+                               goto out;
+                       }
+                       err = i915_sw_fence_await_dma_fence(&rq->submit,
+                                                           &watcher[0].rq->fence, 0,
+                                                           GFP_KERNEL);
+                       if (err < 0) {
+                               i915_request_add(rq);
+                               intel_context_put(ce);
+                               goto out;
+                       }
+                       mutex_lock(&watcher[0].rq->context->timeline->mutex);
+                       err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
+                       if (err == 0)
+                               err = emit_read_hwsp(watcher[0].rq, /* before */
+                                                    rq->fence.seqno, hwsp,
+                                                    &watcher[0].addr);
+                       mutex_unlock(&watcher[0].rq->context->timeline->mutex);
+                       if (err) {
+                               i915_request_add(rq);
+                               intel_context_put(ce);
+                               goto out;
+                       }
+                       mutex_lock(&watcher[1].rq->context->timeline->mutex);
+                       err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
+                       if (err == 0)
+                               err = emit_read_hwsp(watcher[1].rq, /* after */
+                                                    rq->fence.seqno, hwsp,
+                                                    &watcher[1].addr);
+                       mutex_unlock(&watcher[1].rq->context->timeline->mutex);
+                       if (err) {
+                               i915_request_add(rq);
+                               intel_context_put(ce);
+                               goto out;
+                       }
+                       i915_request_get(rq);
+                       i915_request_add(rq);
+                       rq = wrap_timeline(rq);
+                       intel_context_put(ce);
+                       if (IS_ERR(rq)) {
+                               err = PTR_ERR(rq);
+                               goto out;
+                       }
+                       err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
+                                                           &rq->fence, 0,
+                                                           GFP_KERNEL);
+                       if (err < 0) {
+                               i915_request_put(rq);
+                               goto out;
+                       }
+                       err = check_watcher(&watcher[0], "before", cmp_lt);
+                       i915_sw_fence_commit(submit);
+                       heap_fence_put(submit);
+                       if (err) {
+                               i915_request_put(rq);
+                               goto out;
+                       }
+                       count++;
+                       if (8 * watcher[1].rq->ring->emit >
+                           3 * watcher[1].rq->ring->size) {
+                               i915_request_put(rq);
+                               break;
+                       }
+                       /* Flush the timeline before manually wrapping again */
+                       if (i915_request_wait(rq,
+                                             I915_WAIT_INTERRUPTIBLE,
+                                             HZ) < 0) {
+                               err = -ETIME;
+                               i915_request_put(rq);
+                               goto out;
+                       }
+                       retire_requests(tl);
+                       i915_request_put(rq);
+               } while (!__igt_timeout(end_time, NULL));
+               WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef);
+               pr_info("%s: simulated %lu wraps\n", engine->name, count);
+               err = check_watcher(&watcher[1], "after", cmp_gte);
+               if (err)
+                       goto out;
+       }
+ out:
+       for (i = 0; i < ARRAY_SIZE(watcher); i++)
+               cleanup_watcher(&watcher[i]);
+       if (igt_flush_test(gt->i915))
+               err = -EIO;
+ out_free:
+       intel_timeline_put(tl);
+       return err;
+ }
  static int live_hwsp_rollover_kernel(void *arg)
  {
        struct intel_gt *gt = arg;
@@@ -998,6 -1371,7 +1371,7 @@@ int intel_timeline_live_selftests(struc
                SUBTEST(live_hwsp_engine),
                SUBTEST(live_hwsp_alternate),
                SUBTEST(live_hwsp_wrap),
+               SUBTEST(live_hwsp_read),
                SUBTEST(live_hwsp_rollover_kernel),
                SUBTEST(live_hwsp_rollover_user),
        };
index e4aaa5f2979630f4da416286bfd43f41842485a9,6909da1e1a736ff1a535c919b3ec445e8a473efd..2a343a97798720cbdff68093df0a02a6b998a659
@@@ -213,23 -213,6 +213,6 @@@ static u32 guc_ctl_feature_flags(struc
        return flags;
  }
  
- static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
- {
-       u32 flags = 0;
-       if (intel_guc_submission_is_used(guc)) {
-               u32 ctxnum, base;
-               base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
-               ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;
-               base >>= PAGE_SHIFT;
-               flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
-                       (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
-       }
-       return flags;
- }
  static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
  {
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
@@@ -291,7 -274,6 +274,6 @@@ static void guc_init_params(struct inte
  
        BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));
  
-       params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
@@@ -312,18 -294,18 +294,18 @@@ void intel_guc_write_params(struct inte
        int i;
  
        /*
 -       * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
 +       * All SOFT_SCRATCH registers are in FORCEWAKE_GT domain and
         * they are power context saved so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
 -      intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);
 +      intel_uncore_forcewake_get(uncore, FORCEWAKE_GT);
  
        intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);
  
        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);
  
 -      intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
 +      intel_uncore_forcewake_put(uncore, FORCEWAKE_GT);
  }
  
  int intel_guc_init(struct intel_guc *guc)
index 037bcaf3c8b574fad854813f971a25f8367e8b1f,ee4ac392227708fe4d8e3249a9ea5fdf82c9f96c..180c23e2e25e4ed3291a995c7590c0646c89d42a
@@@ -44,24 -44,19 +44,20 @@@ void intel_uc_fw_change_status(struct i
   * List of required GuC and HuC binaries per-platform.
   * Must be ordered based on platform + revid, from newer to older.
   *
-  * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas
-  * between 33.0 and 35.2 are only related to new additions to support new Gen12
-  * features.
-  *
   * Note that RKL uses the same firmware as TGL.
   */
  #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
-       fw_def(ROCKETLAKE,  0, guc_def(tgl, 35, 2, 0), huc_def(tgl,  7, 5, 0)) \
-       fw_def(TIGERLAKE,   0, guc_def(tgl, 35, 2, 0), huc_def(tgl,  7, 5, 0)) \
-       fw_def(JASPERLAKE,  0, guc_def(ehl, 33, 0, 4), huc_def(ehl,  9, 0, 0)) \
-       fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl,  9, 0, 0)) \
-       fw_def(ICELAKE,     0, guc_def(icl, 33, 0, 0), huc_def(icl,  9, 0, 0)) \
-       fw_def(COMETLAKE,   5, guc_def(cml, 33, 0, 0), huc_def(cml,  4, 0, 0)) \
-       fw_def(COFFEELAKE,  0, guc_def(kbl, 33, 0, 0), huc_def(kbl,  4, 0, 0)) \
-       fw_def(GEMINILAKE,  0, guc_def(glk, 33, 0, 0), huc_def(glk,  4, 0, 0)) \
-       fw_def(KABYLAKE,    0, guc_def(kbl, 33, 0, 0), huc_def(kbl,  4, 0, 0)) \
-       fw_def(BROXTON,     0, guc_def(bxt, 33, 0, 0), huc_def(bxt,  2, 0, 0)) \
-       fw_def(SKYLAKE,     0, guc_def(skl, 33, 0, 0), huc_def(skl,  2, 0, 0))
+       fw_def(ROCKETLAKE,  0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
+       fw_def(TIGERLAKE,   0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
++      fw_def(JASPERLAKE,  0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
+       fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
+       fw_def(ICELAKE,     0, guc_def(icl, 49, 0, 1), huc_def(icl,  9, 0, 0)) \
+       fw_def(COMETLAKE,   5, guc_def(cml, 49, 0, 1), huc_def(cml,  4, 0, 0)) \
+       fw_def(COFFEELAKE,  0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
+       fw_def(GEMINILAKE,  0, guc_def(glk, 49, 0, 1), huc_def(glk,  4, 0, 0)) \
+       fw_def(KABYLAKE,    0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
+       fw_def(BROXTON,     0, guc_def(bxt, 49, 0, 1), huc_def(bxt,  2, 0, 0)) \
+       fw_def(SKYLAKE,     0, guc_def(skl, 49, 0, 1), huc_def(skl,  2, 0, 0))
  
  #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
        "i915/" \
@@@ -372,6 -367,9 +368,9 @@@ int intel_uc_fw_fetch(struct intel_uc_f
                }
        }
  
+       if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
+               uc_fw->private_data_size = css->private_data_size;
        obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
index 200f6b86f8647a4091d49651bc1772e56367b189,a727552d2bc6aad13d29eb3fbe48d90d2f2081a6..77e76b665098f2ca30d6a45c88a74c08a41a758c
@@@ -725,7 -725,7 +725,7 @@@ static int i915_gpu_info_open(struct in
  
        gpu = NULL;
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
-               gpu = i915_gpu_coredump(i915);
+               gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
        if (IS_ERR(gpu))
                return PTR_ERR(gpu);
  
@@@ -786,6 -786,7 +786,6 @@@ static int i915_frequency_info(struct s
        struct intel_uncore *uncore = &dev_priv->uncore;
        struct intel_rps *rps = &dev_priv->gt.rps;
        intel_wakeref_t wakeref;
 -      int ret = 0;
  
        wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
  
        seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
  
        intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 -      return ret;
 +      return 0;
  }
  
  static int i915_ring_freq_table(struct seq_file *m, void *unused)
index d7765b31fbefaa589d25cb7b344f682254353ab7,539df8fbc84b9727b2a5df5bca62791b40d18202..d548e10e1600f05acce9afdd9ae8ed94c0639520
@@@ -33,8 -33,6 +33,8 @@@
  #include <uapi/drm/i915_drm.h>
  #include <uapi/drm/drm_fourcc.h>
  
 +#include <asm/hypervisor.h>
 +
  #include <linux/io-mapping.h>
  #include <linux/i2c.h>
  #include <linux/i2c-algo-bit.h>
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
 -#define DRIVER_DATE           "20200824"
 -#define DRIVER_TIMESTAMP      1598293597
 +#define DRIVER_DATE           "20201103"
 +#define DRIVER_TIMESTAMP      1604406085
  
  struct drm_i915_gem_object;
  
 -/*
 - * The code assumes that the hpd_pins below have consecutive values and
 - * starting with HPD_PORT_A, the HPD pin associated with any port can be
 - * retrieved by adding the corresponding port (or phy) enum value to
 - * HPD_PORT_A in most cases. For example:
 - * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
 - */
  enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
 -      HPD_PORT_F,
 -      HPD_PORT_G,
 -      HPD_PORT_H,
 -      HPD_PORT_I,
 +      HPD_PORT_TC1,
 +      HPD_PORT_TC2,
 +      HPD_PORT_TC3,
 +      HPD_PORT_TC4,
 +      HPD_PORT_TC5,
 +      HPD_PORT_TC6,
  
        HPD_NUM_PINS
  };
@@@ -534,9 -537,13 +534,9 @@@ struct intel_gmbus 
  
  struct i915_suspend_saved_registers {
        u32 saveDSPARB;
 -      u32 saveFBC_CONTROL;
 -      u32 saveCACHE_MODE_0;
 -      u32 saveMI_ARB_STATE;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
 -      u32 savePCH_PORT_HOTPLUG;
        u16 saveGCDGMBUS;
  };
  
@@@ -1013,6 -1020,8 +1013,6 @@@ struct drm_i915_private 
         */
        u8 active_pipes;
  
 -      int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 -
        struct i915_wa_list gt_wa_list;
  
        struct i915_frontbuffer_tracking fb_tracking;
@@@ -1419,8 -1428,7 +1419,8 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  #define IS_COMETLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
  #define IS_CANNONLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
  #define IS_ICELAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_ICELAKE)
 -#define IS_ELKHARTLAKE(dev_priv)      IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
 +#define IS_JSL_EHL(dev_priv)  (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
 +                              IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
  #define IS_TIGERLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
  #define IS_ROCKETLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
  #define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
@@@ -1561,44 -1569,15 +1561,44 @@@ extern const struct i915_rev_steppings 
  
  #define EHL_REVID_A0            0x0
  
 -#define IS_EHL_REVID(p, since, until) \
 -      (IS_ELKHARTLAKE(p) && IS_REVID(p, since, until))
 +#define IS_JSL_EHL_REVID(p, since, until) \
 +      (IS_JSL_EHL(p) && IS_REVID(p, since, until))
 +
 +enum {
 +      TGL_REVID_A0,
 +      TGL_REVID_B0,
 +      TGL_REVID_B1,
 +      TGL_REVID_C0,
 +      TGL_REVID_D0,
 +};
 +
 +extern const struct i915_rev_steppings tgl_uy_revids[];
 +extern const struct i915_rev_steppings tgl_revids[];
 +
 +static inline const struct i915_rev_steppings *
 +tgl_revids_get(struct drm_i915_private *dev_priv)
 +{
 +      if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
 +              return tgl_uy_revids;
 +      else
 +              return tgl_revids;
 +}
  
 -#define TGL_REVID_A0          0x0
 -#define TGL_REVID_B0          0x1
 -#define TGL_REVID_C0          0x2
 +#define IS_TGL_DISP_REVID(p, since, until) \
 +      (IS_TIGERLAKE(p) && \
 +       tgl_revids_get(p)->disp_stepping >= (since) && \
 +       tgl_revids_get(p)->disp_stepping <= (until))
  
 -#define IS_TGL_REVID(p, since, until) \
 -      (IS_TIGERLAKE(p) && IS_REVID(p, since, until))
 +#define IS_TGL_UY_GT_REVID(p, since, until) \
 +      ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
 +       tgl_uy_revids->gt_stepping >= (since) && \
 +       tgl_uy_revids->gt_stepping <= (until))
 +
 +#define IS_TGL_GT_REVID(p, since, until) \
 +      (IS_TIGERLAKE(p) && \
 +       !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
 +       tgl_revids->gt_stepping >= (since) && \
 +       tgl_revids->gt_stepping <= (until))
  
  #define RKL_REVID_A0          0x0
  #define RKL_REVID_B0          0x1
  #define HAS_SNOOP(dev_priv)   (INTEL_INFO(dev_priv)->has_snoop)
  #define HAS_EDRAM(dev_priv)   ((dev_priv)->edram_size_mb)
  #define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
- #define HAS_WT(dev_priv)      ((IS_HASWELL(dev_priv) || \
-                                IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
+ #define HAS_WT(dev_priv)      HAS_EDRAM(dev_priv)
  
  #define HWS_NEEDS_PHYSICAL(dev_priv)  (INTEL_INFO(dev_priv)->hws_needs_physical)
  
@@@ -1763,9 -1741,7 +1762,9 @@@ static inline bool intel_vtd_active(voi
        if (intel_iommu_gfx_mapped)
                return true;
  #endif
 -      return false;
 +
 +      /* Running as a guest, we assume the host is enforcing VT'd */
 +      return !hypervisor_is_type(X86_HYPER_NATIVE);
  }
  
  static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
@@@ -1784,7 -1760,6 +1783,7 @@@ extern const struct dev_pm_ops i915_pm_
  
  int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
  void i915_driver_remove(struct drm_i915_private *i915);
 +void i915_driver_shutdown(struct drm_i915_private *i915);
  
  int i915_resume_switcheroo(struct drm_i915_private *i915);
  int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
index cf6e47adfde6fc3262555548d3e8e47fe80f3651,f38e6abd45792eeccc318a732e99cc209b045ccd..d8cac4c5881fdb705d17e6dc9aa0d3e0b276301b
@@@ -570,6 -570,7 +570,7 @@@ static void error_print_engine(struct d
                                   ee->vm_info.pp_dir_base);
                }
        }
+       err_printf(m, "  hung: %u\n", ee->hung);
        err_printf(m, "  engine reset count: %u\n", ee->reset_count);
  
        for (n = 0; n < ee->num_ports; n++) {
@@@ -1026,6 -1027,7 +1027,7 @@@ i915_vma_coredump_create(const struct i
                dma_addr_t dma;
  
                for_each_sgt_daddr(dma, iter, vma->pages) {
+                       mutex_lock(&ggtt->error_mutex);
                        ggtt->vm.insert_page(&ggtt->vm, dma, slot,
                                             I915_CACHE_NONE, 0);
                        mb();
                                            (void  __force *)s, dst,
                                            true);
                        io_mapping_unmap(s);
+                       mb();
+                       ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+                       mutex_unlock(&ggtt->error_mutex);
                        if (ret)
                                break;
                }
@@@ -1162,7 -1168,7 +1168,7 @@@ static void engine_record_registers(str
                        switch (engine->id) {
                        default:
                                MISSING_CASE(engine->id);
 -                              /* fall through */
 +                              fallthrough;
                        case RCS0:
                                mmio = RENDER_HWS_PGA_GEN7;
                                break;
@@@ -1451,6 -1457,7 +1457,7 @@@ capture_engine(struct intel_engine_cs *
  
  static void
  gt_record_engines(struct intel_gt_coredump *gt,
+                 intel_engine_mask_t engine_mask,
                  struct i915_vma_compress *compress)
  {
        struct intel_engine_cs *engine;
                if (!ee)
                        continue;
  
+               ee->hung = engine->mask & engine_mask;
                gt->simulated |= ee->simulated;
                if (ee->simulated) {
                        kfree(ee);
@@@ -1505,25 -1514,6 +1514,6 @@@ gt_record_uc(struct intel_gt_coredump *
        return error_uc;
  }
  
- static void gt_capture_prepare(struct intel_gt_coredump *gt)
- {
-       struct i915_ggtt *ggtt = gt->_gt->ggtt;
-       mutex_lock(&ggtt->error_mutex);
- }
- static void gt_capture_finish(struct intel_gt_coredump *gt)
- {
-       struct i915_ggtt *ggtt = gt->_gt->ggtt;
-       if (drm_mm_node_allocated(&ggtt->error_capture))
-               ggtt->vm.clear_range(&ggtt->vm,
-                                    ggtt->error_capture.start,
-                                    PAGE_SIZE);
-       mutex_unlock(&ggtt->error_mutex);
- }
  /* Capture all registers which don't fit into another category. */
  static void gt_record_regs(struct intel_gt_coredump *gt)
  {
@@@ -1669,24 -1659,25 +1659,25 @@@ static u32 generate_ecode(const struct 
  static const char *error_msg(struct i915_gpu_coredump *error)
  {
        struct intel_engine_coredump *first = NULL;
+       unsigned int hung_classes = 0;
        struct intel_gt_coredump *gt;
-       intel_engine_mask_t engines;
        int len;
  
-       engines = 0;
        for (gt = error->gt; gt; gt = gt->next) {
                struct intel_engine_coredump *cs;
  
-               if (gt->engine && !first)
-                       first = gt->engine;
-               for (cs = gt->engine; cs; cs = cs->next)
-                       engines |= cs->engine->mask;
+               for (cs = gt->engine; cs; cs = cs->next) {
+                       if (cs->hung) {
+                               hung_classes |= BIT(cs->engine->uabi_class);
+                               if (!first)
+                                       first = cs;
+                       }
+               }
        }
  
        len = scnprintf(error->error_msg, sizeof(error->error_msg),
                        "GPU HANG: ecode %d:%x:%08x",
-                       INTEL_GEN(error->i915), engines,
+                       INTEL_GEN(error->i915), hung_classes,
                        generate_ecode(first));
        if (first && first->context.pid) {
                /* Just show the first executing process, more is confusing */
@@@ -1782,8 -1773,6 +1773,6 @@@ i915_vma_capture_prepare(struct intel_g
                return NULL;
        }
  
-       gt_capture_prepare(gt);
        return compress;
  }
  
@@@ -1793,14 -1782,14 +1782,14 @@@ void i915_vma_capture_finish(struct int
        if (!compress)
                return;
  
-       gt_capture_finish(gt);
        compress_fini(compress);
        kfree(compress);
  }
  
- struct i915_gpu_coredump *i915_gpu_coredump(struct drm_i915_private *i915)
+ struct i915_gpu_coredump *
+ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask)
  {
+       struct drm_i915_private *i915 = gt->i915;
        struct i915_gpu_coredump *error;
  
        /* Check if GPU capture has been disabled */
        if (!error)
                return ERR_PTR(-ENOMEM);
  
-       error->gt = intel_gt_coredump_alloc(&i915->gt, ALLOW_FAIL);
+       error->gt = intel_gt_coredump_alloc(gt, ALLOW_FAIL);
        if (error->gt) {
                struct i915_vma_compress *compress;
  
                }
  
                gt_record_info(error->gt);
-               gt_record_engines(error->gt, compress);
+               gt_record_engines(error->gt, engine_mask, compress);
  
                if (INTEL_INFO(i915)->has_gt_uc)
                        error->gt->uc = gt_record_uc(error->gt, compress);
@@@ -1871,20 -1860,23 +1860,23 @@@ void i915_error_state_store(struct i915
  
  /**
   * i915_capture_error_state - capture an error record for later analysis
-  * @i915: i915 device
+  * @gt: intel_gt which originated the hang
+  * @engine_mask: hung engines
+  *
   *
   * Should be called when an error is detected (either a hang or an error
   * interrupt) to capture error state from the time of the error.  Fills
   * out a structure which becomes available in debugfs for user level tools
   * to pick up.
   */
- void i915_capture_error_state(struct drm_i915_private *i915)
+ void i915_capture_error_state(struct intel_gt *gt,
+                             intel_engine_mask_t engine_mask)
  {
        struct i915_gpu_coredump *error;
  
-       error = i915_gpu_coredump(i915);
+       error = i915_gpu_coredump(gt, engine_mask);
        if (IS_ERR(error)) {
-               cmpxchg(&i915->gpu_error.first_error, NULL, error);
+               cmpxchg(&gt->i915->gpu_error.first_error, NULL, error);
                return;
        }
  
index 1fe390727d8089dbe4c17b95903023120a712782,629e19209da00751b993eac412c30cc229951827..11fe790b19690b7897fce684f0dd559dbc6ba6a0
@@@ -389,7 -389,6 +389,7 @@@ static const struct intel_device_info i
        GEN5_FEATURES,
        PLATFORM(INTEL_IRONLAKE),
        .is_mobile = 1,
 +      .has_rps = true,
        .display.has_fbc = 1,
  };
  
@@@ -847,14 -846,6 +847,14 @@@ static const struct intel_device_info e
        .ppgtt_size = 36,
  };
  
 +static const struct intel_device_info jsl_info = {
 +      GEN11_FEATURES,
 +      PLATFORM(INTEL_JASPERLAKE),
 +      .require_force_probe = 1,
 +      .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0) | BIT(VECS0),
 +      .ppgtt_size = 36,
 +};
 +
  #define GEN12_FEATURES \
        GEN11_FEATURES, \
        GEN(12), \
@@@ -909,8 -900,6 +909,8 @@@ static const struct intel_device_info r
        GEN12_FEATURES, \
        .memory_regions = REGION_SMEM | REGION_LMEM, \
        .has_master_unit_irq = 1, \
 +      .has_llc = 0, \
 +      .has_snoop = 1, \
        .is_dgfx = 1
  
  static const struct intel_device_info dg1_info __maybe_unused = {
        .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
                BIT(VCS0) | BIT(VCS2),
 +      /* Wa_16011227922 */
 +      .ppgtt_size = 47,
  };
  
  #undef GEN
@@@ -998,7 -985,6 +998,7 @@@ static const struct pci_device_id pciid
        INTEL_CNL_IDS(&cnl_info),
        INTEL_ICL_11_IDS(&icl_info),
        INTEL_EHL_IDS(&ehl_info),
 +      INTEL_JSL_IDS(&jsl_info),
        INTEL_TGL_12_IDS(&tgl_info),
        INTEL_RKL_IDS(&rkl_info),
        {0, 0, 0}
@@@ -1104,19 -1090,11 +1104,19 @@@ static int i915_pci_probe(struct pci_de
        return 0;
  }
  
 +static void i915_pci_shutdown(struct pci_dev *pdev)
 +{
 +      struct drm_i915_private *i915 = pci_get_drvdata(pdev);
 +
 +      i915_driver_shutdown(i915);
 +}
 +
  static struct pci_driver i915_pci_driver = {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
        .probe = i915_pci_probe,
        .remove = i915_pci_remove,
 +      .shutdown = i915_pci_shutdown,
        .driver.pm = &i915_pm_ops,
  };
  
@@@ -1151,9 -1129,13 +1151,13 @@@ static int __init i915_init(void
                return 0;
        }
  
+       i915_pmu_init();
        err = pci_register_driver(&i915_pci_driver);
-       if (err)
+       if (err) {
+               i915_pmu_exit();
                return err;
+       }
  
        i915_perf_sysctl_register();
        return 0;
@@@ -1167,6 -1149,7 +1171,7 @@@ static void __exit i915_exit(void
        i915_perf_sysctl_unregister();
        pci_unregister_driver(&i915_pci_driver);
        i915_globals_exit();
+       i915_pmu_exit();
  }
  
  module_init(i915_init);
index 69c0fa20eba17b031b40073e63250715bbab02be,5c010bc147aa377947e0f880f6e311cf81ce862e..cd786ad12be704f596f51f67ba84f61ab2ae50e8
@@@ -30,6 -30,7 +30,7 @@@
  #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
  
  static cpumask_t i915_pmu_cpumask;
+ static unsigned int i915_pmu_target_cpu = -1;
  
  static u8 engine_config_sample(u64 config)
  {
@@@ -445,6 -446,8 +446,8 @@@ static void i915_pmu_event_destroy(stru
                container_of(event->pmu, typeof(*i915), pmu.base);
  
        drm_WARN_ON(&i915->drm, event->parent);
+       drm_dev_put(&i915->drm);
  }
  
  static int
@@@ -474,7 -477,7 +477,7 @@@ config_status(struct drm_i915_private *
                if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
                        /* Requires a mutex for sampling! */
                        return -ENODEV;
 -              /* Fall-through. */
 +              fallthrough;
        case I915_PMU_REQUESTED_FREQUENCY:
                if (INTEL_GEN(i915) < 6)
                        return -ENODEV;
@@@ -510,8 -513,12 +513,12 @@@ static int i915_pmu_event_init(struct p
  {
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
+       struct i915_pmu *pmu = &i915->pmu;
        int ret;
  
+       if (pmu->closed)
+               return -ENODEV;
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
  
        if (ret)
                return ret;
  
-       if (!event->parent)
+       if (!event->parent) {
+               drm_dev_get(&i915->drm);
                event->destroy = i915_pmu_event_destroy;
+       }
  
        return 0;
  }
@@@ -594,9 -603,16 +603,16 @@@ static u64 __i915_pmu_event_read(struc
  
  static void i915_pmu_event_read(struct perf_event *event)
  {
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
        struct hw_perf_event *hwc = &event->hw;
+       struct i915_pmu *pmu = &i915->pmu;
        u64 prev, new;
  
+       if (pmu->closed) {
+               event->hw.state = PERF_HES_STOPPED;
+               return;
+       }
  again:
        prev = local64_read(&hwc->prev_count);
        new = __i915_pmu_event_read(event);
@@@ -724,6 -740,13 +740,13 @@@ static void i915_pmu_disable(struct per
  
  static void i915_pmu_event_start(struct perf_event *event, int flags)
  {
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       struct i915_pmu *pmu = &i915->pmu;
+       if (pmu->closed)
+               return;
        i915_pmu_enable(event);
        event->hw.state = 0;
  }
@@@ -738,6 -761,13 +761,13 @@@ static void i915_pmu_event_stop(struct 
  
  static int i915_pmu_event_add(struct perf_event *event, int flags)
  {
+       struct drm_i915_private *i915 =
+               container_of(event->pmu, typeof(*i915), pmu.base);
+       struct i915_pmu *pmu = &i915->pmu;
+       if (pmu->closed)
+               return -ENODEV;
        if (flags & PERF_EF_START)
                i915_pmu_event_start(event, flags);
  
@@@ -1020,25 -1050,39 +1050,39 @@@ static int i915_pmu_cpu_online(unsigne
  static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
  {
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
-       unsigned int target;
+       unsigned int target = i915_pmu_target_cpu;
  
        GEM_BUG_ON(!pmu->base.event_init);
  
+       /*
+        * Unregistering an instance generates a CPU offline event which we must
+        * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
+        */
+       if (pmu->closed)
+               return 0;
        if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &i915_pmu_cpumask);
-                       perf_pmu_migrate_context(&pmu->base, cpu, target);
+                       i915_pmu_target_cpu = target;
                }
        }
  
+       if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
+               perf_pmu_migrate_context(&pmu->base, cpu, target);
+               pmu->cpuhp.cpu = target;
+       }
        return 0;
  }
  
- static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
+ static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
+ void i915_pmu_init(void)
  {
-       enum cpuhp_state slot;
        int ret;
  
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      i915_pmu_cpu_online,
                                      i915_pmu_cpu_offline);
        if (ret < 0)
-               return ret;
+               pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
+                         ret);
+       else
+               cpuhp_slot = ret;
+ }
  
-       slot = ret;
-       ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
-       if (ret) {
-               cpuhp_remove_multi_state(slot);
-               return ret;
-       }
+ void i915_pmu_exit(void)
+ {
+       if (cpuhp_slot != CPUHP_INVALID)
+               cpuhp_remove_multi_state(cpuhp_slot);
+ }
  
-       pmu->cpuhp.slot = slot;
-       return 0;
+ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
+ {
+       if (cpuhp_slot == CPUHP_INVALID)
+               return -EINVAL;
+       return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
  }
  
  static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
  {
-       struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
-       drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
-       drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
-       cpuhp_remove_multi_state(pmu->cpuhp.slot);
-       pmu->cpuhp.slot = CPUHP_INVALID;
+       cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
  }
  
  static bool is_igp(struct drm_i915_private *i915)
@@@ -1100,7 -1146,7 +1146,7 @@@ void i915_pmu_register(struct drm_i915_
        spin_lock_init(&pmu->lock);
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
-       pmu->cpuhp.slot = CPUHP_INVALID;
+       pmu->cpuhp.cpu = -1;
  
        if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
@@@ -1167,7 -1213,13 +1213,13 @@@ void i915_pmu_unregister(struct drm_i91
        if (!pmu->base.event_init)
                return;
  
-       drm_WARN_ON(&i915->drm, pmu->enable);
+       /*
+        * "Disconnect" the PMU callbacks - since all are atomic synchronize_rcu
+        * ensures all currently executing ones will have exited before we
+        * proceed with unregistration.
+        */
+       pmu->closed = true;
+       synchronize_rcu();
  
        hrtimer_cancel(&pmu->timer);
  
index bb0656875697259f8509eebf2c7db4f5774cb022,6fb1746e21252ce9102090114897126c96afe725..c61302c69b11675ce9d951fa0ee00949ee400e6c
@@@ -242,8 -242,7 +242,8 @@@ static inline bool i915_mmio_reg_valid(
  #define _MMIO_PIPE3(pipe, a, b, c)    _MMIO(_PICK(pipe, a, b, c))
  #define _MMIO_PORT3(pipe, a, b, c)    _MMIO(_PICK(pipe, a, b, c))
  #define _MMIO_PHY3(phy, a, b, c)      _MMIO(_PHY3(phy, a, b, c))
 -#define _MMIO_PLL3(pll, a, b, c)      _MMIO(_PICK(pll, a, b, c))
 +#define _MMIO_PLL3(pll, ...)          _MMIO(_PICK(pll, __VA_ARGS__))
 +
  
  /*
   * Device info offset array based helpers for groups of registers with unevenly
  #define  DPIO_CMNRST                  (1 << 0)
  
  #define DPIO_PHY(pipe)                        ((pipe) >> 1)
 -#define DPIO_PHY_IOSF_PORT(phy)               (dev_priv->dpio_phy_iosf_port[phy])
  
  /*
   * Per pipe/PLL DPIO regs
  #define RING_PSMI_CTL(base)   _MMIO((base) + 0x50)
  #define RING_MAX_IDLE(base)   _MMIO((base) + 0x54)
  #define RING_HWS_PGA(base)    _MMIO((base) + 0x80)
 +#define RING_ID(base)         _MMIO((base) + 0x8c)
  #define RING_HWS_PGA_GEN6(base)       _MMIO((base) + 0x2080)
  #define RING_RESET_CTL(base)  _MMIO((base) + 0xd0)
  #define   RESET_CTL_CAT_ERROR    REG_BIT(2)
@@@ -4148,7 -4147,6 +4148,7 @@@ enum 
  
  #define GEN9_CLKGATE_DIS_3            _MMIO(0x46538)
  #define   TGL_VRH_GATING_DIS          REG_BIT(31)
 +#define   DPT_GATING_DIS              REG_BIT(22)
  
  #define GEN9_CLKGATE_DIS_4            _MMIO(0x4653C)
  #define   BXT_GMBUS_GATING_DIS                (1 << 14)
  #define  PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME     REG_BIT(2)
  #define  PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE     REG_BIT(1)
  
 +/* Icelake DSC Rate Control Range Parameter Registers */
 +#define DSCA_RC_RANGE_PARAMETERS_0            _MMIO(0x6B240)
 +#define DSCA_RC_RANGE_PARAMETERS_0_UDW                _MMIO(0x6B240 + 4)
 +#define DSCC_RC_RANGE_PARAMETERS_0            _MMIO(0x6BA40)
 +#define DSCC_RC_RANGE_PARAMETERS_0_UDW                _MMIO(0x6BA40 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB    (0x78208)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB        (0x78208 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB    (0x78308)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB        (0x78308 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC    (0x78408)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC        (0x78408 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC    (0x78508)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC        (0x78508 + 4)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
 +#define RC_BPG_OFFSET_SHIFT                   10
 +#define RC_MAX_QP_SHIFT                               5
 +#define RC_MIN_QP_SHIFT                               0
 +
 +#define DSCA_RC_RANGE_PARAMETERS_1            _MMIO(0x6B248)
 +#define DSCA_RC_RANGE_PARAMETERS_1_UDW                _MMIO(0x6B248 + 4)
 +#define DSCC_RC_RANGE_PARAMETERS_1            _MMIO(0x6BA48)
 +#define DSCC_RC_RANGE_PARAMETERS_1_UDW                _MMIO(0x6BA48 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB    (0x78210)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB        (0x78210 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB    (0x78310)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB        (0x78310 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC    (0x78410)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC        (0x78410 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC    (0x78510)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC        (0x78510 + 4)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
 +
 +#define DSCA_RC_RANGE_PARAMETERS_2            _MMIO(0x6B250)
 +#define DSCA_RC_RANGE_PARAMETERS_2_UDW                _MMIO(0x6B250 + 4)
 +#define DSCC_RC_RANGE_PARAMETERS_2            _MMIO(0x6BA50)
 +#define DSCC_RC_RANGE_PARAMETERS_2_UDW                _MMIO(0x6BA50 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB    (0x78218)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB        (0x78218 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB    (0x78318)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB        (0x78318 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC    (0x78418)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC        (0x78418 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC    (0x78518)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC        (0x78518 + 4)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
 +
 +#define DSCA_RC_RANGE_PARAMETERS_3            _MMIO(0x6B258)
 +#define DSCA_RC_RANGE_PARAMETERS_3_UDW                _MMIO(0x6B258 + 4)
 +#define DSCC_RC_RANGE_PARAMETERS_3            _MMIO(0x6BA58)
 +#define DSCC_RC_RANGE_PARAMETERS_3_UDW                _MMIO(0x6BA58 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB    (0x78220)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB        (0x78220 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB    (0x78320)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB        (0x78320 + 4)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC    (0x78420)
 +#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC        (0x78420 + 4)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC    (0x78520)
 +#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC        (0x78520 + 4)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
 +#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
 +                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
 +#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
 +                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
 +
  /* VGA port control */
  #define ADPA                  _MMIO(0x61100)
  #define PCH_ADPA                _MMIO(0xe1100)
        _MMIO(_PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe)))
  
  /* define the Watermark register on Ironlake */
 -#define WM0_PIPEA_ILK         _MMIO(0x45100)
 +#define _WM0_PIPEA_ILK                0x45100
 +#define _WM0_PIPEB_ILK                0x45104
 +#define _WM0_PIPEC_IVB                0x45200
 +#define WM0_PIPE_ILK(pipe)    _MMIO_PIPE3((pipe), _WM0_PIPEA_ILK, \
 +                                          _WM0_PIPEB_ILK, _WM0_PIPEC_IVB)
  #define  WM0_PIPE_PLANE_MASK  (0xffff << 16)
  #define  WM0_PIPE_PLANE_SHIFT 16
  #define  WM0_PIPE_SPRITE_MASK (0xff << 8)
  #define  WM0_PIPE_SPRITE_SHIFT        8
  #define  WM0_PIPE_CURSOR_MASK (0xff)
 -
 -#define WM0_PIPEB_ILK         _MMIO(0x45104)
 -#define WM0_PIPEC_IVB         _MMIO(0x45200)
  #define WM1_LP_ILK            _MMIO(0x45108)
  #define  WM1_LP_SR_EN         (1 << 31)
  #define  WM1_LP_LATENCY_SHIFT 24
  #define   PLANE_CTL_TILED_X                   (1 << 10)
  #define   PLANE_CTL_TILED_Y                   (4 << 10)
  #define   PLANE_CTL_TILED_YF                  (5 << 10)
 +#define   PLANE_CTL_ASYNC_FLIP                        (1 << 9)
  #define   PLANE_CTL_FLIP_HORIZONTAL           (1 << 8)
  #define   PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE        (1 << 4) /* TGL+ */
  #define   PLANE_CTL_ALPHA_MASK                        (0x3 << 4) /* Pre-GLK */
  #define PS_PLANE_SEL(plane) (((plane) + 1) << 25)
  #define PS_FILTER_MASK         (3 << 23)
  #define PS_FILTER_MEDIUM       (0 << 23)
 +#define PS_FILTER_PROGRAMMED   (1 << 23)
  #define PS_FILTER_EDGE_ENHANCE (2 << 23)
  #define PS_FILTER_BILINEAR     (3 << 23)
  #define PS_VERT3TAP            (1 << 21)
  #define PS_VADAPT_MODE_MOST_ADAPT  (3 << 5)
  #define PS_PLANE_Y_SEL_MASK  (7 << 5)
  #define PS_PLANE_Y_SEL(plane) (((plane) + 1) << 5)
 +#define PS_Y_VERT_FILTER_SELECT(set)   ((set) << 4)
 +#define PS_Y_HORZ_FILTER_SELECT(set)   ((set) << 3)
 +#define PS_UV_VERT_FILTER_SELECT(set)  ((set) << 2)
 +#define PS_UV_HORZ_FILTER_SELECT(set)  ((set) << 1)
  
  #define _PS_PWR_GATE_1A     0x68160
  #define _PS_PWR_GATE_2A     0x68260
  #define _PS_ECC_STAT_2B     0x68AD0
  #define _PS_ECC_STAT_1C     0x691D0
  
 +#define _PS_COEF_SET0_INDEX_1A           0x68198
 +#define _PS_COEF_SET0_INDEX_2A           0x68298
 +#define _PS_COEF_SET0_INDEX_1B           0x68998
 +#define _PS_COEF_SET0_INDEX_2B           0x68A98
 +#define PS_COEE_INDEX_AUTO_INC           (1 << 10)
 +
 +#define _PS_COEF_SET0_DATA_1A    0x6819C
 +#define _PS_COEF_SET0_DATA_2A    0x6829C
 +#define _PS_COEF_SET0_DATA_1B    0x6899C
 +#define _PS_COEF_SET0_DATA_2B    0x68A9C
 +
  #define _ID(id, a, b) _PICK_EVEN(id, a, b)
  #define SKL_PS_CTRL(pipe, id) _MMIO_PIPE(pipe,        \
                        _ID(id, _PS_1A_CTRL, _PS_2A_CTRL),       \
  #define SKL_PS_ECC_STAT(pipe, id)  _MMIO_PIPE(pipe,     \
                        _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A),   \
                        _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
 +#define CNL_PS_COEF_INDEX_SET(pipe, id, set)  _MMIO_PIPE(pipe,    \
 +                      _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
 +                      _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
  
 +#define CNL_PS_COEF_DATA_SET(pipe, id, set)  _MMIO_PIPE(pipe,     \
 +                      _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
 +                      _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
  /* legacy palette */
  #define _LGC_PALETTE_A           0x4a000
  #define _LGC_PALETTE_B           0x4a800
  #define BXT_CSR_DC3_DC5_COUNT _MMIO(0x80038)
  #define TGL_DMC_DEBUG_DC5_COUNT       _MMIO(0x101084)
  #define TGL_DMC_DEBUG_DC6_COUNT       _MMIO(0x101088)
 +#define DG1_DMC_DEBUG_DC5_COUNT       _MMIO(0x134154)
  
  #define DMC_DEBUG3            _MMIO(0x101090)
  
        (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \
         GEN11_PIPE_PLANE5_FAULT)
  
 +#define _HPD_PIN_DDI(hpd_pin) ((hpd_pin) - HPD_PORT_A)
 +#define _HPD_PIN_TC(hpd_pin)  ((hpd_pin) - HPD_PORT_TC1)
 +
  #define GEN8_DE_PORT_ISR _MMIO(0x44440)
  #define GEN8_DE_PORT_IMR _MMIO(0x44444)
  #define GEN8_DE_PORT_IIR _MMIO(0x44448)
  #define  GEN9_AUX_CHANNEL_B           (1 << 25)
  #define  DSI1_TE                      (1 << 24)
  #define  DSI0_TE                      (1 << 23)
 -#define  BXT_DE_PORT_HP_DDIC          (1 << 5)
 -#define  BXT_DE_PORT_HP_DDIB          (1 << 4)
 -#define  BXT_DE_PORT_HP_DDIA          (1 << 3)
 -#define  BXT_DE_PORT_HOTPLUG_MASK     (BXT_DE_PORT_HP_DDIA | \
 -                                       BXT_DE_PORT_HP_DDIB | \
 -                                       BXT_DE_PORT_HP_DDIC)
 -#define  GEN8_PORT_DP_A_HOTPLUG               (1 << 3)
 +#define  GEN8_DE_PORT_HOTPLUG(hpd_pin)        REG_BIT(3 + _HPD_PIN_DDI(hpd_pin))
 +#define  BXT_DE_PORT_HOTPLUG_MASK     (GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) | \
 +                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) | \
 +                                       GEN8_DE_PORT_HOTPLUG(HPD_PORT_C))
 +#define  BDW_DE_PORT_HOTPLUG_MASK     GEN8_DE_PORT_HOTPLUG(HPD_PORT_A)
  #define  BXT_DE_PORT_GMBUS            (1 << 1)
  #define  GEN8_AUX_CHANNEL_A           (1 << 0)
  #define  TGL_DE_PORT_AUX_USBC6                (1 << 13)
  #define GEN11_DE_HPD_IMR              _MMIO(0x44474)
  #define GEN11_DE_HPD_IIR              _MMIO(0x44478)
  #define GEN11_DE_HPD_IER              _MMIO(0x4447c)
 -#define  GEN12_TC6_HOTPLUG                    (1 << 21)
 -#define  GEN12_TC5_HOTPLUG                    (1 << 20)
 -#define  GEN11_TC4_HOTPLUG                    (1 << 19)
 -#define  GEN11_TC3_HOTPLUG                    (1 << 18)
 -#define  GEN11_TC2_HOTPLUG                    (1 << 17)
 -#define  GEN11_TC1_HOTPLUG                    (1 << 16)
 -#define  GEN11_TC_HOTPLUG(tc_port)            (1 << ((tc_port) + 16))
 -#define  GEN11_DE_TC_HOTPLUG_MASK             (GEN12_TC6_HOTPLUG | \
 -                                               GEN12_TC5_HOTPLUG | \
 -                                               GEN11_TC4_HOTPLUG | \
 -                                               GEN11_TC3_HOTPLUG | \
 -                                               GEN11_TC2_HOTPLUG | \
 -                                               GEN11_TC1_HOTPLUG)
 -#define  GEN12_TBT6_HOTPLUG                   (1 << 5)
 -#define  GEN12_TBT5_HOTPLUG                   (1 << 4)
 -#define  GEN11_TBT4_HOTPLUG                   (1 << 3)
 -#define  GEN11_TBT3_HOTPLUG                   (1 << 2)
 -#define  GEN11_TBT2_HOTPLUG                   (1 << 1)
 -#define  GEN11_TBT1_HOTPLUG                   (1 << 0)
 -#define  GEN11_TBT_HOTPLUG(tc_port)           (1 << (tc_port))
 -#define  GEN11_DE_TBT_HOTPLUG_MASK            (GEN12_TBT6_HOTPLUG | \
 -                                               GEN12_TBT5_HOTPLUG | \
 -                                               GEN11_TBT4_HOTPLUG | \
 -                                               GEN11_TBT3_HOTPLUG | \
 -                                               GEN11_TBT2_HOTPLUG | \
 -                                               GEN11_TBT1_HOTPLUG)
 +#define  GEN11_TC_HOTPLUG(hpd_pin)            REG_BIT(16 + _HPD_PIN_TC(hpd_pin))
 +#define  GEN11_DE_TC_HOTPLUG_MASK             (GEN11_TC_HOTPLUG(HPD_PORT_TC6) | \
 +                                               GEN11_TC_HOTPLUG(HPD_PORT_TC5) | \
 +                                               GEN11_TC_HOTPLUG(HPD_PORT_TC4) | \
 +                                               GEN11_TC_HOTPLUG(HPD_PORT_TC3) | \
 +                                               GEN11_TC_HOTPLUG(HPD_PORT_TC2) | \
 +                                               GEN11_TC_HOTPLUG(HPD_PORT_TC1))
 +#define  GEN11_TBT_HOTPLUG(hpd_pin)           REG_BIT(_HPD_PIN_TC(hpd_pin))
 +#define  GEN11_DE_TBT_HOTPLUG_MASK            (GEN11_TBT_HOTPLUG(HPD_PORT_TC6) | \
 +                                               GEN11_TBT_HOTPLUG(HPD_PORT_TC5) | \
 +                                               GEN11_TBT_HOTPLUG(HPD_PORT_TC4) | \
 +                                               GEN11_TBT_HOTPLUG(HPD_PORT_TC3) | \
 +                                               GEN11_TBT_HOTPLUG(HPD_PORT_TC2) | \
 +                                               GEN11_TBT_HOTPLUG(HPD_PORT_TC1))
  
  #define GEN11_TBT_HOTPLUG_CTL                         _MMIO(0x44030)
  #define GEN11_TC_HOTPLUG_CTL                          _MMIO(0x44038)
 -#define  GEN11_HOTPLUG_CTL_ENABLE(tc_port)            (8 << (tc_port) * 4)
 -#define  GEN11_HOTPLUG_CTL_LONG_DETECT(tc_port)               (2 << (tc_port) * 4)
 -#define  GEN11_HOTPLUG_CTL_SHORT_DETECT(tc_port)      (1 << (tc_port) * 4)
 -#define  GEN11_HOTPLUG_CTL_NO_DETECT(tc_port)         (0 << (tc_port) * 4)
 +#define  GEN11_HOTPLUG_CTL_ENABLE(hpd_pin)            (8 << (_HPD_PIN_TC(hpd_pin) * 4))
 +#define  GEN11_HOTPLUG_CTL_LONG_DETECT(hpd_pin)               (2 << (_HPD_PIN_TC(hpd_pin) * 4))
 +#define  GEN11_HOTPLUG_CTL_SHORT_DETECT(hpd_pin)      (1 << (_HPD_PIN_TC(hpd_pin) * 4))
 +#define  GEN11_HOTPLUG_CTL_NO_DETECT(hpd_pin)         (0 << (_HPD_PIN_TC(hpd_pin) * 4))
  
  #define GEN11_GT_INTR_DW0             _MMIO(0x190018)
  #define  GEN11_CSME                   (31)
  # define CHICKEN3_DGMG_DONE_FIX_DISABLE               (1 << 2)
  
  #define CHICKEN_PAR1_1                        _MMIO(0x42080)
 +#define  KBL_ARB_FILL_SPARE_22                REG_BIT(22)
  #define  DIS_RAM_BYPASS_PSR2_MAN_TRACK        (1 << 16)
  #define  SKL_DE_COMPRESSED_HASH_MODE  (1 << 15)
  #define  DPA_MASK_VBLANK_SRD          (1 << 15)
  
  #define CHICKEN_MISC_2                _MMIO(0x42084)
  #define  CNL_COMP_PWR_DOWN    (1 << 23)
 +#define  KBL_ARB_FILL_SPARE_14        REG_BIT(14)
 +#define  KBL_ARB_FILL_SPARE_13        REG_BIT(13)
  #define  GLK_CL2_PWR_DOWN     (1 << 12)
  #define  GLK_CL1_PWR_DOWN     (1 << 11)
  #define  GLK_CL0_PWR_DOWN     (1 << 10)
  #define DISP_ARB_CTL2 _MMIO(0x45004)
  #define  DISP_DATA_PARTITION_5_6      (1 << 6)
  #define  DISP_IPC_ENABLE              (1 << 3)
 -#define _DBUF_CTL_S1                  0x45008
 -#define _DBUF_CTL_S2                  0x44FE8
 -#define DBUF_CTL_S(slice)             _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
 -#define  DBUF_POWER_REQUEST           (1 << 31)
 -#define  DBUF_POWER_STATE             (1 << 30)
 +
 +#define _DBUF_CTL_S1                          0x45008
 +#define _DBUF_CTL_S2                          0x44FE8
 +#define DBUF_CTL_S(slice)                     _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
 +#define  DBUF_POWER_REQUEST                   REG_BIT(31)
 +#define  DBUF_POWER_STATE                     REG_BIT(30)
 +#define  DBUF_TRACKER_STATE_SERVICE_MASK      REG_GENMASK(23, 19)
 +#define  DBUF_TRACKER_STATE_SERVICE(x)                REG_FIELD_PREP(DBUF_TRACKER_STATE_SERVICE_MASK, x)
 +
  #define GEN7_MSG_CTL  _MMIO(0x45010)
  #define  WAIT_FOR_PCH_RESET_ACK               (1 << 1)
  #define  WAIT_FOR_PCH_FLR_ACK         (1 << 0)
  #define GEN8_L3CNTLREG        _MMIO(0x7034)
    #define GEN8_ERRDETBCTRL (1 << 9)
  
 -#define GEN11_COMMON_SLICE_CHICKEN3           _MMIO(0x7304)
 -  #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC  (1 << 11)
 -  #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE  (1 << 9)
 +#define GEN11_COMMON_SLICE_CHICKEN3                   _MMIO(0x7304)
 +  #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN    REG_BIT(12)
 +  #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC          REG_BIT(11)
 +  #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE          REG_BIT(9)
  
  #define HIZ_CHICKEN                                   _MMIO(0x7018)
 -# define CHV_HZ_8X8_MODE_IN_1X                                (1 << 15)
 -# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE  (1 << 3)
 +# define CHV_HZ_8X8_MODE_IN_1X                                REG_BIT(15)
 +# define DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE   REG_BIT(14)
 +# define BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE  REG_BIT(3)
  
  #define GEN9_SLICE_COMMON_ECO_CHICKEN0                _MMIO(0x7308)
  #define  DISABLE_PIXEL_MASK_CAMMING           (1 << 14)
  
  /* south display engine interrupt: ICP/TGP */
  #define SDE_GMBUS_ICP                 (1 << 23)
 -#define SDE_TC_HOTPLUG_ICP(tc_port)   (1 << ((tc_port) + 24))
 -#define SDE_DDI_HOTPLUG_ICP(port)     (1 << ((port) + 16))
 -#define SDE_DDI_MASK_ICP              (SDE_DDI_HOTPLUG_ICP(PORT_B) | \
 -                                       SDE_DDI_HOTPLUG_ICP(PORT_A))
 -#define SDE_TC_MASK_ICP                       (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC1))
 -#define SDE_DDI_MASK_TGP              (SDE_DDI_HOTPLUG_ICP(PORT_C) | \
 -                                       SDE_DDI_HOTPLUG_ICP(PORT_B) | \
 -                                       SDE_DDI_HOTPLUG_ICP(PORT_A))
 -#define SDE_TC_MASK_TGP                       (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC5) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC4) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC3) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC2) | \
 -                                       SDE_TC_HOTPLUG_ICP(PORT_TC1))
 +#define SDE_TC_HOTPLUG_ICP(hpd_pin)   REG_BIT(24 + _HPD_PIN_TC(hpd_pin))
 +#define SDE_DDI_HOTPLUG_ICP(hpd_pin)  REG_BIT(16 + _HPD_PIN_DDI(hpd_pin))
 +#define SDE_DDI_HOTPLUG_MASK_ICP      (SDE_DDI_HOTPLUG_ICP(HPD_PORT_D) | \
 +                                       SDE_DDI_HOTPLUG_ICP(HPD_PORT_C) | \
 +                                       SDE_DDI_HOTPLUG_ICP(HPD_PORT_B) | \
 +                                       SDE_DDI_HOTPLUG_ICP(HPD_PORT_A))
 +#define SDE_TC_HOTPLUG_MASK_ICP               (SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6) | \
 +                                       SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5) | \
 +                                       SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4) | \
 +                                       SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3) | \
 +                                       SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
 +                                       SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
  
  #define SDEISR  _MMIO(0xc4000)
  #define SDEIMR  _MMIO(0xc4004)
   */
  
  #define SHOTPLUG_CTL_DDI                              _MMIO(0xc4030)
 -#define   SHOTPLUG_CTL_DDI_HPD_ENABLE(port)           (0x8 << (4 * (port)))
 -#define   SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port)      (0x3 << (4 * (port)))
 -#define   SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port)                (0x0 << (4 * (port)))
 -#define   SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port)     (0x1 << (4 * (port)))
 -#define   SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port)      (0x2 << (4 * (port)))
 -#define   SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port)        (0x3 << (4 * (port)))
 +#define   SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin)                        (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
 +#define   SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin)           (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
 +#define   SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin)             (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
 +#define   SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin)          (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
 +#define   SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(hpd_pin)           (0x2 << (_HPD_PIN_DDI(hpd_pin) * 4))
 +#define   SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(hpd_pin)     (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
  
  #define SHOTPLUG_CTL_TC                               _MMIO(0xc4034)
 -#define   ICP_TC_HPD_ENABLE(tc_port)          (8 << (tc_port) * 4)
 +#define   ICP_TC_HPD_ENABLE(hpd_pin)          (8 << (_HPD_PIN_TC(hpd_pin) * 4))
 +#define   ICP_TC_HPD_LONG_DETECT(hpd_pin)     (2 << (_HPD_PIN_TC(hpd_pin) * 4))
 +#define   ICP_TC_HPD_SHORT_DETECT(hpd_pin)    (1 << (_HPD_PIN_TC(hpd_pin) * 4))
  
  #define SHPD_FILTER_CNT                               _MMIO(0xc4038)
  #define   SHPD_FILTER_CNT_500_ADJ             0x001D9
  
 -/* Icelake DSC Rate Control Range Parameter Registers */
 -#define DSCA_RC_RANGE_PARAMETERS_0            _MMIO(0x6B240)
 -#define DSCA_RC_RANGE_PARAMETERS_0_UDW                _MMIO(0x6B240 + 4)
 -#define DSCC_RC_RANGE_PARAMETERS_0            _MMIO(0x6BA40)
 -#define DSCC_RC_RANGE_PARAMETERS_0_UDW                _MMIO(0x6BA40 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB    (0x78208)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB        (0x78208 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB    (0x78308)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB        (0x78308 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC    (0x78408)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC        (0x78408 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC    (0x78508)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC        (0x78508 + 4)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
 -#define RC_BPG_OFFSET_SHIFT                   10
 -#define RC_MAX_QP_SHIFT                               5
 -#define RC_MIN_QP_SHIFT                               0
 -
 -#define DSCA_RC_RANGE_PARAMETERS_1            _MMIO(0x6B248)
 -#define DSCA_RC_RANGE_PARAMETERS_1_UDW                _MMIO(0x6B248 + 4)
 -#define DSCC_RC_RANGE_PARAMETERS_1            _MMIO(0x6BA48)
 -#define DSCC_RC_RANGE_PARAMETERS_1_UDW                _MMIO(0x6BA48 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB    (0x78210)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB        (0x78210 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB    (0x78310)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB        (0x78310 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC    (0x78410)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC        (0x78410 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC    (0x78510)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC        (0x78510 + 4)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
 -
 -#define DSCA_RC_RANGE_PARAMETERS_2            _MMIO(0x6B250)
 -#define DSCA_RC_RANGE_PARAMETERS_2_UDW                _MMIO(0x6B250 + 4)
 -#define DSCC_RC_RANGE_PARAMETERS_2            _MMIO(0x6BA50)
 -#define DSCC_RC_RANGE_PARAMETERS_2_UDW                _MMIO(0x6BA50 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB    (0x78218)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB        (0x78218 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB    (0x78318)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB        (0x78318 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC    (0x78418)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC        (0x78418 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC    (0x78518)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC        (0x78518 + 4)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
 -
 -#define DSCA_RC_RANGE_PARAMETERS_3            _MMIO(0x6B258)
 -#define DSCA_RC_RANGE_PARAMETERS_3_UDW                _MMIO(0x6B258 + 4)
 -#define DSCC_RC_RANGE_PARAMETERS_3            _MMIO(0x6BA58)
 -#define DSCC_RC_RANGE_PARAMETERS_3_UDW                _MMIO(0x6BA58 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB    (0x78220)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB        (0x78220 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB    (0x78320)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB        (0x78320 + 4)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC    (0x78420)
 -#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC        (0x78420 + 4)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC    (0x78520)
 -#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC        (0x78520 + 4)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
 -#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
 -                                                      _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe)          _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
 -#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe)      _MMIO_PIPE((pipe) - PIPE_B, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
 -                                                      _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
 -
 -#define   ICP_TC_HPD_LONG_DETECT(tc_port)     (2 << (tc_port) * 4)
 -#define   ICP_TC_HPD_SHORT_DETECT(tc_port)    (1 << (tc_port) * 4)
 -
 -#define ICP_DDI_HPD_ENABLE_MASK               (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
 -                                       SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
 -#define ICP_TC_HPD_ENABLE_MASK                (ICP_TC_HPD_ENABLE(PORT_TC4) | \
 -                                       ICP_TC_HPD_ENABLE(PORT_TC3) | \
 -                                       ICP_TC_HPD_ENABLE(PORT_TC2) | \
 -                                       ICP_TC_HPD_ENABLE(PORT_TC1))
 -#define TGP_DDI_HPD_ENABLE_MASK               (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \
 -                                       SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \
 -                                       SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A))
 -#define TGP_TC_HPD_ENABLE_MASK                (ICP_TC_HPD_ENABLE(PORT_TC6) | \
 -                                       ICP_TC_HPD_ENABLE(PORT_TC5) | \
 -                                       ICP_TC_HPD_ENABLE_MASK)
 -
  #define _PCH_DPLL_A              0xc6014
  #define _PCH_DPLL_B              0xc6018
  #define PCH_DPLL(pll) _MMIO((pll) == 0 ? _PCH_DPLL_A : _PCH_DPLL_B)
  #define SOUTH_CHICKEN1                _MMIO(0xc2000)
  #define  FDIA_PHASE_SYNC_SHIFT_OVR    19
  #define  FDIA_PHASE_SYNC_SHIFT_EN     18
 +#define  INVERT_DDID_HPD                      (1 << 18)
 +#define  INVERT_DDIC_HPD                      (1 << 17)
 +#define  INVERT_DDIB_HPD                      (1 << 16)
 +#define  INVERT_DDIA_HPD                      (1 << 15)
  #define  FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
  #define  FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
  #define  FDI_BC_BIFURCATION_SELECT    (1 << 12)
  #define  FORCEWAKE_MEDIA_VDBOX_GEN11(n)               _MMIO(0xa540 + (n) * 4)
  #define  FORCEWAKE_MEDIA_VEBOX_GEN11(n)               _MMIO(0xa560 + (n) * 4)
  #define  FORCEWAKE_RENDER_GEN9                        _MMIO(0xa278)
 -#define  FORCEWAKE_BLITTER_GEN9                       _MMIO(0xa188)
 +#define  FORCEWAKE_GT_GEN9                    _MMIO(0xa188)
  #define  FORCEWAKE_ACK_MEDIA_GEN9             _MMIO(0x0D88)
  #define  FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(n)   _MMIO(0x0D50 + (n) * 4)
  #define  FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(n)   _MMIO(0x0D70 + (n) * 4)
  #define  FORCEWAKE_ACK_RENDER_GEN9            _MMIO(0x0D84)
 -#define  FORCEWAKE_ACK_BLITTER_GEN9           _MMIO(0x130044)
 +#define  FORCEWAKE_ACK_GT_GEN9                        _MMIO(0x130044)
  #define   FORCEWAKE_KERNEL                    BIT(0)
  #define   FORCEWAKE_USER                      BIT(1)
  #define   FORCEWAKE_KERNEL_FALLBACK           BIT(15)
  #define   GEN9_PWRGT_MEDIA_STATUS_MASK                (1 << 0)
  #define   GEN9_PWRGT_RENDER_STATUS_MASK               (1 << 1)
  
- #define POWERGATE_ENABLE                      _MMIO(0xa210)
- #define    VDN_HCP_POWERGATE_ENABLE(n)                BIT(((n) * 2) + 3)
- #define    VDN_MFX_POWERGATE_ENABLE(n)                BIT(((n) * 2) + 4)
  #define  GTFIFODBG                            _MMIO(0x120000)
  #define    GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV  (0x1f << 20)
  #define    GT_FIFO_FREE_ENTRIES_CHV           (0x7f << 13)
  #define GEN9_MEDIA_PG_IDLE_HYSTERESIS         _MMIO(0xA0C4)
  #define GEN9_RENDER_PG_IDLE_HYSTERESIS                _MMIO(0xA0C8)
  #define GEN9_PG_ENABLE                                _MMIO(0xA210)
- #define GEN9_RENDER_PG_ENABLE                 REG_BIT(0)
- #define GEN9_MEDIA_PG_ENABLE                  REG_BIT(1)
- #define GEN11_MEDIA_SAMPLER_PG_ENABLE         REG_BIT(2)
+ #define   GEN9_RENDER_PG_ENABLE                       REG_BIT(0)
+ #define   GEN9_MEDIA_PG_ENABLE                        REG_BIT(1)
+ #define   GEN11_MEDIA_SAMPLER_PG_ENABLE               REG_BIT(2)
+ #define   VDN_HCP_POWERGATE_ENABLE(n)         REG_BIT(3 + 2 * (n))
+ #define   VDN_MFX_POWERGATE_ENABLE(n)         REG_BIT(4 + 2 * (n))
  #define GEN8_PUSHBUS_CONTROL                  _MMIO(0xA248)
  #define GEN8_PUSHBUS_ENABLE                   _MMIO(0xA250)
  #define GEN8_PUSHBUS_SHIFT                    _MMIO(0xA25C)
  #define     GEN9_SAGV_DISABLE                 0x0
  #define     GEN9_SAGV_IS_DISABLED             0x1
  #define     GEN9_SAGV_ENABLE                  0x3
 +#define   DG1_PCODE_STATUS                    0x7E
 +#define     DG1_UNCORE_GET_INIT_STATUS                0x0
 +#define     DG1_UNCORE_INIT_STATUS_COMPLETE   0x1
  #define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US   0x23
  #define GEN6_PCODE_DATA                               _MMIO(0x138128)
  #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT      8
  #define   GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC     (1 << 7)
  
  #define GEN10_SAMPLER_MODE            _MMIO(0xE18C)
 +#define   ENABLE_SMALLPL                      REG_BIT(15)
  #define   GEN11_SAMPLER_ENABLE_HEADLESS_MSG   REG_BIT(5)
  
  /* IVYBRIDGE DPF */
@@@ -10283,7 -10267,6 +10281,7 @@@ enum skl_power_gate 
  #define  DPLL_CFGCR2_PDIV_2 (1 << 2)
  #define  DPLL_CFGCR2_PDIV_3 (2 << 2)
  #define  DPLL_CFGCR2_PDIV_7 (4 << 2)
 +#define  DPLL_CFGCR2_PDIV_7_INVALID   (5 << 2)
  #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK        (3)
  
  #define DPLL_CFGCR1(id)       _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
  #define ICL_DPCLKA_CFGCR0                     _MMIO(0x164280)
  #define  ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)   (1 << _PICK(phy, 10, 11, 24))
  #define  RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)   REG_BIT((phy) + 10)
 -#define  ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)        (1 << ((tc_port) < PORT_TC4 ? \
 +#define  ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port)        (1 << ((tc_port) < TC_PORT_4 ? \
                                                       (tc_port) + 12 : \
 -                                                     (tc_port) - PORT_TC4 + 21))
 +                                                     (tc_port) - TC_PORT_4 + 21))
  #define  ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy)     ((phy) * 2)
  #define  ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy)      (3 << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
  #define  ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll, phy)      ((pll) << ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy))
  #define MG_PLL_ENABLE(tc_port)        _MMIO_PORT((tc_port), _MG_PLL1_ENABLE, \
                                           _MG_PLL2_ENABLE)
  
 +/* DG1 PLL */
 +#define DG1_DPLL_ENABLE(pll)    _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
 +                                         _MG_PLL1_ENABLE, _MG_PLL2_ENABLE)
 +
  #define _MG_REFCLKIN_CTL_PORT1                                0x16892C
  #define _MG_REFCLKIN_CTL_PORT2                                0x16992C
  #define _MG_REFCLKIN_CTL_PORT3                                0x16A92C
  #define RKL_DPLL_CFGCR1(pll)          _MMIO_PLL(pll, _TGL_DPLL0_CFGCR1, \
                                                  _TGL_DPLL1_CFGCR1)
  
 +#define _DG1_DPLL2_CFGCR0             0x16C284
 +#define _DG1_DPLL3_CFGCR0             0x16C28C
 +#define DG1_DPLL_CFGCR0(pll)          _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR0, \
 +                                                 _TGL_DPLL1_CFGCR0, \
 +                                                 _DG1_DPLL2_CFGCR0, \
 +                                                 _DG1_DPLL3_CFGCR0)
 +
 +#define _DG1_DPLL2_CFGCR1               0x16C288
 +#define _DG1_DPLL3_CFGCR1               0x16C290
 +#define DG1_DPLL_CFGCR1(pll)            _MMIO_PLL3(pll, _TGL_DPLL0_CFGCR1, \
 +                                                 _TGL_DPLL1_CFGCR1, \
 +                                                 _DG1_DPLL2_CFGCR1, \
 +                                                 _DG1_DPLL3_CFGCR1)
 +
  #define _DKL_PHY1_BASE                        0x168000
  #define _DKL_PHY2_BASE                        0x169000
  #define _DKL_PHY3_BASE                        0x16A000
  #define _CGM_PIPE_A_CSC_COEFF67       (VLV_DISPLAY_BASE + 0x6790C)
  #define _CGM_PIPE_A_CSC_COEFF8        (VLV_DISPLAY_BASE + 0x67910)
  #define _CGM_PIPE_A_DEGAMMA   (VLV_DISPLAY_BASE + 0x66000)
 +#define   CGM_PIPE_DEGAMMA_RED_MASK   REG_GENMASK(13, 0)
 +#define   CGM_PIPE_DEGAMMA_GREEN_MASK REG_GENMASK(29, 16)
 +#define   CGM_PIPE_DEGAMMA_BLUE_MASK  REG_GENMASK(13, 0)
  #define _CGM_PIPE_A_GAMMA     (VLV_DISPLAY_BASE + 0x67000)
 +#define   CGM_PIPE_GAMMA_RED_MASK     REG_GENMASK(9, 0)
 +#define   CGM_PIPE_GAMMA_GREEN_MASK   REG_GENMASK(25, 16)
 +#define   CGM_PIPE_GAMMA_BLUE_MASK    REG_GENMASK(9, 0)
  #define _CGM_PIPE_A_MODE      (VLV_DISPLAY_BASE + 0x67A00)
  #define   CGM_PIPE_MODE_GAMMA (1 << 2)
  #define   CGM_PIPE_MODE_CSC   (1 << 1)
  #define   CGM_PIPE_MODE_DEGAMMA       (1 << 0)
 -#define   CGM_PIPE_GAMMA_RED_MASK   REG_GENMASK(9, 0)
 -#define   CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16)
 -#define   CGM_PIPE_GAMMA_BLUE_MASK  REG_GENMASK(9, 0)
  
  #define _CGM_PIPE_B_CSC_COEFF01       (VLV_DISPLAY_BASE + 0x69900)
  #define _CGM_PIPE_B_CSC_COEFF23       (VLV_DISPLAY_BASE + 0x69904)
index 883dd8d09d6bf2ba5dc274f0bf1c4fa1537b533c,102d8d7007b6b5b38e7a704b8f319917b7e17e48..9cb26a2240348b22927f38393afa7a088291a9c2
@@@ -27,13 -27,17 +27,17 @@@ static __always_inline struct sgt_iter 
  } __sgt_iter(struct scatterlist *sgl, bool dma) {
        struct sgt_iter s = { .sgp = sgl };
  
-       if (s.sgp) {
+       if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
+               s.sgp = NULL;
+       } else if (s.sgp) {
                s.max = s.curr = s.sgp->offset;
-               s.max += s.sgp->length;
-               if (dma)
+               if (dma) {
                        s.dma = sg_dma_address(s.sgp);
-               else
+                       s.max += sg_dma_len(s.sgp);
+               } else {
                        s.pfn = page_to_pfn(sg_page(s.sgp));
+                       s.max += s.sgp->length;
+               }
        }
  
        return s;
@@@ -44,6 -48,11 +48,11 @@@ static inline int __sg_page_count(cons
        return sg->length >> PAGE_SHIFT;
  }
  
+ static inline int __sg_dma_page_count(const struct scatterlist *sg)
+ {
+       return sg_dma_len(sg) >> PAGE_SHIFT;
+ }
  static inline struct scatterlist *____sg_next(struct scatterlist *sg)
  {
        ++sg;
@@@ -112,7 -121,7 +121,7 @@@ static inline unsigned int i915_sg_segm
        unsigned int size = swiotlb_max_segment();
  
        if (size == 0)
 -              return SCATTERLIST_MAX_SEGMENT;
 +              size = UINT_MAX;
  
        size = rounddown(size, PAGE_SIZE);
        /* swiotlb_max_segment_size can return 1 byte when it means one page. */
index f54375b11964acbd716ff320159c38d0f23c87c5,2000d3991d12c54d783484beb73d72d71f9e3692..bbec56f978323ac56e545e3132770a22b187c58b
@@@ -3573,11 -3573,11 +3573,11 @@@ static void ilk_write_wm_values(struct 
        _ilk_disable_lp_wm(dev_priv, dirty);
  
        if (dirty & WM_DIRTY_PIPE(PIPE_A))
 -              I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
 +              I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]);
        if (dirty & WM_DIRTY_PIPE(PIPE_B))
 -              I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
 +              I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]);
        if (dirty & WM_DIRTY_PIPE(PIPE_C))
 -              I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
 +              I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]);
  
        if (dirty & WM_DIRTY_DDB) {
                if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
@@@ -3706,7 -3706,7 +3706,7 @@@ skl_setup_sagv_block_time(struct drm_i9
   *  - All planes can enable watermarks for latencies >= SAGV engine block time
   *  - We're not using an interlaced display configuration
   */
 -int
 +static int
  intel_enable_sagv(struct drm_i915_private *dev_priv)
  {
        int ret;
        return 0;
  }
  
 -int
 +static int
  intel_disable_sagv(struct drm_i915_private *dev_priv)
  {
        int ret;
@@@ -6287,8 -6287,13 +6287,8 @@@ static void ilk_pipe_wm_get_hw_state(st
        struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
        struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal;
        enum pipe pipe = crtc->pipe;
 -      static const i915_reg_t wm0_pipe_reg[] = {
 -              [PIPE_A] = WM0_PIPEA_ILK,
 -              [PIPE_B] = WM0_PIPEB_ILK,
 -              [PIPE_C] = WM0_PIPEC_IVB,
 -      };
  
 -      hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
 +      hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe));
  
        memset(active, 0, sizeof(*active));
  
@@@ -7111,28 -7116,14 +7111,14 @@@ static void icl_init_clock_gating(struc
                         0, CNL_DELAY_PMRSP);
  }
  
- static void gen12_init_clock_gating(struct drm_i915_private *i915)
- {
-       unsigned int i;
-       /* This is not a WA. Enable VD HCP & MFX_ENC powergate */
-       for (i = 0; i < I915_MAX_VCS; i++)
-               if (HAS_ENGINE(&i915->gt, _VCS(i)))
-                       intel_uncore_rmw(&i915->uncore, POWERGATE_ENABLE, 0,
-                                        VDN_HCP_POWERGATE_ENABLE(i) |
-                                        VDN_MFX_POWERGATE_ENABLE(i));
- }
  static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
  {
-       gen12_init_clock_gating(dev_priv);
        /* Wa_1409120013:tgl */
        I915_WRITE(ILK_DPFC_CHICKEN,
                   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
  
        /* Wa_1409825376:tgl (pre-prod)*/
 -      if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
 +      if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1))
                I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
                           TGL_VRH_GATING_DIS);
  
                         0, DFR_DISABLE);
  }
  
-       gen12_init_clock_gating(dev_priv);
 +static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
 +{
 +      /* Wa_1409836686:dg1[a0] */
 +      if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
 +              I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
 +                         DPT_GATING_DIS);
 +}
 +
  static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
  {
        if (!HAS_PCH_CNP(dev_priv))
@@@ -7203,10 -7184,6 +7187,10 @@@ static void cfl_init_clock_gating(struc
        cnp_init_clock_gating(dev_priv);
        gen9_init_clock_gating(dev_priv);
  
 +      /* WAC6entrylatency:cfl */
 +      I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
 +                 FBC_LLC_FULLY_OPEN);
 +
        /*
         * WaFbcTurnOffFbcWatermark:cfl
         * Display WA #0562: cfl
@@@ -7226,10 -7203,6 +7210,10 @@@ static void kbl_init_clock_gating(struc
  {
        gen9_init_clock_gating(dev_priv);
  
 +      /* WAC6entrylatency:kbl */
 +      I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
 +                 FBC_LLC_FULLY_OPEN);
 +
        /* WaDisableSDEUnitClockGating:kbl */
        if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
                I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
@@@ -7604,9 -7577,7 +7588,9 @@@ static void nop_init_clock_gating(struc
   */
  void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
  {
 -      if (IS_GEN(dev_priv, 12))
 +      if (IS_DG1(dev_priv))
 +              dev_priv->display.init_clock_gating = dg1_init_clock_gating;
 +      else if (IS_GEN(dev_priv, 12))
                dev_priv->display.init_clock_gating = tgl_init_clock_gating;
        else if (IS_GEN(dev_priv, 11))
                dev_priv->display.init_clock_gating = icl_init_clock_gating;
This page took 0.202575 seconds and 4 git commands to generate.