Git Repo - linux.git/commitdiff
Merge drm/drm-next into drm-misc-next
author    Maxime Ripard <[email protected]>
          Mon, 25 Oct 2021 13:27:56 +0000 (15:27 +0200)
committer Maxime Ripard <[email protected]>
          Mon, 25 Oct 2021 13:27:56 +0000 (15:27 +0200)
drm-misc-next hasn't been updated in a while and I need a post-rc2
state to merge some vc4 patches.

Signed-off-by: Maxime Ripard <[email protected]>
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/drm_dp_helper.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/i915/i915_request.c
include/drm/drm_connector.h
include/drm/drm_dp_helper.h
include/linux/dma-resv.h

diff --combined MAINTAINERS
index 100d7f93a15bf6154ee8aa6927d4731740c9a232,fbfeca76f2af49a70b4c4afced064e2b1b901edb..00e372b9ffdbfb2a6d1244e0823309b226485f26
@@@ -977,12 -977,12 +977,12 @@@ L:      [email protected]
  S:    Maintained
  F:    drivers/platform/x86/amd-pmc.*
  
- AMD POWERPLAY
+ AMD POWERPLAY AND SWSMU
  M:    Evan Quan <[email protected]>
  L:    [email protected]
  S:    Supported
  T:    git https://gitlab.freedesktop.org/agd5f/linux.git
- F:    drivers/gpu/drm/amd/pm/powerplay/
+ F:    drivers/gpu/drm/amd/pm/
  
  AMD PTDMA DRIVER
  M:    Sanjay R Mehta <[email protected]>
@@@ -6422,14 -6422,6 +6422,14 @@@ F:    drivers/gpu/drm/drm_panel.
  F:    drivers/gpu/drm/panel/
  F:    include/drm/drm_panel.h
  
 +DRM PRIVACY-SCREEN CLASS
 +M:    Hans de Goede <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +F:    drivers/gpu/drm/drm_privacy_screen*
 +F:    include/drm/drm_privacy_screen*
 +
  DRM TTM SUBSYSTEM
  M:    Christian Koenig <[email protected]>
  M:    Huang Rui <[email protected]>
@@@ -14364,7 -14356,8 +14364,8 @@@ F:   Documentation/devicetree/bindings/pc
  F:    drivers/pci/controller/pci-ixp4xx.c
  
  PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
- M:    Jonathan Derrick <[email protected]>
+ M:    Nirmal Patel <[email protected]>
+ R:    Jonathan Derrick <[email protected]>
  L:    [email protected]
  S:    Supported
  F:    drivers/pci/controller/vmd.c
index 722e3c9e88824d9a9339bd75453d612af41e82c5,19d4d8b544906f50f11d06fa209f5345998902ba..30b7dde496fc644a3fceeda4488164ee32424052
@@@ -515,6 -515,15 +515,15 @@@ static int amdgpu_bo_move(struct ttm_bu
                goto out;
        }
  
+       if (bo->type == ttm_bo_type_device &&
+           new_mem->mem_type == TTM_PL_VRAM &&
+           old_mem->mem_type != TTM_PL_VRAM) {
+               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
+                * accesses the BO after it's moved.
+                */
+               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+       }
        if (adev->mman.buffer_funcs_enabled) {
                if (((old_mem->mem_type == TTM_PL_SYSTEM &&
                      new_mem->mem_type == TTM_PL_VRAM) ||
                        return r;
        }
  
-       if (bo->type == ttm_bo_type_device &&
-           new_mem->mem_type == TTM_PL_VRAM &&
-           old_mem->mem_type != TTM_PL_VRAM) {
-               /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU
-                * accesses the BO after it's moved.
-                */
-               abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-       }
  out:
        /* update statistics */
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
@@@ -1345,9 -1345,10 +1345,9 @@@ static bool amdgpu_ttm_bo_eviction_valu
                                            const struct ttm_place *place)
  {
        unsigned long num_pages = bo->resource->num_pages;
 +      struct dma_resv_iter resv_cursor;
        struct amdgpu_res_cursor cursor;
 -      struct dma_resv_list *flist;
        struct dma_fence *f;
 -      int i;
  
        /* Swapout? */
        if (bo->resource->mem_type == TTM_PL_SYSTEM)
         * If true, then return false as any KFD process needs all its BOs to
         * be resident to run successfully
         */
 -      flist = dma_resv_shared_list(bo->base.resv);
 -      if (flist) {
 -              for (i = 0; i < flist->shared_count; ++i) {
 -                      f = rcu_dereference_protected(flist->shared[i],
 -                              dma_resv_held(bo->base.resv));
 -                      if (amdkfd_fence_check_mm(f, current->mm))
 -                              return false;
 -              }
 +      dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
 +              if (amdkfd_fence_check_mm(f, current->mm))
 +                      return false;
        }
  
        switch (bo->resource->mem_type) {
@@@ -2043,6 -2049,36 +2043,36 @@@ error_free
        return r;
  }
  
+ /**
+  * amdgpu_ttm_evict_resources - evict memory buffers
+  * @adev: amdgpu device object
+  * @mem_type: evicted BO's memory type
+  *
+  * Evicts all @mem_type buffers on the lru list of the memory type.
+  *
+  * Returns:
+  * 0 for success or a negative error code on failure.
+  */
+ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
+ {
+       struct ttm_resource_manager *man;
+       switch (mem_type) {
+       case TTM_PL_VRAM:
+       case TTM_PL_TT:
+       case AMDGPU_PL_GWS:
+       case AMDGPU_PL_GDS:
+       case AMDGPU_PL_OA:
+               man = ttm_manager_type(&adev->mman.bdev, mem_type);
+               break;
+       default:
+               DRM_ERROR("Trying to evict invalid memory type\n");
+               return -EINVAL;
+       }
+       return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ }
  #if defined(CONFIG_DEBUG_FS)
  
  static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
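For context, a minimal sketch of how a caller (for instance a suspend or unload
path; the caller name is illustrative, not part of this commit) might use the
new amdgpu_ttm_evict_resources() helper added above:

    /* Illustrative caller: push evictable buffers out of VRAM and GTT.
     * Error handling is simplified.
     */
    static int example_evict_before_poweroff(struct amdgpu_device *adev)
    {
            int r;

            /* Evict everything on the VRAM manager's LRU list. */
            r = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
            if (r)
                    return r;

            /* Then do the same for GTT-backed buffers. */
            return amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
    }
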
index e42dd79ed6f45e416454100fa991c137ec077858,0e7dc23f78e7ff767ece9a031c39a0fe39e54a95..a96ae4c0e040df6ae1b80204d8020cd5a95c13cc
@@@ -800,7 -800,7 +800,7 @@@ static int amdgpu_vm_clear_bo(struct am
        struct amdgpu_bo *bo = &vmbo->bo;
        unsigned entries, ats_entries;
        uint64_t addr;
-       int r;
+       int r, idx;
  
        /* Figure out our place in the hierarchy */
        if (ancestor->parent) {
                        return r;
        }
  
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
+               return -ENODEV;
        r = vm->update_funcs->map_table(vmbo);
        if (r)
-               return r;
+               goto exit;
  
        memset(&params, 0, sizeof(params));
        params.adev = adev;
  
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
-               return r;
+               goto exit;
  
        addr = 0;
        if (ats_entries) {
                r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
                                             value, flags);
                if (r)
-                       return r;
+                       goto exit;
  
                addr += ats_entries * 8;
        }
                r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
                                             value, flags);
                if (r)
-                       return r;
+                       goto exit;
        }
  
-       return vm->update_funcs->commit(&params, NULL);
+       r = vm->update_funcs->commit(&params, NULL);
+ exit:
+       drm_dev_exit(idx);
+       return r;
  }
  
  /**
@@@ -1384,11 -1390,14 +1390,14 @@@ int amdgpu_vm_update_pdes(struct amdgpu
                          struct amdgpu_vm *vm, bool immediate)
  {
        struct amdgpu_vm_update_params params;
-       int r;
+       int r, idx;
  
        if (list_empty(&vm->relocated))
                return 0;
  
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
+               return -ENODEV;
        memset(&params, 0, sizeof(params));
        params.adev = adev;
        params.vm = vm;
  
        r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
        if (r)
-               return r;
+               goto exit;
  
        while (!list_empty(&vm->relocated)) {
                struct amdgpu_vm_bo_base *entry;
        r = vm->update_funcs->commit(&params, &vm->last_update);
        if (r)
                goto error;
+       drm_dev_exit(idx);
        return 0;
  
  error:
        amdgpu_vm_invalidate_pds(adev, vm);
+ exit:
+       drm_dev_exit(idx);
        return r;
  }
  
@@@ -1706,7 -1718,7 +1718,7 @@@ int amdgpu_vm_bo_update_mapping(struct 
        enum amdgpu_sync_mode sync_mode;
        int r, idx;
  
-       if (!drm_dev_enter(&adev->ddev, &idx))
+       if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;
  
        memset(&params, 0, sizeof(params));
@@@ -2090,14 -2102,30 +2102,14 @@@ static void amdgpu_vm_free_mapping(stru
  static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  {
        struct dma_resv *resv = vm->root.bo->tbo.base.resv;
 -      struct dma_fence *excl, **shared;
 -      unsigned i, shared_count;
 -      int r;
 +      struct dma_resv_iter cursor;
 +      struct dma_fence *fence;
  
 -      r = dma_resv_get_fences(resv, &excl, &shared_count, &shared);
 -      if (r) {
 -              /* Not enough memory to grab the fence list, as last resort
 -               * block for all the fences to complete.
 -               */
 -              dma_resv_wait_timeout(resv, true, false,
 -                                                  MAX_SCHEDULE_TIMEOUT);
 -              return;
 -      }
 -
 -      /* Add a callback for each fence in the reservation object */
 -      amdgpu_vm_prt_get(adev);
 -      amdgpu_vm_add_prt_cb(adev, excl);
 -
 -      for (i = 0; i < shared_count; ++i) {
 +      dma_resv_for_each_fence(&cursor, resv, true, fence) {
 +              /* Add a callback for each fence in the reservation object */
                amdgpu_vm_prt_get(adev);
 -              amdgpu_vm_add_prt_cb(adev, shared[i]);
 +              amdgpu_vm_add_prt_cb(adev, fence);
        }
 -
 -      kfree(shared);
  }
  
  /**
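The amdgpu_vm.c hunks above wrap the page-table update paths in
drm_dev_enter()/drm_dev_exit() so they bail out cleanly once the device has
been unplugged. A minimal sketch of that guard pattern (the function name and
body are illustrative):

    static int example_touch_hw(struct amdgpu_device *adev)
    {
            int idx, r = 0;

            /* Fails once the underlying drm_device has been unplugged. */
            if (!drm_dev_enter(adev_to_drm(adev), &idx))
                    return -ENODEV;

            /* ... program registers or submit page-table updates here ... */

            drm_dev_exit(idx);
            return r;
    }
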
index 3b46eb31c74bbab9715543380bd127fe6c67896c,4d0d1e8e51fa77e530821ec7a90114093a9b4e7c..ada0a1ff262d749db5e30a6fe1116dba8e135474
@@@ -130,6 -130,20 +130,20 @@@ u8 drm_dp_get_adjust_request_pre_emphas
  }
  EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
  
+ /* DP 2.0 128b/132b */
+ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+                                  int lane)
+ {
+       int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+       int s = ((lane & 1) ?
+                DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT :
+                DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT);
+       u8 l = dp_link_status(link_status, i);
+       return (l >> s) & 0xf;
+ }
+ EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
  u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
                                         unsigned int lane)
  {
  }
  EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
  
 -void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
 -                                          const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 +static int __8b10b_clock_recovery_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
  {
 -      unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
 -                                       DP_TRAINING_AUX_RD_MASK;
 -
        if (rd_interval > 4)
 -              drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
 +              drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
                            aux->name, rd_interval);
  
 -      if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
 -              rd_interval = 100;
 -      else
 -              rd_interval *= 4 * USEC_PER_MSEC;
 +      if (rd_interval == 0)
 +              return 100;
  
 -      usleep_range(rd_interval, rd_interval * 2);
 +      return rd_interval * 4 * USEC_PER_MSEC;
  }
 -EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
  
 -static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
 -                                               unsigned long rd_interval)
 +static int __8b10b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
  {
        if (rd_interval > 4)
 -              drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
 +              drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x (max 4)\n",
                            aux->name, rd_interval);
  
        if (rd_interval == 0)
 -              rd_interval = 400;
 +              return 400;
 +
 +      return rd_interval * 4 * USEC_PER_MSEC;
 +}
 +
 +static int __128b132b_channel_eq_delay_us(const struct drm_dp_aux *aux, u8 rd_interval)
 +{
 +      switch (rd_interval) {
 +      default:
 +              drm_dbg_kms(aux->drm_dev, "%s: invalid AUX interval 0x%02x\n",
 +                          aux->name, rd_interval);
 +              fallthrough;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US:
 +              return 400;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS:
 +              return 4000;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS:
 +              return 8000;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS:
 +              return 12000;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS:
 +              return 16000;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS:
 +              return 32000;
 +      case DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS:
 +              return 64000;
 +      }
 +}
 +
 +/*
 + * The link training delays are different for:
 + *
 + *  - Clock recovery vs. channel equalization
 + *  - DPRX vs. LTTPR
 + *  - 128b/132b vs. 8b/10b
 + *  - DPCD rev 1.3 vs. later
 + *
 + * Get the correct delay in us, reading DPCD if necessary.
 + */
 +static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                      enum drm_dp_phy dp_phy, bool uhbr, bool cr)
 +{
 +      int (*parse)(const struct drm_dp_aux *aux, u8 rd_interval);
 +      unsigned int offset;
 +      u8 rd_interval, mask;
 +
 +      if (dp_phy == DP_PHY_DPRX) {
 +              if (uhbr) {
 +                      if (cr)
 +                              return 100;
 +
 +                      offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL;
 +                      mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
 +                      parse = __128b132b_channel_eq_delay_us;
 +              } else {
 +                      if (cr && dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
 +                              return 100;
 +
 +                      offset = DP_TRAINING_AUX_RD_INTERVAL;
 +                      mask = DP_TRAINING_AUX_RD_MASK;
 +                      if (cr)
 +                              parse = __8b10b_clock_recovery_delay_us;
 +                      else
 +                              parse = __8b10b_channel_eq_delay_us;
 +              }
 +      } else {
 +              if (uhbr) {
 +                      offset = DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
 +                      mask = DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK;
 +                      parse = __128b132b_channel_eq_delay_us;
 +              } else {
 +                      if (cr)
 +                              return 100;
 +
 +                      offset = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy);
 +                      mask = DP_TRAINING_AUX_RD_MASK;
 +                      parse = __8b10b_channel_eq_delay_us;
 +              }
 +      }
 +
 +      if (offset < DP_RECEIVER_CAP_SIZE) {
 +              rd_interval = dpcd[offset];
 +      } else {
 +              if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) {
 +                      drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n",
 +                                  aux->name);
 +                      /* arbitrary default delay */
 +                      return 400;
 +              }
 +      }
 +
 +      return parse(aux, rd_interval & mask);
 +}
 +
 +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   enum drm_dp_phy dp_phy, bool uhbr)
 +{
 +      return __read_delay(aux, dpcd, dp_phy, uhbr, true);
 +}
 +EXPORT_SYMBOL(drm_dp_read_clock_recovery_delay);
 +
 +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                               enum drm_dp_phy dp_phy, bool uhbr)
 +{
 +      return __read_delay(aux, dpcd, dp_phy, uhbr, false);
 +}
 +EXPORT_SYMBOL(drm_dp_read_channel_eq_delay);
 +
 +void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
 +                                          const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 +{
 +      u8 rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
 +              DP_TRAINING_AUX_RD_MASK;
 +      int delay_us;
 +
 +      if (dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
 +              delay_us = 100;
        else
 -              rd_interval *= 4 * USEC_PER_MSEC;
 +              delay_us = __8b10b_clock_recovery_delay_us(aux, rd_interval);
 +
 +      usleep_range(delay_us, delay_us * 2);
 +}
 +EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
 +
 +static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
 +                                               u8 rd_interval)
 +{
 +      int delay_us = __8b10b_channel_eq_delay_us(aux, rd_interval);
  
 -      usleep_range(rd_interval, rd_interval * 2);
 +      usleep_range(delay_us, delay_us * 2);
  }
  
  void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
@@@ -324,15 -221,33 +338,33 @@@ EXPORT_SYMBOL(drm_dp_lttpr_link_train_c
  
  u8 drm_dp_link_rate_to_bw_code(int link_rate)
  {
-       /* Spec says link_bw = link_rate / 0.27Gbps */
-       return link_rate / 27000;
+       switch (link_rate) {
+       case 1000000:
+               return DP_LINK_BW_10;
+       case 1350000:
+               return DP_LINK_BW_13_5;
+       case 2000000:
+               return DP_LINK_BW_20;
+       default:
+               /* Spec says link_bw = link_rate / 0.27Gbps */
+               return link_rate / 27000;
+       }
  }
  EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
  
  int drm_dp_bw_code_to_link_rate(u8 link_bw)
  {
-       /* Spec says link_rate = link_bw * 0.27Gbps */
-       return link_bw * 27000;
+       switch (link_bw) {
+       case DP_LINK_BW_10:
+               return 1000000;
+       case DP_LINK_BW_13_5:
+               return 1350000;
+       case DP_LINK_BW_20:
+               return 2000000;
+       default:
+               /* Spec says link_rate = link_bw * 0.27Gbps */
+               return link_bw * 27000;
+       }
  }
  EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
  
@@@ -707,7 -622,7 +739,7 @@@ static u8 drm_dp_downstream_port_count(
  static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
                                          u8 dpcd[DP_RECEIVER_CAP_SIZE])
  {
-       u8 dpcd_ext[6];
+       u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
        int ret;
  
        /*
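A hedged sketch of how link-training code might consume the new
drm_dp_read_clock_recovery_delay()/drm_dp_read_channel_eq_delay() helpers; the
wrapper name is illustrative, and the channel-equalization wait works the same
way:

    static void example_wait_clock_recovery(struct drm_dp_aux *aux,
                                            const u8 dpcd[DP_RECEIVER_CAP_SIZE],
                                            enum drm_dp_phy dp_phy, bool uhbr)
    {
            /* Delay in microseconds, read from the DPCD when needed. */
            int delay_us = drm_dp_read_clock_recovery_delay(aux, dpcd, dp_phy, uhbr);

            usleep_range(delay_us, delay_us * 2);
    }
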
index 83cf0bc64eb9932a3e03d10ac24356ce6b15a0a6,bf8a6e823a15075ef6b8dbbe252c7d62b1bc1870..c97323365675c9e66b897a0ac7fbe51bf71ca69e
@@@ -25,7 -25,6 +25,7 @@@
  #include <drm/drm_crtc.h>
  #include <drm/drm_device.h>
  #include <drm/drm_modeset_lock.h>
 +#include <drm/drm_print.h>
  
  /**
   * DOC: kms locking
  
  static DEFINE_WW_CLASS(crtc_ww_class);
  
 +#if IS_ENABLED(CONFIG_DRM_DEBUG_MODESET_LOCK)
 +static noinline depot_stack_handle_t __drm_stack_depot_save(void)
 +{
 +      unsigned long entries[8];
 +      unsigned int n;
 +
 +      n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 +
 +      return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
 +}
 +
 +static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 +{
 +      struct drm_printer p = drm_debug_printer("drm_modeset_lock");
 +      unsigned long *entries;
 +      unsigned int nr_entries;
 +      char *buf;
 +
 +      buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
 +      if (!buf)
 +              return;
 +
 +      nr_entries = stack_depot_fetch(stack_depot, &entries);
 +      stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 2);
 +
 +      drm_printf(&p, "attempting to lock a contended lock without backoff:\n%s", buf);
 +
 +      kfree(buf);
 +}
 +#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
 +static depot_stack_handle_t __drm_stack_depot_save(void)
 +{
 +      return 0;
 +}
 +static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
 +{
 +}
 +#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
 +
  /**
   * drm_modeset_lock_all - take all modeset locks
   * @dev: DRM device
@@@ -265,9 -225,7 +265,9 @@@ EXPORT_SYMBOL(drm_modeset_acquire_fini)
   */
  void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx)
  {
 -      WARN_ON(ctx->contended);
 +      if (WARN_ON(ctx->contended))
 +              __drm_stack_depot_print(ctx->stack_depot);
 +
        while (!list_empty(&ctx->locked)) {
                struct drm_modeset_lock *lock;
  
@@@ -285,13 -243,12 +285,13 @@@ static inline int modeset_lock(struct d
  {
        int ret;
  
 -      WARN_ON(ctx->contended);
 +      if (WARN_ON(ctx->contended))
 +              __drm_stack_depot_print(ctx->stack_depot);
  
        if (ctx->trylock_only) {
                lockdep_assert_held(&ctx->ww_ctx);
  
-               if (!ww_mutex_trylock(&lock->mutex))
+               if (!ww_mutex_trylock(&lock->mutex, NULL))
                        return -EBUSY;
                else
                        return 0;
                ret = 0;
        } else if (ret == -EDEADLK) {
                ctx->contended = lock;
 +              ctx->stack_depot = __drm_stack_depot_save();
        }
  
        return ret;
@@@ -340,7 -296,6 +340,7 @@@ int drm_modeset_backoff(struct drm_mode
        struct drm_modeset_lock *contended = ctx->contended;
  
        ctx->contended = NULL;
 +      ctx->stack_depot = 0;
  
        if (WARN_ON(!contended))
                return 0;
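The stack-depot additions above record where a contended modeset lock was
acquired, so that locking without a proper backoff can be reported. For
reference, a sketch of the retry pattern the new warning is meant to enforce
(the function name and the particular lock taken are illustrative):

    static int example_locked_update(struct drm_crtc *crtc)
    {
            struct drm_modeset_acquire_ctx ctx;
            int ret;

            drm_modeset_acquire_init(&ctx, 0);
    retry:
            ret = drm_modeset_lock(&crtc->mutex, &ctx);
            if (ret == -EDEADLK) {
                    /* Drop everything, wait for the contended lock, retry. */
                    ret = drm_modeset_backoff(&ctx);
                    if (!ret)
                            goto retry;
            }

            /* ... modify state under the lock when ret == 0 ... */

            drm_modeset_drop_locks(&ctx);
            drm_modeset_acquire_fini(&ctx);
            return ret;
    }
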
index 3839712ebd237848b6dbfa537f165b470b9c3b9c,2c3cd6e635b5db735e96a8d436a36f55f62b83ac..820a1f38b271e095ffbe17bf1294582110c1312b
@@@ -829,8 -829,6 +829,6 @@@ static void __i915_request_ctor(void *a
        i915_sw_fence_init(&rq->submit, submit_notify);
        i915_sw_fence_init(&rq->semaphore, semaphore_notify);
  
-       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
        rq->capture_list = NULL;
  
        init_llist_head(&rq->execute_cb);
@@@ -905,17 -903,12 +903,12 @@@ __i915_request_create(struct intel_cont
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;
  
-       kref_init(&rq->fence.refcount);
-       rq->fence.flags = 0;
-       rq->fence.error = 0;
-       INIT_LIST_HEAD(&rq->fence.cb_list);
        ret = intel_timeline_get_seqno(tl, rq, &seqno);
        if (ret)
                goto err_free;
  
-       rq->fence.context = tl->fence_context;
-       rq->fence.seqno = seqno;
+       dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+                      tl->fence_context, seqno);
  
        RCU_INIT_POINTER(rq->timeline, tl);
        rq->hwsp_seqno = tl->hwsp_seqno;
@@@ -1152,6 -1145,12 +1145,12 @@@ __emit_semaphore_wait(struct i915_reque
        return 0;
  }
  
+ static bool
+ can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
+ {
+       return to->engine->gt->ggtt == from->engine->gt->ggtt;
+ }
  static int
  emit_semaphore_wait(struct i915_request *to,
                    struct i915_request *from,
        const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
        struct i915_sw_fence *wait = &to->submit;
  
+       if (!can_use_semaphore_wait(to, from))
+               goto await_fence;
        if (!intel_context_use_semaphores(to->context))
                goto await_fence;
  
@@@ -1263,7 -1265,8 +1265,8 @@@ __i915_request_await_execution(struct i
         * immediate execution, and so we must wait until it reaches the
         * active slot.
         */
-       if (intel_engine_has_semaphores(to->engine) &&
+       if (can_use_semaphore_wait(to, from) &&
+           intel_engine_has_semaphores(to->engine) &&
            !i915_request_has_initial_breadcrumb(to)) {
                err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
                if (err < 0)
@@@ -1332,6 -1335,25 +1335,25 @@@ i915_request_await_external(struct i915
        return err;
  }
  
+ static inline bool is_parallel_rq(struct i915_request *rq)
+ {
+       return intel_context_is_parallel(rq->context);
+ }
+ static inline struct intel_context *request_to_parent(struct i915_request *rq)
+ {
+       return intel_context_to_parent(rq->context);
+ }
+ static bool is_same_parallel_context(struct i915_request *to,
+                                    struct i915_request *from)
+ {
+       if (is_parallel_rq(to))
+               return request_to_parent(to) == request_to_parent(from);
+       return false;
+ }
  int
  i915_request_await_execution(struct i915_request *rq,
                             struct dma_fence *fence)
                 * want to run our callback in all cases.
                 */
  
-               if (dma_fence_is_i915(fence))
+               if (dma_fence_is_i915(fence)) {
+                       if (is_same_parallel_context(rq, to_request(fence)))
+                               continue;
                        ret = __i915_request_await_execution(rq,
                                                             to_request(fence));
-               else
+               } else {
                        ret = i915_request_await_external(rq, fence);
+               }
                if (ret < 0)
                        return ret;
        } while (--nchild);
@@@ -1468,10 -1493,13 +1493,13 @@@ i915_request_await_dma_fence(struct i91
                                                 fence))
                        continue;
  
-               if (dma_fence_is_i915(fence))
+               if (dma_fence_is_i915(fence)) {
+                       if (is_same_parallel_context(rq, to_request(fence)))
+                               continue;
                        ret = i915_request_await_request(rq, to_request(fence));
-               else
+               } else {
                        ret = i915_request_await_external(rq, fence);
+               }
                if (ret < 0)
                        return ret;
  
@@@ -1509,49 -1537,89 +1537,65 @@@ i915_request_await_object(struct i915_r
                          struct drm_i915_gem_object *obj,
                          bool write)
  {
 -      struct dma_fence *excl;
 +      struct dma_resv_iter cursor;
 +      struct dma_fence *fence;
        int ret = 0;
  
 -      if (write) {
 -              struct dma_fence **shared;
 -              unsigned int count, i;
 -
 -              ret = dma_resv_get_fences(obj->base.resv, &excl, &count,
 -                                        &shared);
 +      dma_resv_for_each_fence(&cursor, obj->base.resv, write, fence) {
 +              ret = i915_request_await_dma_fence(to, fence);
                if (ret)
 -                      return ret;
 -
 -              for (i = 0; i < count; i++) {
 -                      ret = i915_request_await_dma_fence(to, shared[i]);
 -                      if (ret)
 -                              break;
 -
 -                      dma_fence_put(shared[i]);
 -              }
 -
 -              for (; i < count; i++)
 -                      dma_fence_put(shared[i]);
 -              kfree(shared);
 -      } else {
 -              excl = dma_resv_get_excl_unlocked(obj->base.resv);
 -      }
 -
 -      if (excl) {
 -              if (ret == 0)
 -                      ret = i915_request_await_dma_fence(to, excl);
 -
 -              dma_fence_put(excl);
 +                      break;
        }
  
        return ret;
  }
  
  static struct i915_request *
- __i915_request_add_to_timeline(struct i915_request *rq)
+ __i915_request_ensure_parallel_ordering(struct i915_request *rq,
+                                       struct intel_timeline *timeline)
  {
-       struct intel_timeline *timeline = i915_request_timeline(rq);
        struct i915_request *prev;
  
-       /*
-        * Dependency tracking and request ordering along the timeline
-        * is special cased so that we can eliminate redundant ordering
-        * operations while building the request (we know that the timeline
-        * itself is ordered, and here we guarantee it).
-        *
-        * As we know we will need to emit tracking along the timeline,
-        * we embed the hooks into our request struct -- at the cost of
-        * having to have specialised no-allocation interfaces (which will
-        * be beneficial elsewhere).
-        *
-        * A second benefit to open-coding i915_request_await_request is
-        * that we can apply a slight variant of the rules specialised
-        * for timelines that jump between engines (such as virtual engines).
-        * If we consider the case of virtual engine, we must emit a dma-fence
-        * to prevent scheduling of the second request until the first is
-        * complete (to maximise our greedy late load balancing) and this
-        * precludes optimising to use semaphores serialisation of a single
-        * timeline across engines.
-        */
+       GEM_BUG_ON(!is_parallel_rq(rq));
+       prev = request_to_parent(rq)->parallel.last_rq;
+       if (prev) {
+               if (!__i915_request_is_complete(prev)) {
+                       i915_sw_fence_await_sw_fence(&rq->submit,
+                                                    &prev->submit,
+                                                    &rq->submitq);
+                       if (rq->engine->sched_engine->schedule)
+                               __i915_sched_node_add_dependency(&rq->sched,
+                                                                &prev->sched,
+                                                                &rq->dep,
+                                                                0);
+               }
+               i915_request_put(prev);
+       }
+       request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+       return to_request(__i915_active_fence_set(&timeline->last_request,
+                                                 &rq->fence));
+ }
+ static struct i915_request *
+ __i915_request_ensure_ordering(struct i915_request *rq,
+                              struct intel_timeline *timeline)
+ {
+       struct i915_request *prev;
+       GEM_BUG_ON(is_parallel_rq(rq));
        prev = to_request(__i915_active_fence_set(&timeline->last_request,
                                                  &rq->fence));
        if (prev && !__i915_request_is_complete(prev)) {
                bool uses_guc = intel_engine_uses_guc(rq->engine);
+               bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask |
+                                         rq->engine->mask);
+               bool same_context = prev->context == rq->context;
  
                /*
                 * The requests are supposed to be kept in order. However,
                 * is used as a barrier for external modification to this
                 * context.
                 */
-               GEM_BUG_ON(prev->context == rq->context &&
+               GEM_BUG_ON(same_context &&
                           i915_seqno_passed(prev->fence.seqno,
                                             rq->fence.seqno));
  
-               if ((!uses_guc &&
-                    is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
-                   (uses_guc && prev->context == rq->context))
+               if ((same_context && uses_guc) || (!uses_guc && pow2))
                        i915_sw_fence_await_sw_fence(&rq->submit,
                                                     &prev->submit,
                                                     &rq->submitq);
                                                         0);
        }
  
+       return prev;
+ }
+ static struct i915_request *
+ __i915_request_add_to_timeline(struct i915_request *rq)
+ {
+       struct intel_timeline *timeline = i915_request_timeline(rq);
+       struct i915_request *prev;
+       /*
+        * Dependency tracking and request ordering along the timeline
+        * is special cased so that we can eliminate redundant ordering
+        * operations while building the request (we know that the timeline
+        * itself is ordered, and here we guarantee it).
+        *
+        * As we know we will need to emit tracking along the timeline,
+        * we embed the hooks into our request struct -- at the cost of
+        * having to have specialised no-allocation interfaces (which will
+        * be beneficial elsewhere).
+        *
+        * A second benefit to open-coding i915_request_await_request is
+        * that we can apply a slight variant of the rules specialised
+        * for timelines that jump between engines (such as virtual engines).
+        * If we consider the case of virtual engine, we must emit a dma-fence
+        * to prevent scheduling of the second request until the first is
+        * complete (to maximise our greedy late load balancing) and this
+        * precludes optimising to use semaphores serialisation of a single
+        * timeline across engines.
+        *
+        * We do not order parallel submission requests on the timeline as each
+        * parallel submission context has its own timeline and the ordering
+        * rules for parallel requests are that they must be submitted in the
+        * order received from the execbuf IOCTL. So rather than using the
+        * timeline we store a pointer to last request submitted in the
+        * relationship in the gem context and insert a submission fence
+        * between that request and request passed into this function or
+        * alternatively we use completion fence if gem context has a single
+        * timeline and this is the first submission of an execbuf IOCTL.
+        */
+       if (likely(!is_parallel_rq(rq)))
+               prev = __i915_request_ensure_ordering(rq, timeline);
+       else
+               prev = __i915_request_ensure_parallel_ordering(rq, timeline);
        /*
         * Make sure that no request gazumped us - if it was allocated after
         * our i915_request_alloc() and called __i915_request_add() before
@@@ -1835,7 -1945,7 +1921,7 @@@ long i915_request_wait(struct i915_requ
         * completion. That requires having a good predictor for the request
         * duration, which we currently lack.
         */
-       if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+       if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT &&
            __i915_spin_request(rq, state))
                goto out;
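One of the i915_request.c hunks above folds the open-coded fence field setup
into a single dma_fence_init() call. A self-contained sketch of that
initialiser outside i915, with the ops table, lock and names purely
illustrative:

    #include <linux/dma-fence.h>
    #include <linux/spinlock.h>

    static const char *example_driver_name(struct dma_fence *f)
    {
            return "example";
    }

    static const char *example_timeline_name(struct dma_fence *f)
    {
            return "example-timeline";
    }

    static const struct dma_fence_ops example_fence_ops = {
            .get_driver_name = example_driver_name,
            .get_timeline_name = example_timeline_name,
    };

    static DEFINE_SPINLOCK(example_fence_lock);

    static void example_init_fence(struct dma_fence *fence, u64 context, u64 seqno)
    {
            /* Covers kref_init(), flags, error, cb_list, context and seqno
             * in one call, which is what the i915 change relies on.
             */
            dma_fence_init(fence, &example_fence_ops, &example_fence_lock,
                           context, seqno);
    }
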
  
index b83ba0285f2941d3e184e66614a54ddcee3fd4bc,379746d3266f0b4e3dabfd906b2c422ba1f15b5c..b501d0badaea2ed0da94f19dc96bdc1c1ddeb4b5
@@@ -27,7 -27,6 +27,7 @@@
  #include <linux/llist.h>
  #include <linux/ctype.h>
  #include <linux/hdmi.h>
 +#include <linux/notifier.h>
  #include <drm/drm_mode_object.h>
  #include <drm/drm_util.h>
  
@@@ -41,7 -40,6 +41,7 @@@ struct drm_encoder
  struct drm_property;
  struct drm_property_blob;
  struct drm_printer;
 +struct drm_privacy_screen;
  struct edid;
  struct i2c_adapter;
  
@@@ -322,30 -320,6 +322,30 @@@ struct drm_monitor_range_info 
        u8 max_vfreq;
  };
  
 +/**
 + * enum drm_privacy_screen_status - privacy screen status
 + *
 + * This enum is used to track and control the state of the integrated privacy
 + * screen present on some display panels, via the "privacy-screen sw-state"
 + * and "privacy-screen hw-state" properties. Note the _LOCKED enum values
 + * are only valid for the "privacy-screen hw-state" property.
 + *
 + * @PRIVACY_SCREEN_DISABLED:
 + *  The privacy-screen on the panel is disabled
 + * @PRIVACY_SCREEN_ENABLED:
 + *  The privacy-screen on the panel is enabled
 + * @PRIVACY_SCREEN_DISABLED_LOCKED:
 + *  The privacy-screen on the panel is disabled and locked (cannot be changed)
 + * @PRIVACY_SCREEN_ENABLED_LOCKED:
 + *  The privacy-screen on the panel is enabled and locked (cannot be changed)
 + */
 +enum drm_privacy_screen_status {
 +      PRIVACY_SCREEN_DISABLED = 0,
 +      PRIVACY_SCREEN_ENABLED,
 +      PRIVACY_SCREEN_DISABLED_LOCKED,
 +      PRIVACY_SCREEN_ENABLED_LOCKED,
 +};
 +
  /*
   * This is a consolidated colorimetry list supported by HDMI and
   * DP protocol standard. The respective connectors will register
@@@ -616,6 -590,18 +616,18 @@@ struct drm_display_info 
         * @monitor_range: Frequency range supported by monitor range descriptor
         */
        struct drm_monitor_range_info monitor_range;
+       /**
+        * @mso_stream_count: eDP Multi-SST Operation (MSO) stream count from
+        * the DisplayID VESA vendor block. 0 for conventional Single-Stream
+        * Transport (SST), or 2 or 4 MSO streams.
+        */
+       u8 mso_stream_count;
+       /**
+        * @mso_pixel_overlap: eDP MSO segment pixel overlap, 0-8 pixels.
+        */
+       u8 mso_pixel_overlap;
  };
  
  int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@@ -807,12 -793,6 +819,12 @@@ struct drm_connector_state 
         */
        u8 max_bpc;
  
 +      /**
 +       * @privacy_screen_sw_state: See :ref:`Standard Connector
 +       * Properties<standard_connector_properties>`
 +       */
 +      enum drm_privacy_screen_status privacy_screen_sw_state;
 +
        /**
         * @hdr_output_metadata:
         * DRM blob property for HDR output metadata
@@@ -1441,24 -1421,6 +1453,24 @@@ struct drm_connector 
         */
        struct drm_property *max_bpc_property;
  
 +      /** @privacy_screen: drm_privacy_screen for this connector, or NULL. */
 +      struct drm_privacy_screen *privacy_screen;
 +
 +      /** @privacy_screen_notifier: privacy-screen notifier_block */
 +      struct notifier_block privacy_screen_notifier;
 +
 +      /**
 +       * @privacy_screen_sw_state_property: Optional atomic property for the
 +       * connector to control the integrated privacy screen.
 +       */
 +      struct drm_property *privacy_screen_sw_state_property;
 +
 +      /**
 +       * @privacy_screen_hw_state_property: Optional atomic property for the
 +       * connector to report the actual integrated privacy screen state.
 +       */
 +      struct drm_property *privacy_screen_hw_state_property;
 +
  #define DRM_CONNECTOR_POLL_HPD (1 << 0)
  #define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
  #define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
@@@ -1782,11 -1744,6 +1794,11 @@@ int drm_connector_set_panel_orientation
        int width, int height);
  int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
                                          int min, int max);
 +void drm_connector_create_privacy_screen_properties(struct drm_connector *conn);
 +void drm_connector_attach_privacy_screen_properties(struct drm_connector *conn);
 +void drm_connector_attach_privacy_screen_provider(
 +      struct drm_connector *connector, struct drm_privacy_screen *priv);
 +void drm_connector_update_privacy_screen(const struct drm_connector_state *connector_state);
  
  /**
   * struct drm_tile_group - Tile group metadata
index b653c5da7065100317f33f4819086b5e7522ace9,b52df4db3e8fe2b737bb9ae3145f1e6944da5a5b..afdf7f4183f9a0dadfd22adeafb70bf2fbe27822
@@@ -1114,15 -1114,8 +1114,15 @@@ struct drm_panel
  # define DP_UHBR20                             (1 << 1)
  # define DP_UHBR13_5                           (1 << 2)
  
 -#define DP_128B132B_TRAINING_AUX_RD_INTERVAL   0x2216 /* 2.0 */
 -# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
 +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL                    0x2216 /* 2.0 */
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK              0x7f
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_400_US            0x00
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_4_MS              0x01
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_8_MS              0x02
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_12_MS             0x03
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_16_MS             0x04
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_32_MS             0x05
 +# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_64_MS             0x06
  
  #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0             0x2230
  #define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0x2250
  #define DP_MAX_LANE_COUNT_PHY_REPEATER                            0xf0004 /* 1.4a */
  #define DP_Repeater_FEC_CAPABILITY                        0xf0004 /* 1.4 */
  #define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT             0xf0005 /* 1.4a */
+ #define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER          0xf0006 /* 2.0 */
+ # define DP_PHY_REPEATER_128B132B_SUPPORTED               (1 << 0)
+ /* See DP_128B132B_SUPPORTED_LINK_RATES for values */
+ #define DP_PHY_REPEATER_128B132B_RATES                            0xf0007 /* 2.0 */
  
  enum drm_dp_phy {
        DP_PHY_DPRX,
  # define DP_VOLTAGE_SWING_LEVEL_3_SUPPORTED               BIT(0)
  # define DP_PRE_EMPHASIS_LEVEL_3_SUPPORTED                BIT(1)
  
 +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1  0xf0022 /* 2.0 */
 +#define DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER(dp_phy)     \
 +      DP_LTTPR_REG(dp_phy, DP_128B132B_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1)
 +/* see DP_128B132B_TRAINING_AUX_RD_INTERVAL for values */
 +
  #define DP_LANE0_1_STATUS_PHY_REPEATER1                           0xf0030 /* 1.3 */
  #define DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy) \
        DP_LTTPR_REG(dp_phy, DP_LANE0_1_STATUS_PHY_REPEATER1)
@@@ -1522,6 -1514,8 +1526,8 @@@ u8 drm_dp_get_adjust_request_voltage(co
                                     int lane);
  u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
                                          int lane);
+ u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+                                  int lane);
  u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
                                         unsigned int lane);
  
  #define DP_LTTPR_COMMON_CAP_SIZE      8
  #define DP_LTTPR_PHY_CAP_SIZE         3
  
 +int drm_dp_read_clock_recovery_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   enum drm_dp_phy dp_phy, bool uhbr);
 +int drm_dp_read_channel_eq_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                               enum drm_dp_phy dp_phy, bool uhbr);
 +
  void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
                                            const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
  void drm_dp_lttpr_link_train_clock_recovery_delay(void);
diff --combined include/linux/dma-resv.h
index 45f1d4812a37279c0ef4cbbc9ab6e988086a63fe,8b6c20636a7925310ef3cb05ff8e286d8d07173b..dbd235ab447f9678202d531102d7ebc1122b568a
@@@ -170,20 -170,15 +170,20 @@@ struct dma_resv_iter 
        /** @index: index into the shared fences */
        unsigned int index;
  
 -      /** @fences: the shared fences */
 +      /** @fences: the shared fences; private, *MUST* not dereference  */
        struct dma_resv_list *fences;
  
 +      /** @shared_count: number of shared fences */
 +      unsigned int shared_count;
 +
        /** @is_restarted: true if this is the first returned fence */
        bool is_restarted;
  };
  
  struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
  struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
 +struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
 +struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);
  
  /**
   * dma_resv_iter_begin - initialize a dma_resv_iter object
@@@ -249,24 -244,6 +249,24 @@@ static inline bool dma_resv_iter_is_res
        for (fence = dma_resv_iter_first_unlocked(cursor);              \
             fence; fence = dma_resv_iter_next_unlocked(cursor))
  
 +/**
 + * dma_resv_for_each_fence - fence iterator
 + * @cursor: a struct dma_resv_iter pointer
 + * @obj: a dma_resv object pointer
 + * @all_fences: true if all fences should be returned
 + * @fence: the current fence
 + *
 + * Iterate over the fences in a struct dma_resv object while holding the
 + * &dma_resv.lock. @all_fences controls if the shared fences are returned as
 + * well. The cursor initialisation is part of the iterator and the fence stays
 + * valid as long as the lock is held and so no extra reference to the fence is
 + * taken.
 + */
 +#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)       \
 +      for (dma_resv_iter_begin(cursor, obj, all_fences),      \
 +           fence = dma_resv_iter_first(cursor); fence;        \
 +           fence = dma_resv_iter_next(cursor))
 +
  #define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
  #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
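A minimal usage sketch of the locked dma_resv_for_each_fence() iterator
documented above, mirroring the amdgpu and i915 conversions in this merge; the
function name is illustrative:

    static int example_wait_all_fences(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;
            long ret;

            dma_resv_assert_held(resv);

            /* all_fences == true: shared fences are visited as well. */
            dma_resv_for_each_fence(&cursor, resv, true, fence) {
                    ret = dma_fence_wait(fence, false);
                    if (ret < 0)
                            return ret;
            }

            return 0;
    }
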
  
@@@ -380,7 -357,7 +380,7 @@@ static inline int dma_resv_lock_slow_in
   */
  static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
  {
-       return ww_mutex_trylock(&obj->lock);
+       return ww_mutex_trylock(&obj->lock, NULL);
  }
  
  /**