* Without a 1:1 association between relocation handles and
* the execobject[] index, we instead create a hashtable.
* We size it dynamically based on available memory, starting
- * first with 1:1 assocative hash and scaling back until
+ * first with 1:1 associative hash and scaling back until
* the allocation succeeds.
*
* Later on we use a positive lut_size to indicate we are
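The sizing strategy this comment describes, try the ideal table first and scale back until the allocation succeeds, can be sketched standalone. A minimal illustration assuming a halving step; struct bucket and alloc_hashtable are invented names, not the driver's actual allocator:

/*
 * Minimal sketch of "scale back until the allocation succeeds";
 * illustrative only, not the driver's real types.
 */
#include <stdio.h>
#include <stdlib.h>

struct bucket { void *head; };

static struct bucket *alloc_hashtable(unsigned int count,
				      unsigned int *out_count)
{
	struct bucket *table = NULL;

	/* Start at a 1:1 bucket-to-entry ratio, halving on failure. */
	while (count) {
		table = calloc(count, sizeof(*table));
		if (table)
			break;
		count >>= 1;
	}
	*out_count = count;
	return table;
}

int main(void)
{
	unsigned int n;
	struct bucket *ht = alloc_hashtable(1u << 20, &n);

	printf("got %u buckets\n", ht ? n : 0);
	free(ht);
	return 0;
}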
u64_to_user_ptr(entry->relocs_ptr);
unsigned long remain = entry->relocation_count;
- if (unlikely(remain > N_RELOC(ULONG_MAX)))
+ if (unlikely(remain > N_RELOC(INT_MAX)))
return -EINVAL;
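The tightened bound replaces ULONG_MAX with INT_MAX, presumably so that the relocation count multiplied by the per-entry size still fits in a signed 32-bit value when later arithmetic is carried out in int-sized variables. A standalone sketch under that assumption; the 32-byte reloc_entry is a stand-in, not the real uAPI struct:

/*
 * Sketch of the overflow guard, assuming N_RELOC() divides a
 * byte limit by the per-entry size.
 */
#include <errno.h>
#include <limits.h>
#include <stdio.h>

struct reloc_entry { unsigned char bytes[32]; };
#define N_RELOC(x) ((x) / sizeof(struct reloc_entry))

static int check_count(unsigned long remain)
{
	/*
	 * Bounding by N_RELOC(INT_MAX) rather than N_RELOC(ULONG_MAX)
	 * guarantees remain * sizeof(struct reloc_entry) fits in a
	 * signed 32-bit value, so int-sized arithmetic on it cannot
	 * overflow.
	 */
	if (remain > N_RELOC(INT_MAX))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", check_count(N_RELOC(INT_MAX) + 1));	/* -EINVAL */
	printf("%d\n", check_count(1024));			/* 0 */
	return 0;
}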
/*
if (size == 0)
return 0;
- if (size > N_RELOC(ULONG_MAX))
+ if (size > N_RELOC(INT_MAX))
return -EINVAL;
addr = u64_to_user_ptr(entry->relocs_ptr);
GT_TRACE(gt, "end\n");
}
+ static void set_wedged_work(struct work_struct *w)
+ {
+ struct intel_gt *gt = container_of(w, struct intel_gt, wedge);
+ intel_wakeref_t wf;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wf)
+ __intel_gt_set_wedged(gt);
+ }
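The new set_wedged_work() recovers the intel_gt from its embedded work item and wraps __intel_gt_set_wedged() in with_intel_runtime_pm(), so wedging from a workqueue runs with a runtime-PM wakeref held. A standalone sketch of the container_of pattern it relies on; struct device_state and its fields are invented, and the workqueue is reduced to a direct call:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { void (*func)(struct work_struct *); };

struct device_state {
	int id;
	struct work_struct wedge;	/* embedded, as intel_gt embeds ->wedge */
};

static void wedge_worker(struct work_struct *w)
{
	/* Recover the containing structure from the embedded member. */
	struct device_state *ds = container_of(w, struct device_state, wedge);

	printf("wedging device %d\n", ds->id);
}

int main(void)
{
	struct device_state ds = { .id = 7, .wedge = { .func = wedge_worker } };

	ds.wedge.func(&ds.wedge);	/* a real workqueue would defer this */
	return 0;
}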
+
void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
if (GEM_SHOW_DEBUG()) {
struct drm_printer p = drm_dbg_printer(&gt->i915->drm,
- DRM_UT_DRIVER, __func__);
+ DRM_UT_DRIVER, NULL);
struct intel_engine_cs *engine;
enum intel_engine_id id;
init_waitqueue_head(&gt->reset.queue);
mutex_init(&gt->reset.mutex);
init_srcu_struct(&gt->reset.backoff_srcu);
+ INIT_WORK(&gt->wedge, set_wedged_work);
/*
* While undesirable to wait inside the shrinker, complain anyway.
struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
gt_err(w->gt, "%s timed out, cancelling all in-flight rendering.\n", w->name);
- intel_gt_set_wedged(w->gt);
+ set_wedged_work(&w->gt->wedge);
}
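Because this timeout handler itself already runs from a workqueue, the diff calls set_wedged_work() on the embedded work item directly instead of going through intel_gt_set_wedged(), reusing the helper's runtime-PM handling without queuing further work. A minimal sketch of that direct-invocation shape, with all names invented:

#include <stdio.h>

struct work_struct { void (*func)(struct work_struct *); };

static void wedge_work(struct work_struct *w)
{
	puts("wedging device");
}

/* Runs from a workqueue already, so invoke the body synchronously. */
static void wedge_timeout(struct work_struct *wedge)
{
	puts("timed out, cancelling all in-flight rendering");
	wedge_work(wedge);
}

int main(void)
{
	struct work_struct wedge = { .func = wedge_work };

	wedge_timeout(&wedge);
	return 0;
}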
void __intel_init_wedge(struct intel_wedge_me *w,