Merge tag 'drm-intel-gt-next-2024-12-18' of https://gitlab.freedesktop.org/drm/i915...
author Dave Airlie <[email protected]>
Wed, 18 Dec 2024 21:59:20 +0000 (07:59 +1000)
committer Dave Airlie <[email protected]>
Wed, 18 Dec 2024 21:59:21 +0000 (07:59 +1000)
Driver Changes:

- More accurate engine busyness metrics with GuC submission (Umesh)
- Ensure partial BO segment offset never exceeds allowed max (Krzysztof)
- Flush GuC CT receive tasklet during reset preparation (Zhanjun)

- Code cleanups and refactoring (David, Lucas)
- Debugging improvements (Jesus)
- Selftest improvements (Sk)

Signed-off-by: Dave Airlie <[email protected]>
From: Joonas Lahtinen <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
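
Note: the engine-busyness item above corresponds to the guc_engine_busyness() hunk in the diff below, where the driver caches the last value it reported (stats->total) and only allows it to grow. A minimal, self-contained sketch of that monotonic-counter pattern, with hypothetical names and not the actual i915 code, looks like this:

#include <linux/types.h>

/* Hypothetical per-engine bookkeeping; plays the role of stats->total in the diff. */
struct busy_stats {
	u64 total_ns;	/* last busyness value handed out */
};

/*
 * Recompute busyness from hardware-derived inputs, but never report a value
 * smaller than a previous one: transient conditions (GT parked, reset in
 * flight) must not make the counter appear to go backwards.
 */
static u64 report_busyness_ns(struct busy_stats *stats, u64 fresh_ns)
{
	if (fresh_ns > stats->total_ns)
		stats->total_ns = fresh_ns;

	return stats->total_ns;
}

Clamping at the reporting boundary keeps consumers such as PMU sampling happy, since they generally expect a busyness counter that never decreases.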
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c

index 9ede6f240d7936e48853e8a59db69be23a9b745a,a2812621625bf8e13df9eb05c753a6c5d12ba5e3..12f1ba7ca9c1953e9d91c83ebfd7651d42375344
@@@ -1243,6 -1243,21 +1243,21 @@@ static void __get_engine_usage_record(s
        } while (++i < 6);
  }
  
+ static void __set_engine_usage_record(struct intel_engine_cs *engine,
+                                     u32 last_in, u32 id, u32 total)
+ {
+       struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine);
+
+ #define record_write(map_, field_, val_) \
+       iosys_map_wr_field(map_, 0, struct guc_engine_usage_record, field_, val_)
+
+       record_write(&rec_map, last_switch_in_stamp, last_in);
+       record_write(&rec_map, current_context_index, id);
+       record_write(&rec_map, total_runtime, total);
+
+ #undef record_write
+ }
+
  static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
  {
        struct intel_engine_guc_stats *stats = &engine->stats.guc;
@@@ -1339,7 -1354,7 +1354,7 @@@ static ktime_t guc_engine_busyness(stru
         * start_gt_clk is derived from GuC state. To get a consistent
         * view of activity, we query the GuC state only if gt is awake.
         */
 -      wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
 +      wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt);
        if (wakeref) {
                stats_saved = *stats;
                gt_stamp_saved = guc->timestamp.gt_stamp;
                total += intel_gt_clock_interval_to_ns(gt, clk);
        }
  
+       if (total > stats->total)
+               stats->total = total;
        spin_unlock_irqrestore(&guc->timestamp.lock, flags);
  
-       return ns_to_ktime(total);
+       return ns_to_ktime(stats->total);
  }
  
  static void guc_enable_busyness_worker(struct intel_guc *guc)
@@@ -1431,8 -1449,21 +1449,21 @@@ static void __reset_guc_busyness_stats(
  
        guc_update_pm_timestamp(guc, &unused);
        for_each_engine(engine, gt, id) {
+               struct intel_engine_guc_stats *stats = &engine->stats.guc;
+
                guc_update_engine_gt_clks(engine);
-               engine->stats.guc.prev_total = 0;
+               /*
+                * If resetting a running context, accumulate the active
+                * time as well since there will be no context switch.
+                */
+               if (stats->running) {
+                       u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
+
+                       stats->total_gt_clks += clk;
+               }
+               stats->prev_total = 0;
+               stats->running = 0;
        }
  
        spin_unlock_irqrestore(&guc->timestamp.lock, flags);
@@@ -1543,6 -1574,9 +1574,9 @@@ err_trylock
  
  static int guc_action_enable_usage_stats(struct intel_guc *guc)
  {
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
        u32 offset = intel_guc_engine_usage_offset(guc);
        u32 action[] = {
                INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF,
                0,
        };
  
+       for_each_engine(engine, gt, id)
+               __set_engine_usage_record(engine, 0, 0xffffffff, 0);
+
        return intel_guc_send(guc, action, ARRAY_SIZE(action));
  }
  
@@@ -1688,6 -1725,10 +1725,10 @@@ void intel_guc_submission_reset_prepare
        spin_lock_irq(guc_to_gt(guc)->irq_lock);
        spin_unlock_irq(guc_to_gt(guc)->irq_lock);
  
+       /* Flush tasklet */
+       tasklet_disable(&guc->ct.receive_tasklet);
+       tasklet_enable(&guc->ct.receive_tasklet);
+
        guc_flush_submissions(guc);
        guc_flush_destroyed_contexts(guc);
        flush_work(&guc->ct.requests.worker);
@@@ -2005,6 -2046,8 +2046,8 @@@ void intel_guc_submission_cancel_reques
  
  void intel_guc_submission_reset_finish(struct intel_guc *guc)
  {
+       int outstanding;
+
        /* Reset called during driver load or during wedge? */
        if (unlikely(!guc_submission_initialized(guc) ||
                     !intel_guc_is_fw_running(guc) ||
         * see in CI if this happens frequently / a precursor to taking down the
         * machine.
         */
-       if (atomic_read(&guc->outstanding_submission_g2h))
-               guc_err(guc, "Unexpected outstanding GuC to Host in reset finish\n");
+       outstanding = atomic_read(&guc->outstanding_submission_g2h);
+       if (outstanding)
+               guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
+                       outstanding);
        atomic_set(&guc->outstanding_submission_g2h, 0);
  
        intel_guc_global_policies_update(guc);
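
Note: in the intel_guc_submission_reset_prepare() hunk above, the CT receive tasklet is flushed with a disable/enable pair. tasklet_disable() does not return while an instance of the tasklet is still executing, so the pair acts as a synchronization point without permanently stopping the tasklet. A minimal sketch of the pattern in a generic driver, with hypothetical names, is:

#include <linux/interrupt.h>

/* Hypothetical driver object owning a receive tasklet. */
struct demo_ct {
	struct tasklet_struct receive_tasklet;
};

/*
 * Wait for any currently running invocation of the receive tasklet to finish.
 * tasklet_disable() spins until a running instance returns; re-enabling right
 * after lets already-scheduled work run again later, so this flushes execution
 * without cancelling pending work.
 */
static void demo_ct_flush_receive_tasklet(struct demo_ct *ct)
{
	tasklet_disable(&ct->receive_tasklet);
	tasklet_enable(&ct->receive_tasklet);
}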