Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm...
author    Dave Airlie <[email protected]>
          Thu, 17 Aug 2023 20:41:51 +0000 (06:41 +1000)
committer Dave Airlie <[email protected]>
          Thu, 17 Aug 2023 20:42:12 +0000 (06:42 +1000)
This time mostly cleanups around the runtime power management handling
and slightly improved GPU hang handling. Also some additions to the
HWDB to get the driver working properly on more of the GPU IP cores found
in the NXP i.MX8MP SoC.

Signed-off-by: Dave Airlie <[email protected]>
From: Lucas Stach <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
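
As a note for readers of the patch below: the following is a minimal user-space
sketch (not kernel code) of the rollback pattern that the event_alloc()/event_free()
hunks introduce: one runtime-PM reference is taken per allocated event, and the
partially acquired references are dropped again if a resume call fails.
fake_pm_get()/fake_pm_put() and the injected failure are illustrative stand-ins
for pm_runtime_resume_and_get()/pm_runtime_put_autosuspend(), not kernel API.

/*
 * Stand-alone model of the per-event runtime-PM reference handling:
 * acquire one reference per event, undo only what was acquired on failure.
 */
#include <stdio.h>

static int pm_refcount;

static int fake_pm_get(void)
{
	/* Pretend the third resume attempt fails. */
	if (pm_refcount == 2)
		return -1;
	pm_refcount++;
	return 0;
}

static void fake_pm_put(void)
{
	pm_refcount--;
}

static int events_alloc(unsigned int nr_events)
{
	unsigned int i, rpm_count = 0;
	int ret = 0;

	for (i = 0; i < nr_events; i++) {
		ret = fake_pm_get();
		if (ret)
			goto out_rpm;
		rpm_count++;
	}
	return 0;

out_rpm:
	/* Drop only the references that were actually taken. */
	for (i = 0; i < rpm_count; i++)
		fake_pm_put();
	return ret;
}

int main(void)
{
	int ret = events_alloc(4);

	printf("events_alloc() returned %d, refcount back to %d\n",
	       ret, pm_refcount);
	return 0;
}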
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c

index ea55f6b7b744a755d421fbd471e914f49346bb14,b17a5418022037a564bd246219ddad5e5078aec2..a8d3fa81e4ec5d40fabe0f63018074372489b8d3
@@@ -6,7 -6,9 +6,9 @@@
  #include <linux/component.h>
  #include <linux/dma-mapping.h>
  #include <linux/module.h>
- #include <linux/of_platform.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
  #include <linux/uaccess.h>
  
  #include <drm/drm_debugfs.h>
@@@ -481,7 -483,10 +483,7 @@@ static const struct drm_driver etnaviv_
        .driver_features    = DRIVER_GEM | DRIVER_RENDER,
        .open               = etnaviv_open,
        .postclose           = etnaviv_postclose,
 -      .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 -      .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
 -      .gem_prime_mmap     = drm_gem_prime_mmap,
  #ifdef CONFIG_DEBUG_FS
        .debugfs_init       = etnaviv_debugfs_init,
  #endif
index bbc9c54871f43cd4c188c38190649bba31ecda1a,0382cd91eebf9c026b2e2e3ff6b2f3c470ddd432..9276756e1397d37effffaf4dfe84dc940748a6e4
@@@ -8,8 -8,8 +8,8 @@@
  #include <linux/delay.h>
  #include <linux/dma-fence.h>
  #include <linux/dma-mapping.h>
 +#include <linux/mod_devicetable.h>
  #include <linux/module.h>
 -#include <linux/of_device.h>
  #include <linux/platform_device.h>
  #include <linux/pm_runtime.h>
  #include <linux/regulator/consumer.h>
@@@ -493,6 -493,14 +493,14 @@@ static void etnaviv_gpu_update_clock(st
                clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
                etnaviv_gpu_load_clock(gpu, clock);
        }
+       /*
+        * Choose number of wait cycles to target a ~30us (1/32768) max latency
+        * until new work is picked up by the FE when it polls in the idle loop.
+        * If the GPU base frequency is unknown use 200 wait cycles.
+        */
+       gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
+                                  200UL, 0xffffUL);
  }
  
  static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
        /* We rely on the GPU running, so program the clock */
        etnaviv_gpu_update_clock(gpu);
  
-       gpu->fe_running = false;
+       gpu->state = ETNA_GPU_STATE_RESET;
        gpu->exec_state = -1;
        if (gpu->mmu_context)
                etnaviv_iommu_context_put(gpu->mmu_context);
@@@ -651,8 -659,6 +659,6 @@@ void etnaviv_gpu_start_fe(struct etnavi
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
                          VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
        }
-       gpu->fe_running = true;
  }
  
  static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
        u16 prefetch;
        u32 address;
  
+       WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);
        /* setup the MMU */
        etnaviv_iommu_restore(gpu, context);
  
                                        &gpu->mmu_context->cmdbuf_mapping);
  
        etnaviv_gpu_start_fe(gpu, address, prefetch);
+       gpu->state = ETNA_GPU_STATE_RUNNING;
  }
  
  static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
  
  static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
  {
+       WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
+                 gpu->state == ETNA_GPU_STATE_RESET));
        if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
             etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
            gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
        etnaviv_gpu_setup_pulse_eater(gpu);
  
        gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+       gpu->state = ETNA_GPU_STATE_INITIALIZED;
  }
  
  int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
            (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
                gpu->sec_mode = ETNA_SEC_KERNEL;
  
+       gpu->state = ETNA_GPU_STATE_IDENTIFIED;
        ret = etnaviv_hw_reset(gpu);
        if (ret) {
                dev_err(gpu->dev, "GPU reset failed\n");
        pm_runtime_mark_last_busy(gpu->dev);
        pm_runtime_put_autosuspend(gpu->dev);
  
-       gpu->initialized = true;
        return 0;
  
  fail:
@@@ -1059,50 -1074,6 +1074,6 @@@ pm_put
  }
  #endif
  
- void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
- {
-       struct etnaviv_gpu *gpu = submit->gpu;
-       char *comm = NULL, *cmd = NULL;
-       struct task_struct *task;
-       unsigned int i;
-       dev_err(gpu->dev, "recover hung GPU!\n");
-       task = get_pid_task(submit->pid, PIDTYPE_PID);
-       if (task) {
-               comm = kstrdup(task->comm, GFP_KERNEL);
-               cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
-               put_task_struct(task);
-       }
-       if (comm && cmd)
-               dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
-       kfree(cmd);
-       kfree(comm);
-       if (pm_runtime_get_sync(gpu->dev) < 0)
-               goto pm_put;
-       mutex_lock(&gpu->lock);
-       etnaviv_hw_reset(gpu);
-       /* complete all events, the GPU won't do it after the reset */
-       spin_lock(&gpu->event_spinlock);
-       for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
-               complete(&gpu->event_free);
-       bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-       spin_unlock(&gpu->event_spinlock);
-       etnaviv_gpu_hw_init(gpu);
-       mutex_unlock(&gpu->lock);
-       pm_runtime_mark_last_busy(gpu->dev);
- pm_put:
-       pm_runtime_put_autosuspend(gpu->dev);
- }
  /* fence object management */
  struct etnaviv_fence {
        struct etnaviv_gpu *gpu;
@@@ -1183,20 -1154,22 +1154,22 @@@ static int event_alloc(struct etnaviv_g
        unsigned int *events)
  {
        unsigned long timeout = msecs_to_jiffies(10 * 10000);
-       unsigned i, acquired = 0;
+       unsigned i, acquired = 0, rpm_count = 0;
+       int ret;
  
        for (i = 0; i < nr_events; i++) {
-               unsigned long ret;
+               unsigned long remaining;
  
-               ret = wait_for_completion_timeout(&gpu->event_free, timeout);
+               remaining = wait_for_completion_timeout(&gpu->event_free, timeout);
  
-               if (!ret) {
+               if (!remaining) {
                        dev_err(gpu->dev, "wait_for_completion_timeout failed");
+                       ret = -EBUSY;
                        goto out;
                }
  
                acquired++;
-               timeout = ret;
+               timeout = remaining;
        }
  
        spin_lock(&gpu->event_spinlock);
  
        spin_unlock(&gpu->event_spinlock);
  
+       for (i = 0; i < nr_events; i++) {
+               ret = pm_runtime_resume_and_get(gpu->dev);
+               if (ret)
+                       goto out_rpm;
+               rpm_count++;
+       }
        return 0;
  
+ out_rpm:
+       for (i = 0; i < rpm_count; i++)
+               pm_runtime_put_autosuspend(gpu->dev);
  out:
        for (i = 0; i < acquired; i++)
                complete(&gpu->event_free);
  
-       return -EBUSY;
+       return ret;
  }
  
  static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
                clear_bit(event, gpu->event_bitmap);
                complete(&gpu->event_free);
        }
+       pm_runtime_put_autosuspend(gpu->dev);
  }
  
  /*
@@@ -1371,15 -1356,6 +1356,6 @@@ struct dma_fence *etnaviv_gpu_submit(st
        unsigned int i, nr_events = 1, event[3];
        int ret;
  
-       if (!submit->runtime_resumed) {
-               ret = pm_runtime_get_sync(gpu->dev);
-               if (ret < 0) {
-                       pm_runtime_put_noidle(gpu->dev);
-                       return NULL;
-               }
-               submit->runtime_resumed = true;
-       }
        /*
         * if there are performance monitor requests we need to have
         * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
                goto out_unlock;
        }
  
-       if (!gpu->fe_running)
+       if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
                etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);
  
        if (submit->prev_mmu_context)
@@@ -1454,6 -1430,49 +1430,49 @@@ static void sync_point_worker(struct wo
        etnaviv_gpu_start_fe(gpu, addr + 2, 2);
  }
  
+ void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
+ {
+       struct etnaviv_gpu *gpu = submit->gpu;
+       char *comm = NULL, *cmd = NULL;
+       struct task_struct *task;
+       unsigned int i;
+       dev_err(gpu->dev, "recover hung GPU!\n");
+       task = get_pid_task(submit->pid, PIDTYPE_PID);
+       if (task) {
+               comm = kstrdup(task->comm, GFP_KERNEL);
+               cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+               put_task_struct(task);
+       }
+       if (comm && cmd)
+               dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
+       kfree(cmd);
+       kfree(comm);
+       if (pm_runtime_get_sync(gpu->dev) < 0)
+               goto pm_put;
+       mutex_lock(&gpu->lock);
+       etnaviv_hw_reset(gpu);
+       /* complete all events, the GPU won't do it after the reset */
+       spin_lock(&gpu->event_spinlock);
+       for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+               event_free(gpu, i);
+       spin_unlock(&gpu->event_spinlock);
+       etnaviv_gpu_hw_init(gpu);
+       mutex_unlock(&gpu->lock);
+       pm_runtime_mark_last_busy(gpu->dev);
+ pm_put:
+       pm_runtime_put_autosuspend(gpu->dev);
+ }
  static void dump_mmu_fault(struct etnaviv_gpu *gpu)
  {
        static const char *fault_reasons[] = {
@@@ -1520,6 -1539,8 +1539,8 @@@ static irqreturn_t irq_handler(int irq
  
                if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
                        dump_mmu_fault(gpu);
+                       gpu->state = ETNA_GPU_STATE_FAULT;
+                       drm_sched_fault(&gpu->sched);
                        intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
                }
  
@@@ -1628,9 -1649,9 +1649,9 @@@ int etnaviv_gpu_wait_idle(struct etnavi
        } while (1);
  }
  
- static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+ static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
  {
-       if (gpu->initialized && gpu->fe_running) {
+       if (gpu->state == ETNA_GPU_STATE_RUNNING) {
                /* Replace the last WAIT with END */
                mutex_lock(&gpu->lock);
                etnaviv_buffer_end(gpu);
                 */
                etnaviv_gpu_wait_idle(gpu, 100);
  
-               gpu->fe_running = false;
+               gpu->state = ETNA_GPU_STATE_INITIALIZED;
        }
  
        gpu->exec_state = -1;
-       return etnaviv_gpu_clk_disable(gpu);
  }
  
  static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
@@@ -1733,13 -1752,11 +1752,11 @@@ static int etnaviv_gpu_bind(struct devi
        if (ret)
                goto out_workqueue;
  
-       if (IS_ENABLED(CONFIG_PM))
-               ret = pm_runtime_get_sync(gpu->dev);
-       else
+       if (!IS_ENABLED(CONFIG_PM)) {
                ret = etnaviv_gpu_clk_enable(gpu);
-       if (ret < 0)
-               goto out_sched;
+               if (ret < 0)
+                       goto out_sched;
+       }
  
        gpu->drm = drm;
        gpu->fence_context = dma_fence_context_alloc(1);
  
        priv->gpu[priv->num_gpus++] = gpu;
  
-       pm_runtime_mark_last_busy(gpu->dev);
-       pm_runtime_put_autosuspend(gpu->dev);
        return 0;
  
  out_sched:
@@@ -1785,16 -1799,14 +1799,14 @@@ static void etnaviv_gpu_unbind(struct d
                pm_runtime_put_sync_suspend(gpu->dev);
        } else {
                etnaviv_gpu_hw_suspend(gpu);
+               etnaviv_gpu_clk_disable(gpu);
        }
  
        if (gpu->mmu_context)
                etnaviv_iommu_context_put(gpu->mmu_context);
  
-       if (gpu->initialized) {
-               etnaviv_cmdbuf_free(&gpu->buffer);
-               etnaviv_iommu_global_fini(gpu);
-               gpu->initialized = false;
-       }
+       etnaviv_cmdbuf_free(&gpu->buffer);
+       etnaviv_iommu_global_fini(gpu);
  
        gpu->drm = NULL;
        xa_destroy(&gpu->user_fences);
@@@ -1918,7 -1930,11 +1930,11 @@@ static int etnaviv_gpu_rpm_suspend(stru
                return -EBUSY;
        }
  
-       return etnaviv_gpu_hw_suspend(gpu);
+       etnaviv_gpu_hw_suspend(gpu);
+       gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+       return etnaviv_gpu_clk_disable(gpu);
  }
  
  static int etnaviv_gpu_rpm_resume(struct device *dev)
                return ret;
  
        /* Re-initialise the basic hardware state */
-       if (gpu->drm && gpu->initialized) {
+       if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
                ret = etnaviv_gpu_hw_resume(gpu);
                if (ret) {
                        etnaviv_gpu_clk_disable(gpu);