drm/amdgpu: use kmemdup rather than duplicating its implementation

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2ba448ee948b5cc753f0a4f820baaeea80abce1a..57b427f958da1b5d77f35cb9724321fd3ab6a3f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
 extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS         3000
 #define AMDGPU_MAX_USEC_TIMEOUT                        100000  /* 100 ms */
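
The new extern presumably pairs with the usual module-parameter boilerplate in amdgpu_drv.c. A minimal sketch of that pairing, assuming the conventional naming; the default value and description string here are illustrative, not the actual commit text:

    /* Sketch for amdgpu_drv.c -- default and description are assumptions. */
    int amdgpu_enable_semaphores = 1;
    MODULE_PARM_DESC(enable_semaphores,
                     "Enable semaphores (1 = enable (default), 0 = disable)");
    module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
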
@@ -98,6 +99,9 @@ extern int amdgpu_sched_hw_submission;
 #define AMDGPU_MAX_COMPUTE_RINGS               8
 #define AMDGPU_MAX_VCE_RINGS                   2
 
+/* max number of IP instances */
+#define AMDGPU_MAX_SDMA_INSTANCES              2
+
 /* number of hw syncs before falling back on blocking */
 #define AMDGPU_NUM_SYNCS                       4
 
@@ -183,6 +187,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 
@@ -246,7 +251,7 @@ struct amdgpu_buffer_funcs {
        unsigned        copy_num_dw;
 
        /* used for buffer migration */
-       void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+       void (*emit_copy_buffer)(struct amdgpu_ib *ib,
                                 /* src addr in bytes */
                                 uint64_t src_offset,
                                 /* dst addr in bytes */
@@ -261,7 +266,7 @@ struct amdgpu_buffer_funcs {
        unsigned        fill_num_dw;
 
        /* used for buffer clearing */
-       void (*emit_fill_buffer)(struct amdgpu_ring *ring,
+       void (*emit_fill_buffer)(struct amdgpu_ib *ib,
                                 /* value to write to memory */
                                 uint32_t src_data,
                                 /* dst addr in bytes */
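
Both the copy and fill callbacks now build packets into an indirect buffer instead of writing to a ring directly, which lets buffer migrations be queued through the scheduler like any other job. A minimal sketch of an implementation with this shape; HYPOTHETICAL_COPY_OP stands in for a real packet header and is invented for illustration:

    /* Illustration only: HYPOTHETICAL_COPY_OP is not a real opcode. */
    static void example_emit_copy_buffer(struct amdgpu_ib *ib,
                                         uint64_t src_offset,
                                         uint64_t dst_offset,
                                         uint32_t byte_count)
    {
            ib->ptr[ib->length_dw++] = HYPOTHETICAL_COPY_OP;
            ib->ptr[ib->length_dw++] = byte_count;
            ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
            ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
            ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
            ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
    }
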
@@ -339,6 +344,8 @@ struct amdgpu_ring_funcs {
        int (*test_ring)(struct amdgpu_ring *ring);
        int (*test_ib)(struct amdgpu_ring *ring);
        bool (*is_lockup)(struct amdgpu_ring *ring);
+       /* insert NOP packets */
+       void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
 
 /*
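
The generic helper declared further down (amdgpu_ring_insert_nop()) presumably provides the default for this hook, padding the ring with its NOP packet; engines that can encode a repeat count into a single NOP, such as SDMA with the burst_nop flag added later in this diff, can override it. A sketch of the generic form, assuming the ring's NOP packet is kept in ring->nop:

    /* Minimal sketch; assumes ring->nop holds the engine's NOP packet. */
    void example_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
    {
            uint32_t i;

            for (i = 0; i < count; i++)
                    amdgpu_ring_write(ring, ring->nop);
    }
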
@@ -426,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
 
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
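
With the init path now returning int, fence-driver setup failures (e.g. allocation errors) can propagate instead of being swallowed. A call-site sketch:

    r = amdgpu_fence_driver_init_ring(ring);
    if (r)
            return r;
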
@@ -439,11 +446,11 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
-int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
 signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                         struct amdgpu_fence **fences,
-                         bool intr, long t);
+                                 struct fence **array,
+                                 uint32_t count,
+                                 bool intr,
+                                 signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
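
The wait-any helper now takes a plain array of base fences with an explicit count and a jiffies timeout; given the signed long return, it presumably follows the usual timeout convention (negative on error, zero on timeout, remaining jiffies otherwise). A usage sketch built only from the declaration above:

    struct fence *fences[AMDGPU_MAX_RINGS] = {};
    uint32_t count = 0;
    signed long t;

    /* ... collect fences[0..count-1] ... */
    t = amdgpu_fence_wait_any(adev, fences, count, false,
                              MAX_SCHEDULE_TIMEOUT);
    if (t < 0)
            return t;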
 
@@ -516,7 +523,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                       uint64_t dst_offset,
                       uint32_t byte_count,
                       struct reservation_object *resv,
-                      struct amdgpu_fence **fence);
+                      struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -652,7 +659,7 @@ struct amdgpu_sa_bo {
        struct amdgpu_sa_manager        *manager;
        unsigned                        soffset;
        unsigned                        eoffset;
-       struct amdgpu_fence             *fence;
+       struct fence                    *fence;
 };
 
 /*
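
Switching the suballocator bookkeeping to the base struct fence means reference counting goes through the generic helpers from <linux/fence.h> rather than amdgpu_fence_ref()/unref(). Sketch:

    /* Take a reference when the fence is attached to the sa_bo ... */
    sa_bo->fence = fence_get(fence);

    /* ... and drop it once the suballocation is reclaimed. */
    fence_put(sa_bo->fence);
    sa_bo->fence = NULL;
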
@@ -694,7 +701,7 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
                                struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
                           struct amdgpu_semaphore **semaphore,
-                          struct amdgpu_fence *fence);
+                          struct fence *fence);
 
 /*
  * Synchronization
@@ -702,7 +709,8 @@ void amdgpu_semaphore_free(struct amdgpu_device *adev,
 struct amdgpu_sync {
        struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
        struct amdgpu_fence     *sync_to[AMDGPU_MAX_RINGS];
-       struct amdgpu_fence     *last_vm_update;
+       DECLARE_HASHTABLE(fences, 4);
+       struct fence            *last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
@@ -714,8 +722,10 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                     void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
                      struct amdgpu_ring *ring);
+struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-                     struct amdgpu_fence *fence);
+                     struct fence *fence);
 
 /*
  * GART structures, functions & helpers
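
With DECLARE_HASHTABLE(fences, 4) the sync object can track an arbitrary set of base fences (16 buckets) instead of one amdgpu_fence per ring, and the two new helpers let the scheduler either pop dependencies one at a time (amdgpu_sync_get_fence()) or block on everything (amdgpu_sync_wait()). A sketch of what the bookkeeping could look like, keyed by fence context; the entry type and helper below are illustrative:

    /* Illustrative entry type; the real layout may differ. */
    struct example_sync_entry {
            struct hlist_node       node;
            struct fence            *fence;
    };

    static int example_sync_add(struct amdgpu_sync *sync, struct fence *f)
    {
            struct example_sync_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);

            if (!e)
                    return -ENOMEM;

            e->fence = fence_get(f);
            hash_add(sync->fences, &e->node, f->context);
            return 0;
    }
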
@@ -830,7 +840,9 @@ struct amdgpu_flip_work {
        uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct amdgpu_bo                *old_rbo;
-       struct fence                    *fence;
+       struct fence                    *excl;
+       unsigned                        shared_count;
+       struct fence                    **shared;
 };
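
Splitting the flip work's single fence into the exclusive fence plus the whole shared set mirrors what a reservation object can hand out, so the flip worker can wait for every pending access before programming the flip. A sketch using the reservation API of this era; treating 'rbo' as the amdgpu_bo being scanned out is an assumption:

    /* Snapshot all fences attached to the BO's reservation object. */
    r = reservation_object_get_fences_rcu(rbo->tbo.resv, &work->excl,
                                          &work->shared_count,
                                          &work->shared);
    if (unlikely(r != 0))
            goto cleanup;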
 
 
@@ -871,7 +883,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                         struct amdgpu_ring *ring,
                                         struct amdgpu_ib *ibs,
                                         unsigned num_ibs,
-                                        int (*free_job)(struct amdgpu_cs_parser *),
+                                        int (*free_job)(struct amdgpu_job *),
                                         void *owner,
                                         struct fence **fence);
 
@@ -879,7 +891,7 @@ struct amdgpu_ring {
        struct amdgpu_device            *adev;
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
-       struct amd_gpu_scheduler        *scheduler;
+       struct amd_gpu_scheduler        sched;
 
        spinlock_t              fence_lock;
        struct mutex            *ring_lock;
@@ -957,7 +969,7 @@ struct amdgpu_vm_id {
        unsigned                id;
        uint64_t                pd_gpu_addr;
        /* last flushed PD/PT update */
-       struct amdgpu_fence     *flushed_updates;
+       struct fence            *flushed_updates;
        /* last use of vmid */
        struct amdgpu_fence     *last_id_use;
 };
@@ -982,6 +994,7 @@ struct amdgpu_vm {
        /* contains the page directory */
        struct amdgpu_bo        *page_directory;
        unsigned                max_pde_used;
+       struct fence            *page_directory_fence;
 
        /* array of page tables, one for each page directory entry */
        struct amdgpu_vm_pt     *page_tables;
@@ -1041,7 +1054,7 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-                             struct fence *fence, uint64_t queued_seq);
+                             struct fence *fence);
 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                   struct amdgpu_ring *ring, uint64_t seq);
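
The context now assigns the sequence number itself and returns it, rather than having the caller thread a queued_seq through; the returned value is the handle later passed back to amdgpu_ctx_get_fence(). A usage sketch built from the two declarations above:

    uint64_t seq;

    seq = amdgpu_ctx_add_fence(ctx, ring, fence);
    /* ... later, e.g. when userspace waits on the submission ... */
    fence = amdgpu_ctx_get_fence(ctx, ring, seq);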
 
@@ -1076,8 +1089,6 @@ struct amdgpu_bo_list {
        struct amdgpu_bo_list_entry *array;
 };
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
@@ -1209,6 +1220,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
 void amdgpu_ring_free_size(struct amdgpu_ring *ring);
 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
 int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
+void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
@@ -1254,15 +1266,19 @@ struct amdgpu_cs_parser {
 
        /* user fence */
        struct amdgpu_user_fence uf;
+};
 
-       struct amdgpu_ring *ring;
-       struct mutex job_lock;
-       struct work_struct job_work;
-       int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
-       int (*run_job)(struct amdgpu_cs_parser *sched_job);
-       int (*free_job)(struct amdgpu_cs_parser *sched_job);
-       struct amd_sched_fence *s_fence;
+struct amdgpu_job {
+       struct amd_sched_job    base;
+       struct amdgpu_device    *adev;
+       struct amdgpu_ib        *ibs;
+       uint32_t                num_ibs;
+       struct mutex            job_lock;
+       struct amdgpu_user_fence uf;
+       int (*free_job)(struct amdgpu_job *job);
 };
+#define to_amdgpu_job(sched_job)               \
+               container_of((sched_job), struct amdgpu_job, base)
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
 {
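
The scheduler-facing state moves out of the CS parser into amdgpu_job, which embeds amd_sched_job as its base so that to_amdgpu_job() can recover the wrapper from whatever the scheduler hands back. A hedged sketch of a free_job callback matching the new typedef; the body is illustrative (compare the amdgpu_vm_free_job() declaration near the end of this diff):

    static int example_free_job(struct amdgpu_job *job)
    {
            unsigned i;

            for (i = 0; i < job->num_ibs; i++)
                    amdgpu_ib_free(job->adev, &job->ibs[i]);
            kfree(job->ibs);
            return 0;
    }

    /* And in a scheduler callback: */
    struct amdgpu_job *job = to_amdgpu_job(sched_job);
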
@@ -1658,7 +1674,6 @@ struct amdgpu_uvd {
        struct amdgpu_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       void                    *saved_bo;
        atomic_t                handles[AMDGPU_MAX_UVD_HANDLES];
        struct drm_file         *filp[AMDGPU_MAX_UVD_HANDLES];
        struct delayed_work     idle_work;
@@ -1702,6 +1717,7 @@ struct amdgpu_sdma {
        uint32_t                feature_version;
 
        struct amdgpu_ring      ring;
+       bool                    burst_nop;
 };
 
 /*
@@ -2050,7 +2066,7 @@ struct amdgpu_device {
        struct amdgpu_gfx               gfx;
 
        /* sdma */
-       struct amdgpu_sdma              sdma[2];
+       struct amdgpu_sdma              sdma[AMDGPU_MAX_SDMA_INSTANCES];
        struct amdgpu_irq_src           sdma_trap_irq;
        struct amdgpu_irq_src           sdma_illegal_inst_irq;
 
@@ -2189,6 +2205,21 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
        ring->ring_free_dw--;
 }
 
+static inline struct amdgpu_sdma * amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       int i;
+
+       for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
+               if (&adev->sdma[i].ring == ring)
+                       break;
+
+       if (i < AMDGPU_MAX_SDMA_INSTANCES)
+               return &adev->sdma[i];
+       else
+               return NULL;
+}
+
 /*
  * ASICs macro.
  */
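
The new inline maps a ring back to its SDMA instance; the obvious consumer is an SDMA insert_nop hook that checks the burst_nop flag added above and folds the padding into a single packet. A sketch under that assumption, again assuming ring->nop holds the NOP packet; HYPOTHETICAL_BURST_COUNT() stands in for the real NOP header count field:

    static void example_sdma_insert_nop(struct amdgpu_ring *ring, uint32_t count)
    {
            struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
            uint32_t i;

            for (i = 0; i < count; i++) {
                    if (sdma && sdma->burst_nop && i == 0)
                            amdgpu_ring_write(ring, ring->nop |
                                              HYPOTHETICAL_BURST_COUNT(count - 1));
                    else
                            amdgpu_ring_write(ring, ring->nop);
            }
    }
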
@@ -2240,8 +2271,8 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
-#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b))
+#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
@@ -2342,7 +2373,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                     struct amdgpu_vm *vm,
-                    struct amdgpu_fence *updates);
+                    struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
                     struct amdgpu_vm *vm,
                     struct amdgpu_fence *fence);
@@ -2372,7 +2403,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                       uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                      struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */