hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
- r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type, hw_prio);
+ r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
+ hw_prio, NULL);
if (r)
return r;
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
- AMDGPU_RING_PRIO_DEFAULT);
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
if (r)
return r;
}
return 0;
}
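
For reference, the NULL added to each call matches a new trailing parameter on amdgpu_ring_init(). A minimal sketch of the assumed updated prototype, where the new argument is the optional atomic_t *sched_score the drm scheduler can use to pick the least-loaded ring; callers that don't track a score, as here, pass NULL (check amdgpu_ring.h in the tree this patch targets before relying on this):

    int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                         unsigned int max_dw, struct amdgpu_irq_src *irq_src,
                         unsigned int irq_type, unsigned int hw_prio,
                         atomic_t *sched_score);  /* assumed new final argument */
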
-static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
+ bool from_wq)
{
u32 enc, se_id, sh_id, cu_id;
char type[20];
/*
 * This function can be called either directly from ISR
 * if it's possible to parse SQ message from sq_edc_source
- * or from BH in which case we can access SQ_EDC_INFO
+ * or from the workqueue in which case we can access SQ_EDC_INFO
 * instance
 */
- if (in_task()) {
+ if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
{
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
struct sq_work *sq_work = container_of(work, struct sq_work, work);
- gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}
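
Why a flag instead of in_task(): a workqueue item runs in process context, but under forced interrupt threading (threadirqs) the IRQ handler runs in process context too, so in_task() cannot reliably distinguish the two callers. Passing the context explicitly also documents at each call site whether sleeping locks are allowed. A self-contained sketch of the pattern using hypothetical names (my_dev, parse_event, event_work_func), not the driver's actual structures:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/types.h>
    #include <linux/workqueue.h>

    struct my_dev {                        /* hypothetical device, for illustration */
            struct mutex lock;
            struct work_struct event_work;
            unsigned int saved_data;
    };

    static void parse_event(struct my_dev *dev, unsigned int data, bool from_wq)
    {
            if (from_wq) {
                    /* only the workqueue caller may sleep, so only it locks */
                    mutex_lock(&dev->lock);
                    /* ... read extra state that needs the lock ... */
                    mutex_unlock(&dev->lock);
            }
            /* ... decode 'data' in both contexts ... */
    }

    static void event_work_func(struct work_struct *work)
    {
            struct my_dev *dev = container_of(work, struct my_dev, event_work);

            parse_event(dev, dev->saved_data, true);   /* workqueue path */
    }

An ISR would call parse_event(dev, data, false) and skip anything that can sleep.
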
static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
                           struct amdgpu_irq_src *source,
                           struct amdgpu_iv_entry *entry)
{
unsigned ih_data = entry->src_data[0];

/*
 * Try to submit work so SQ_EDC_INFO can be accessed from
- * BH. If previous work submission hasn't finished yet
+ * the workqueue. If previous work submission hasn't finished yet
 * just print whatever info is possible directly from the ISR.
 */
if (work_pending(&adev->gfx.sq_work.work)) {
- gfx_v8_0_parse_sq_irq(adev, ih_data);
+ gfx_v8_0_parse_sq_irq(adev, ih_data, false);
} else {
adev->gfx.sq_work.ih_data = ih_data;
schedule_work(&adev->gfx.sq_work.work);
}

return 0;
}
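
Design note: gfx.sq_work carries a single ih_data slot, so work_pending() is the guard; storing a new ih_data while the previous item is still queued would clobber an event that hasn't been parsed yet, hence the degraded in-ISR fallback. A sketch of the same single-slot deferral pattern, with hypothetical names (queue_or_handle, handle_inline; struct sq_work mirrors the driver's pairing of a work item with one payload):

    #include <linux/workqueue.h>

    struct sq_work {                       /* one payload slot per work item */
            struct work_struct work;
            unsigned int ih_data;
    };

    static void handle_inline(unsigned int ih_data)
    {
            /* hypothetical stand-in for the reduced, ISR-context parse */
    }

    static void queue_or_handle(struct sq_work *sq_work, unsigned int ih_data)
    {
            if (work_pending(&sq_work->work)) {
                    handle_inline(ih_data);         /* slot busy: degrade gracefully */
            } else {
                    sq_work->ih_data = ih_data;     /* slot free: defer to process context */
                    schedule_work(&sq_work->work);
            }
    }
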