From: Daniel Vetter
Date: Tue, 14 Dec 2021 08:43:25 +0000 (+0100)
Subject: Merge v5.16-rc5 into drm-next
X-Git-Tag: v5.17-rc1~173^2~13
X-Git-Url: https://repo.jachan.dev/linux.git/commitdiff_plain/99b03ca651f1

Merge v5.16-rc5 into drm-next

Thomas Zimmermann requested a fixes backmerge, specifically also for
96c5f82ef0a1 ("drm/vc4: fix error code in vc4_create_object()")

Just a bunch of adjacent changes conflicts, even the big pile of them
in vc4.

Signed-off-by: Daniel Vetter
---

99b03ca651f1c409d296d6c6e9440d9b005c722f
diff --cc drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index c5362c23e687,6348559608ce..5df89a295177
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@@ -1579,17 -1504,18 +1579,19 @@@ int amdgpu_amdkfd_gpuvm_alloc_memory_of
  allocate_init_user_pages_failed:
          remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
 +err_pin_bo:
          drm_vma_node_revoke(&gobj->vma_node, drm_priv);
  err_node_allow:
-         drm_gem_object_put(gobj);
          /* Don't unreserve system mem limit twice */
          goto err_reserve_limit;
  err_bo_create:
-         unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+         unreserve_mem_limit(adev, size, flags);
  err_reserve_limit:
          mutex_destroy(&(*mem)->lock);
-         kfree(*mem);
+         if (gobj)
+                 drm_gem_object_put(gobj);
+         else
+                 kfree(*mem);
  err:
          if (sg) {
                  sg_free_table(sg);
diff --cc drivers/gpu/drm/i915/gt/intel_gtt.c
index 0dd254cb1f69,b67f620c3d93..9fee968d57db
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@@ -6,9 -6,8 +6,10 @@@
  #include  /* fault-inject.h is not standalone! */
  
  #include 
+ #include 
 +#include 
 +
  #include "gem/i915_gem_lmem.h"
  #include "i915_trace.h"
  #include "intel_gt.h"
diff --cc drivers/gpu/drm/vc4/vc4_kms.c
index 79d4d9dd1394,b61792d2aa65..bf3706f97ec7
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@@ -340,21 -337,12 +340,21 @@@ static void vc4_atomic_commit_tail(stru
          struct drm_device *dev = state->dev;
          struct vc4_dev *vc4 = to_vc4_dev(dev);
          struct vc4_hvs *hvs = vc4->hvs;
-         struct drm_crtc_state *old_crtc_state;
          struct drm_crtc_state *new_crtc_state;
+         struct vc4_hvs_state *new_hvs_state;
          struct drm_crtc *crtc;
          struct vc4_hvs_state *old_hvs_state;
+         unsigned int channel;
          int i;
  
+         old_hvs_state = vc4_hvs_get_old_global_state(state);
-         if (WARN_ON(!old_hvs_state))
++        if (WARN_ON(IS_ERR(old_hvs_state)))
+                 return;
+ 
+         new_hvs_state = vc4_hvs_get_new_global_state(state);
-         if (WARN_ON(!new_hvs_state))
++        if (WARN_ON(IS_ERR(new_hvs_state)))
+                 return;
+ 
          for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                  struct vc4_crtc_state *vc4_crtc_state;
@@@ -365,31 -353,32 +365,33 @@@
                  vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
          }
  
-         old_hvs_state = vc4_hvs_get_old_global_state(state);
-         if (IS_ERR(old_hvs_state))
-                 return;
+         if (vc4->hvs->hvs5) {
+                 unsigned long core_rate = max_t(unsigned long,
+                                                 500000000,
+                                                 new_hvs_state->core_clock_rate);
+ 
+                 clk_set_min_rate(hvs->core_clk, core_rate);
+         }
  
-         for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
-                 struct vc4_crtc_state *vc4_crtc_state =
-                         to_vc4_crtc_state(old_crtc_state);
-                 unsigned int channel = vc4_crtc_state->assigned_channel;
+         for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
+                 struct drm_crtc_commit *commit;
                  int ret;
  
-                 if (channel == VC4_HVS_CHANNEL_DISABLED)
+                 if (!old_hvs_state->fifo_state[channel].in_use)
                          continue;
  
-                 if (!old_hvs_state->fifo_state[channel].in_use)
+                 commit = old_hvs_state->fifo_state[channel].pending_commit;
+                 if (!commit)
                          continue;
  
-                 ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
+                 ret = drm_crtc_commit_wait(commit);
                  if (ret)
                          drm_err(dev, "Timed out waiting for commit\n");
+ 
+                 drm_crtc_commit_put(commit);
+                 old_hvs_state->fifo_state[channel].pending_commit = NULL;
          }
  
-         if (vc4->hvs->hvs5)
-                 clk_set_min_rate(hvs->core_clk, 500000000);
- 
          drm_atomic_helper_commit_modeset_disables(dev, state);
  
          vc4_ctm_commit(vc4, state);
@@@ -673,19 -667,11 +675,13 @@@ vc4_hvs_channels_duplicate_state(struc
          __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
  
- 
          for (i = 0; i < HVS_NUM_CHANNELS; i++) {
                  state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
+                 state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
- 
-                 if (!old_state->fifo_state[i].pending_commit)
-                         continue;
- 
-                 state->fifo_state[i].pending_commit =
-                         drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
          }
  
+         state->core_clock_rate = old_state->core_clock_rate;
+ 
          return &state->base;
  }
@@@ -840,76 -826,6 +836,76 @@@ static int vc4_pv_muxing_atomic_check(s
          return 0;
  }
  
 +static int
 +vc4_core_clock_atomic_check(struct drm_atomic_state *state)
 +{
 +        struct vc4_dev *vc4 = to_vc4_dev(state->dev);
 +        struct drm_private_state *priv_state;
 +        struct vc4_hvs_state *hvs_new_state;
 +        struct vc4_load_tracker_state *load_state;
 +        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 +        struct drm_crtc *crtc;
 +        unsigned int num_outputs;
 +        unsigned long pixel_rate;
 +        unsigned long cob_rate;
 +        unsigned int i;
 +
 +        priv_state = drm_atomic_get_private_obj_state(state,
 +                                                      &vc4->load_tracker);
 +        if (IS_ERR(priv_state))
 +                return PTR_ERR(priv_state);
 +
 +        load_state = to_vc4_load_tracker_state(priv_state);
 +
 +        hvs_new_state = vc4_hvs_get_global_state(state);
-         if (!hvs_new_state)
-                 return -EINVAL;
++        if (IS_ERR(hvs_new_state))
++                return PTR_ERR(hvs_new_state);
 +
 +        for_each_oldnew_crtc_in_state(state, crtc,
 +                                      old_crtc_state,
 +                                      new_crtc_state,
 +                                      i) {
 +                if (old_crtc_state->active) {
 +                        struct vc4_crtc_state *old_vc4_state =
 +                                to_vc4_crtc_state(old_crtc_state);
 +                        unsigned int channel = old_vc4_state->assigned_channel;
 +
 +                        hvs_new_state->fifo_state[channel].fifo_load = 0;
 +                }
 +
 +                if (new_crtc_state->active) {
 +                        struct vc4_crtc_state *new_vc4_state =
 +                                to_vc4_crtc_state(new_crtc_state);
 +                        unsigned int channel = new_vc4_state->assigned_channel;
 +
 +                        hvs_new_state->fifo_state[channel].fifo_load =
 +                                new_vc4_state->hvs_load;
 +                }
 +        }
 +
 +        cob_rate = 0;
 +        num_outputs = 0;
 +        for (i = 0; i < HVS_NUM_CHANNELS; i++) {
 +                if (!hvs_new_state->fifo_state[i].in_use)
 +                        continue;
 +
 +                num_outputs++;
 +                cob_rate += hvs_new_state->fifo_state[i].fifo_load;
 +        }
 +
 +        pixel_rate = load_state->hvs_load;
 +        if (num_outputs > 1) {
 +                pixel_rate = (pixel_rate * 40) / 100;
 +        } else {
 +                pixel_rate = (pixel_rate * 60) / 100;
 +        }
 +
 +        hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
 +
 +        return 0;
 +}
 +
 +
  static int
  vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
  {