#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_framebuffer.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/i2c.h>
#define AMDGPU_MAX_BL_LEVEL 0xFF
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
struct amdgpu_backlight_privdata {
struct amdgpu_encoder *encoder;
uint8_t negative;
};
- #endif
-
struct amdgpu_atom_ss {
uint16_t percentage;
uint16_t percentage_divider;
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
+ #include "soc15_common.h"
#include "vega10_ip_offset.h"
#include "soc15_common.h"
+ #include "gc/gc_11_0_0_offset.h"
+ #include "gc/gc_11_0_0_sh_mask.h"
+
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+ #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+ MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+ #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+ MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
- /* Control PSR based on vblank requirements from OS */
+ /*
+ * Control PSR based on vblank requirements from OS
+ *
+ * If panel supports PSR SU, there's no need to disable PSR when OS is
+ * submitting fast atomic commits (we infer this from whether the OS
+ * requests vblank events). Fast atomic commits will simply trigger a
+ * full-frame-update (FFU), a specific case of selective-update (SU)
+ * where the SU region is the full hactive*vactive region. See
+ * fill_dc_dirty_rects().
+ */
if (vblank_work->stream && vblank_work->stream->link) {
if (vblank_work->enable) {
- if (vblank_work->stream->link->psr_settings.psr_allow_active)
+ if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+ vblank_work->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(vblank_work->stream);
} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
!vblank_work->stream->link->psr_settings.psr_allow_active &&
DRM_INFO("Seamless boot condition check passed\n");
}
+ init_data.flags.enable_mipi_converter_optimization = true;
+
INIT_LIST_HEAD(&adev->dm.da_list);
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
return 0;
default:
break;
dmub_asic = DMUB_ASIC_DCN316;
fw_name_dmub = FIRMWARE_DCN316_DMUB;
break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
default:
/* ASIC doesn't support DMUB. */
return 0;
} else {
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
- drm_dp_mst_topology_mgr_set_mst(mgr, false);
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
need_hotplug = true;
}
}
return;
}
- static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
- {
- struct dc_stream_state *stream_state;
- struct amdgpu_dm_connector *aconnector = link->priv;
- struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
- struct dc_stream_update stream_update;
- bool dpms_off = true;
-
- memset(&stream_update, 0, sizeof(stream_update));
- stream_update.dpms_off = &dpms_off;
-
- mutex_lock(&adev->dm.dc_lock);
- stream_state = dc_stream_find_from_link(link);
-
- if (stream_state == NULL) {
- DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
- mutex_unlock(&adev->dm.dc_lock);
- return;
- }
-
- stream_update.stream = stream_state;
- acrtc_state->force_dpms_off = true;
- dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
- stream_state, &stream_update,
- stream_state->ctx->dc->current_state);
- mutex_unlock(&adev->dm.dc_lock);
- }
-
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
{
- u32 max_cll, min_cll, max, min, q, r;
+ u32 max_avg, min_cll, max, min, q, r;
struct amdgpu_dm_backlight_caps *caps;
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
caps = &dm->backlight_caps[i];
caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
caps->aux_support = false;
- max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
if (caps->ext_caps->bits.oled == 1 /*||
* The results of the above expressions can be verified at
* pre_computed_values.
*/
- q = max_cll >> 5;
- r = max_cll % 32;
+ q = max_avg >> 5;
+ r = max_avg % 32;
max = (1 << q) * pre_computed_values[r];
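/*
 * Worked example (editor's illustration, not part of this patch): for
 * max_avg = 100 (the MaxFALL code value read from the sink's HDR metadata
 * above), q = 100 >> 5 = 3 and r = 100 % 32 = 4, so
 * max = (1 << 3) * pre_computed_values[4].
 */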
// min luminance: maxLum * (CV/255)^2 / 100
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
- struct dm_crtc_state *dm_crtc_state = NULL;
+ #endif
if (adev->dm.disable_hpd_irq)
return;
- if (dm_con_state->base.state && dm_con_state->base.crtc)
- dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
- dm_con_state->base.state,
- dm_con_state->base.crtc));
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
drm_kms_helper_connector_hotplug_event(connector);
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
- if (new_connection_type == dc_connection_none &&
- aconnector->dc_link->type == dc_connection_none &&
- dm_crtc_state)
- dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
-
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
int bl_idx)
{
else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
- #endif
static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info, int plane_id,
static void register_backlight_device(struct amdgpu_display_manager *dm,
struct dc_link *link)
{
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
/*
dm->num_of_edps++;
}
}
- #endif
}
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
case IP_VERSION(2, 1, 0):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
psr_feature_enabled = true;
break;
default:
}
}
- /* Disable vblank IRQs aggressively for power-saving. */
- adev_to_drm(adev)->vblank_disable_immediate = true;
-
/* loops over all connectors on the board */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
case IP_VERSION(3, 1, 3):
case IP_VERSION(3, 1, 5):
case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
- unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
+ unsigned int pipes_log2;
+
+ pipes_log2 = min(5u, mod_pipe_xor_bits);
fill_gfx9_tiling_info_from_device(adev, tiling_info);
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
}
+ static void
+ add_gfx11_modifiers(struct amdgpu_device *adev,
+ uint64_t **mods, uint64_t *size, uint64_t *capacity)
+ {
+ int num_pipes = 0;
+ int pipe_xor_bits = 0;
+ int num_pkrs = 0;
+ int pkrs = 0;
+ u32 gb_addr_config;
+ u8 i = 0;
+ unsigned swizzle_r_x;
+ uint64_t modifier_r_x;
+ uint64_t modifier_dcc_best;
+ uint64_t modifier_dcc_4k;
+
+ /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from
+ * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} */
+ gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG);
+ ASSERT(gb_addr_config != 0);
+
+ num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
+ pkrs = ilog2(num_pkrs);
+ num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES);
+ pipe_xor_bits = ilog2(num_pipes);
+
+ for (i = 0; i < 2; i++) {
+ /* Insert the best one first. */
+ /* R_X swizzle modes are the best for rendering and DCC requires them. */
+ if (num_pipes > 16)
+ swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
+ else
+ swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
+
+ modifier_r_x = AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs);
+
+ /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
+ modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
+
+ /* DCC settings for 4K and greater resolutions. (required by display hw) */
+ modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
+
+ add_modifier(mods, size, capacity, modifier_dcc_best);
+ add_modifier(mods, size, capacity, modifier_dcc_4k);
+
+ add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+ add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1));
+
+ add_modifier(mods, size, capacity, modifier_r_x);
+ }
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D));
+ }
+
static int
- get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
+ get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
{
uint64_t size = 0, capacity = 128;
*mods = NULL;
else
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
break;
+ case AMDGPU_FAMILY_GC_11_0_0:
+ add_gfx11_modifiers(adev, mods, &size, &capacity);
+ break;
}
add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1];
dcc->independent_64b_blks = independent_64b_blks;
- if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
if (independent_64b_blks && independent_128b_blks)
dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
else if (independent_128b_blks)
return 0;
}
+ /**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ *
+ * Today, amdgpu_dm only supports the MPO and cursor use cases.
+ *
+ * TODO: Also enable for FB_DAMAGE_CLIPS
+ */
+ static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs)
+ {
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ uint32_t num_clips;
+ bool bb_changed;
+ bool fb_changed;
+ uint32_t i = 0;
+
+ flip_addrs->dirty_rect_count = 0;
+
+ /*
+ * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ /*
+ * Today, we only consider the MPO use case for PSR SU. If MPO is not
+ * requested and there is a plane update, do FFU.
+ */
+ if (!dm_crtc_state->mpo_requested) {
+ dirty_rects[0].x = 0;
+ dirty_rects[0].y = 0;
+ dirty_rects[0].width = dm_crtc_state->base.mode.crtc_hdisplay;
+ dirty_rects[0].height = dm_crtc_state->base.mode.crtc_vdisplay;
+ flip_addrs->dirty_rect_count = 1;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ new_plane_state->plane->base.id,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ if (num_clips || fb_changed || bb_changed) {
+ dirty_rects[i].x = new_plane_state->crtc_x;
+ dirty_rects[i].y = new_plane_state->crtc_y;
+ dirty_rects[i].width = new_plane_state->crtc_w;
+ dirty_rects[i].height = new_plane_state->crtc_h;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+ new_plane_state->plane->base.id,
+ dirty_rects[i].x, dirty_rects[i].y,
+ dirty_rects[i].width, dirty_rects[i].height);
+ i += 1;
+ }
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ if (bb_changed) {
+ dirty_rects[i].x = old_plane_state->crtc_x;
+ dirty_rects[i].y = old_plane_state->crtc_y;
+ dirty_rects[i].width = old_plane_state->crtc_w;
+ dirty_rects[i].height = old_plane_state->crtc_h;
+ DRM_DEBUG_DRIVER("[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)\n",
+ old_plane_state->plane->base.id,
+ dirty_rects[i].x, dirty_rects[i].y,
+ dirty_rects[i].width, dirty_rects[i].height);
+ i += 1;
+ }
+
+ flip_addrs->dirty_rect_count = i;
+ }
+
static void update_stream_scaling_settings(const struct drm_display_mode *mode,
const struct dm_connector_state *dm_state,
struct dc_stream_state *stream)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
- state->force_dpms_off = cur->force_dpms_off;
+ state->mpo_requested = cur->mpo_requested;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
}
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+#ifdef CONFIG_DEBUG_FS
static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
{
crtc_debugfs_init(crtc);
dm_set_vblank(crtc, false);
}
- /* Implemented only the options currently availible for the driver */
+ /* Implemented only the options currently available for the driver */
static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.reset = dm_crtc_reset_state,
.destroy = amdgpu_dm_crtc_destroy,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+#if defined(CONFIG_DEBUG_FS)
.late_register = amdgpu_dm_crtc_late_register,
#endif
};
if (aconnector->mst_mgr.dev)
drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
for (i = 0; i < dm->num_of_edps; i++) {
if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
backlight_device_unregister(dm->backlight_dev[i]);
dm->backlight_dev[i] = NULL;
}
}
- #endif
if (aconnector->dc_em_sink)
dc_sink_release(aconnector->dc_em_sink);
break;
}
- dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
if (dc_result != DC_OK) {
DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
}
- static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
+ int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
{
switch (display_color_depth) {
case COLOR_DEPTH_666:
goto error_unpin;
}
+ r = drm_gem_plane_helper_prepare_fb(plane, new_state);
+ if (unlikely(r != 0))
+ goto error_unpin;
+
amdgpu_bo_unreserve(rbo);
afb->address = amdgpu_bo_gpu_offset(rbo);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
int planes_count = 0, vpos, hpos;
- long r;
unsigned long flags;
- struct amdgpu_bo *abo;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
continue;
}
- abo = gem_to_amdgpu_bo(fb->obj[0]);
-
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = dma_resv_wait_timeout(abo->tbo.base.resv,
- DMA_RESV_USAGE_WRITE, false,
- msecs_to_jiffies(5000));
- if (unlikely(r <= 0))
- DRM_ERROR("Waiting for fences timed out!");
-
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state,
afb->tiling_flags,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
+ fill_dc_dirty_rects(plane, old_plane_state, new_plane_state,
+ new_crtc_state,
+ &bundle->flip_addrs[planes_count]);
+
/*
* Only allow immediate flips for fast updates that don't
* change FB pitch, DCC state, rotation or mirroing.
* and rely on sending it from software.
*/
if (acrtc_attach->base.state->event &&
- acrtc_state->active_planes > 0 &&
- !acrtc_state->force_dpms_off) {
+ acrtc_state->active_planes > 0) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
/* Allow PSR when skip count is 0. */
acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+ /*
+ * If sink supports PSR SU, there is no need to rely on
+ * a vblank event disable request to enable PSR. PSR SU
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
} else {
acrtc_attach->dm_irq_params.allow_psr_entry = false;
}
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
int crtc_disable_count = 0;
bool mode_set_reset_required = false;
+ int r;
trace_amdgpu_dm_atomic_commit_tail_begin(state);
+ r = drm_atomic_helper_wait_for_fences(dev, state, false);
+ if (unlikely(r))
+ DRM_ERROR("Waiting for fences timed out!");
+
drm_atomic_helper_update_legacy_modeset_state(dev, state);
dm_state = dm_atomic_get_new_state(state);
/* Update audio instances for each connector. */
amdgpu_dm_commit_audio(dev, state);
- #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
- defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
(dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
- #endif
+
/*
* send vblank event on all events not handled in flip and
* mark consumed event for drm_atomic_helper_commit_hw_done
* added MST connectors not found in existing crtc_state in the chained mode
* TODO: need to dig out the root cause of that
*/
- if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
+ if (!aconnector)
goto skip_modeset;
if (modereset_required(new_crtc_state))
}
}
}
- pre_validate_dsc(state, &dm_state, vars);
+ if (!pre_validate_dsc(state, &dm_state, vars)) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
#endif
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) {
DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
goto fail;
}
/* apply phy settings from user */
for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
- link_lane_settings.lane_settings[r].VOLTAGE_SWING =
+ link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
(enum dc_voltage_swing) (param[0]);
- link_lane_settings.lane_settings[r].PRE_EMPHASIS =
+ link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
(enum dc_pre_emphasis) (param[1]);
- link_lane_settings.lane_settings[r].POST_CURSOR2 =
+ link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
(enum dc_post_cursor2) (param[2]);
}
}
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
- link_training_settings.lane_settings[i] = link->cur_lane_setting[i];
+ link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i];
dc_link_set_test_pattern(
link,
}
/*
- * Returns the current and maximum output bpc for the connector.
- * Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
+ * Returns the current bpc for the crtc.
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc
*/
-static int output_bpc_show(struct seq_file *m, void *data)
+static int amdgpu_current_bpc_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc = NULL;
+ struct drm_crtc *crtc = m->private;
+ struct drm_device *dev = crtc->dev;
struct dm_crtc_state *dm_crtc_state = NULL;
int res = -ENODEV;
unsigned int bpc;
mutex_lock(&dev->mode_config.mutex);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- if (connector->state == NULL)
- goto unlock;
-
- crtc = connector->state->crtc;
- if (crtc == NULL)
- goto unlock;
-
drm_modeset_lock(&crtc->mutex, NULL);
if (crtc->state == NULL)
goto unlock;
}
seq_printf(m, "Current: %u\n", bpc);
- seq_printf(m, "Maximum: %u\n", connector->display_info.bpc);
res = 0;
unlock:
- if (crtc)
- drm_modeset_unlock(&crtc->mutex);
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&crtc->mutex);
mutex_unlock(&dev->mode_config.mutex);
return res;
}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc);
/*
* Example usage:
DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support);
DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
-DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status);
#ifdef CONFIG_DRM_AMD_DC_HDCP
DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
const struct file_operations *fops;
} connector_debugfs_entries[] = {
{"force_yuv420_output", &force_yuv420_output_fops},
- {"output_bpc", &output_bpc_fops},
{"trigger_hotplug", &trigger_hotplug_debugfs_fops},
{"internal_display", &internal_display_fops}
};
DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
crc_win_update_set, "%llu\n");
-
+#endif
void crtc_debugfs_init(struct drm_crtc *crtc)
{
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
if (!dir)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
-
-}
#endif
+ debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
+ crtc, &amdgpu_current_bpc_fops);
+}
+
/*
* Writes DTN log state to the user supplied buffer.
* Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
return link_rate;
}
- static void vendor_specific_lttpr_wa_one_start(struct dc_link *link)
- {
- const uint8_t vendor_lttpr_write_data[4] = {0x1, 0x50, 0x63, 0xff};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
-
- if (offset != 0xFF)
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /* W/A for certain LTTPR to reset their lane settings, part one of two */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data[0],
- sizeof(vendor_lttpr_write_data));
- }
-
- static void vendor_specific_lttpr_wa_one_two(
- struct dc_link *link,
- const uint8_t rate)
- {
- if (link->apply_vendor_specific_lttpr_link_rate_wa) {
- uint8_t toggle_rate = 0x0;
-
- if (rate == 0x6)
- toggle_rate = 0xA;
- else
- toggle_rate = 0x6;
-
- if (link->vendor_specific_lttpr_link_rate_wa == rate) {
- /* W/A for certain LTTPR to reset internal state for link training */
- core_link_write_dpcd(
- link,
- DP_LINK_BW_SET,
- &toggle_rate,
- 1);
- }
-
- /* Store the last attempted link rate for this link */
- link->vendor_specific_lttpr_link_rate_wa = rate;
- }
- }
-
static void dp_fixed_vs_pe_read_lane_adjust(
struct dc_link *link,
union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX])
}
}
- static void vendor_specific_lttpr_wa_four(
- struct dc_link *link,
- bool apply_wa)
- {
- const uint8_t vendor_lttpr_write_data_one[4] = {0x1, 0x55, 0x63, 0x8};
- const uint8_t vendor_lttpr_write_data_two[4] = {0x1, 0x55, 0x63, 0x0};
- const uint8_t offset = dp_convert_to_count(
- link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
- uint32_t vendor_lttpr_write_address = 0xF004F;
- uint8_t sink_status = 0;
- uint8_t i;
-
- if (offset != 0xFF)
- vendor_lttpr_write_address +=
- ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
-
- /* W/A to pass through DPCD write of TPS=0 to DPRX */
- if (apply_wa) {
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_one[0],
- sizeof(vendor_lttpr_write_data_one));
- }
-
- /* clear training pattern set */
- dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-
- if (apply_wa) {
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_two[0],
- sizeof(vendor_lttpr_write_data_two));
- }
-
- /* poll for intra-hop disable */
- for (i = 0; i < 10; i++) {
- if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
- (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
- break;
- udelay(1000);
- }
- }
-
static void dp_fixed_vs_pe_set_retimer_lane_settings(
struct dc_link *link,
const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX],
<_settings->link_settings.link_rate_set, 1);
} else {
rate = get_dpcd_link_rate(<_settings->link_settings);
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
- vendor_specific_lttpr_wa_one_start(link);
-
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN))
- vendor_specific_lttpr_wa_one_two(link, rate);
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
uint32_t dpcd_base_lt_offset;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = { 0 };
+ union dpcd_training_pattern dpcd_pattern = {0};
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
void dp_hw_to_dpcd_lane_settings(
const struct link_training_settings *lt_settings,
const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint8_t lane = 0;
const struct link_training_settings *lt_settings,
const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
- union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+ union dpcd_training_lane dpcd_lane_settings[])
{
uint32_t lane;
return;
- for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
if (lt_settings->voltage_swing)
lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
if (lt_settings->pre_emphasis)
offset,
lane01_status_address, dpcd_buf[0],
lane01_status_address + 1, dpcd_buf[1]);
- } else {
- DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
- __func__,
- lane01_status_address, dpcd_buf[0],
- lane01_status_address + 1, dpcd_buf[1]);
- }
- lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
- if (is_repeater(link, offset))
lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- if (is_repeater(link, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
__func__,
lane01_adjust_address + 1,
dpcd_buf[lane_adjust_offset + 1]);
} else {
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ",
+ __func__,
+ lane01_status_address, dpcd_buf[0],
+ lane01_status_address + 1, dpcd_buf[1]);
+
+ lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1;
+
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n",
__func__,
lane01_adjust_address,
dp_translate_training_aux_read_interval(
link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- wait_time_microsec = 16000;
- }
-
dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
/* 5. check CR done*/
if (!dp_is_cr_done(lane_count, dpcd_lane_status))
- return LINK_TRAINING_EQ_FAIL_CR;
+ return dpcd_lane_status[0].bits.CR_DONE_0 ?
+ LINK_TRAINING_EQ_FAIL_CR_PARTIAL :
+ LINK_TRAINING_EQ_FAIL_CR;
/* 6. check CHEQ done*/
if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
retries_cr = 0;
retry_count = 0;
/* 3. wait receiver to lock-on*/
wait_time_microsec = lt_settings->cr_pattern_time;
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) {
- wait_time_microsec = 16000;
- }
-
dp_wait_for_training_aux_rd_interval(
link,
wait_time_microsec);
lt_settings->always_match_dpcd_with_hw_lane_settings = false;
}
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
- lt_settings->lane_settings[lane].VOLTAGE_SWING =
+ lt_settings->hw_lane_settings[lane].VOLTAGE_SWING =
lt_settings->voltage_swing != NULL ?
*lt_settings->voltage_swing :
VOLTAGE_SWING_LEVEL0;
- lt_settings->lane_settings[lane].PRE_EMPHASIS =
+ lt_settings->hw_lane_settings[lane].PRE_EMPHASIS =
lt_settings->pre_emphasis != NULL ?
*lt_settings->pre_emphasis
: PRE_EMPHASIS_DISABLED;
- lt_settings->lane_settings[lane].POST_CURSOR2 =
+ lt_settings->hw_lane_settings[lane].POST_CURSOR2 =
lt_settings->post_cursor2 != NULL ?
*lt_settings->post_cursor2
: POST_CURSOR2_DISABLED;
}
- dp_hw_to_dpcd_lane_settings(lt_settings,
- lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
/* Initialize training timings */
if (overrides->cr_pattern_time != NULL)
case LINK_TRAINING_EQ_FAIL_CR:
lt_result = "CR failed in EQ";
break;
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
+ lt_result = "CR failed in EQ partially";
+ break;
case LINK_TRAINING_EQ_FAIL_EQ:
lt_result = "EQ failed";
break;
link_rate,
lt_settings->link_settings.lane_count,
lt_result,
- lt_settings->lane_settings[0].VOLTAGE_SWING,
- lt_settings->lane_settings[0].PRE_EMPHASIS,
+ lt_settings->hw_lane_settings[0].VOLTAGE_SWING,
+ lt_settings->hw_lane_settings[0].PRE_EMPHASIS,
lt_spread);
}
uint32_t wait_time = 0;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- enum link_training_result status = LINK_TRAINING_SUCCESS;
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Transmit 128b/132b_TPS1 over Main-Link */
lt_settings->pattern_for_eq, DPRX);
/* poll for channel EQ done */
- while (status == LINK_TRAINING_SUCCESS) {
+ while (result == LINK_TRAINING_SUCCESS) {
dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
wait_time += aux_rd_interval;
- dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
- if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
dpcd_lane_status)) {
/* pass */
break;
} else if (loop_count >= lt_settings->eq_loop_count_limit) {
- status = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ result = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- status = DP_128b_132b_LT_FAILED;
+ result = DP_128b_132b_LT_FAILED;
} else {
dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX);
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
/* poll for EQ interlane align done */
- while (status == LINK_TRAINING_SUCCESS) {
- if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ while (result == LINK_TRAINING_SUCCESS) {
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
/* pass */
break;
} else if (wait_time >= lt_settings->eq_wait_time_limit) {
- status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- status = DP_128b_132b_LT_FAILED;
+ result = DP_128b_132b_LT_FAILED;
} else {
dp_wait_for_training_aux_rd_interval(link,
lt_settings->eq_pattern_time);
wait_time += lt_settings->eq_pattern_time;
- dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
}
}
- return status;
+ return result;
}
static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
struct link_training_settings *lt_settings)
{
/* Assumption: assume hardware has transmitted eq pattern */
- enum link_training_result status = LINK_TRAINING_SUCCESS;
+ enum dc_status status = DC_OK;
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
uint32_t wait_time = 0;
/* initiate CDS done sequence */
dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
/* poll for CDS interlane align done and symbol lock */
- while (status == LINK_TRAINING_SUCCESS) {
+ while (result == LINK_TRAINING_SUCCESS) {
dp_wait_for_training_aux_rd_interval(link,
lt_settings->cds_pattern_time);
wait_time += lt_settings->cds_pattern_time;
- dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
&dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
- if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
/* pass */
break;
} else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
- status = DP_128b_132b_LT_FAILED;
+ result = DP_128b_132b_LT_FAILED;
} else if (wait_time >= lt_settings->cds_wait_time_limit) {
- status = DP_128b_132b_CDS_DONE_TIMEOUT;
+ result = DP_128b_132b_CDS_DONE_TIMEOUT;
}
}
- return status;
+ return result;
}
static enum link_training_result dp_perform_8b_10b_link_training(
link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0};
const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68};
+ uint32_t pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa;
uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0};
uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0};
uint32_t vendor_lttpr_write_address = 0xF004F;
if (offset != 0xFF) {
vendor_lttpr_write_address +=
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
+
+ /* Certain display and cable configurations require extra delay */
+ if (offset > 2)
+ pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2;
}
/* Vendor specific: Reset lane settings */
/* Perform Clock Recovery Sequence */
if (status == LINK_TRAINING_SUCCESS) {
+ const uint8_t max_vendor_dpcd_retries = 10;
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+ enum dc_status dpcd_status = DC_OK;
+ uint8_t i = 0;
retries_cr = 0;
retry_count = 0;
lt_settings->pattern_for_cr,
0);
/* Vendor specific: Disable intercept */
- core_link_write_dpcd(
- link,
- vendor_lttpr_write_address,
- &vendor_lttpr_write_data_intercept_dis[0],
- sizeof(vendor_lttpr_write_data_intercept_dis));
+ for (i = 0; i < max_vendor_dpcd_retries; i++) {
+ msleep(pre_disable_intercept_delay_ms);
+ dpcd_status = core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_dis[0],
+ sizeof(vendor_lttpr_write_data_intercept_dis));
+
+ if (dpcd_status == DC_OK)
+ break;
+
+ core_link_write_dpcd(
+ link,
+ vendor_lttpr_write_address,
+ &vendor_lttpr_write_data_intercept_en[0],
+ sizeof(vendor_lttpr_write_data_intercept_en));
+ }
} else {
vendor_lttpr_write_data_vs[3] = 0;
vendor_lttpr_write_data_pe[3] = 0;
<_settings);
/* reset previous training states */
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- link->apply_vendor_specific_lttpr_link_rate_wa = true;
- vendor_specific_lttpr_wa_four(link, true);
- } else {
- dpcd_exit_training_mode(link);
- }
+ dpcd_exit_training_mode(link);
/* configure link prior to entering training mode */
dpcd_configure_lttpr_mode(link, <_settings);
ASSERT(0);
/* exit training mode */
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
- link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
- link->apply_vendor_specific_lttpr_link_rate_wa = false;
- vendor_specific_lttpr_wa_four(link, (status != LINK_TRAINING_SUCCESS));
- } else {
- dpcd_exit_training_mode(link);
- }
+ dpcd_exit_training_mode(link);
/* switch to video idle */
if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
j = 0;
while (j < attempts && fail_count < (attempts * 10)) {
- DC_LOG_HW_LINK_TRAINING("%s: Beginning link training attempt %u of %d @ rate(%d) x lane(%d)\n",
- __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
cur_link_settings.lane_count);
dp_enable_link_phy(
fail_count++;
dp_trace_lt_fail_count_update(link, fail_count, false);
- /* latest link training still fail, skip delay and keep PHY on
- */
- if (j == (attempts - 1) && link->ep_type == DISPLAY_ENDPOINT_PHY)
- break;
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY) {
+ /* if the latest link training still fails or link training is
+ * aborted, skip the delay and keep the PHY on
+ */
+ if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT))
+ break;
+ }
- DC_LOG_WARNING("%s: Link training attempt %u of %d failed @ rate(%d) x lane(%d)\n",
- __func__, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
- cur_link_settings.lane_count);
+ DC_LOG_WARNING("%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) : fail reason:(%d)\n",
+ __func__, link->link_index, (unsigned int)j + 1, attempts, cur_link_settings.link_rate,
+ cur_link_settings.lane_count, status);
dp_disable_link_phy(link, &pipe_ctx->link_res, signal);
*/
req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings);
- if (req_bw > link_bw)
- break;
+ is_link_bw_low = (req_bw > link_bw);
+ is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
+ (cur_link_settings.lane_count <= LANE_COUNT_ONE));
+ if (is_link_bw_low)
+ DC_LOG_WARNING(
+ "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
+ __func__, link->link_index, req_bw, link_bw);
}
msleep(delay_between_attempts);
struct dc_link_settings *cur,
enum link_training_result training_result)
{
- if (!cur)
- return false;
- if (!max)
- return false;
-
if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING ||
link->dc->debug.force_dp2_lt_fallback_method)
return decide_fallback_link_setting_max_bw_policy(link, max, cur,
break;
}
case LINK_TRAINING_EQ_FAIL_EQ:
+ case LINK_TRAINING_EQ_FAIL_CR_PARTIAL:
{
if (!reached_minimum_lane_count(cur->lane_count)) {
cur->lane_count = reduce_lane_count(cur->lane_count);
&dpcd_lane_adjustment[0].raw,
sizeof(dpcd_lane_adjustment));
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
dp_fixed_vs_pe_read_lane_adjust(
link,
return true;
}
- bool dp_retrieve_lttpr_cap(struct dc_link *link)
+ /* Logic to determine LTTPR mode */
+ static void determine_lttpr_mode(struct dc_link *link)
{
- uint8_t lttpr_dpcd_data[8];
bool allow_lttpr_non_transparent_mode = 0;
bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
- enum dc_status status = DC_ERROR_UNEXPECTED;
- bool is_lttpr_present = false;
- memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
allow_lttpr_non_transparent_mode = 1;
}
- /*
- * Logic to determine LTTPR mode
- */
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
if (vbios_lttpr_enable && vbios_lttpr_interop)
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
link->dc->debug.dpia_debug.bits.force_non_lttpr)
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
#endif
+ }
+
+ bool dp_retrieve_lttpr_cap(struct dc_link *link)
+ {
+ uint8_t lttpr_dpcd_data[8];
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+ bool is_lttpr_present = false;
+
+ memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+
+ /* Logic to determine LTTPR mode */
+ determine_lttpr_mode(link);
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ !link->dc->debug.disable_fixed_vs_aux_timeout_wa) {
+ /* Fixed VS workaround for AUX timeout */
+ const uint32_t fixed_vs_address = 0xF004F;
+ const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc};
+
+ core_link_write_dpcd(
+ link,
+ fixed_vs_address,
+ fixed_vs_data,
+ sizeof(fixed_vs_data));
+ }
+
/* By reading LTTPR capability, RX assumes that we will enable
* LTTPR extended aux timeout if LTTPR is present.
*/
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
- status = core_link_read_dpcd(
- link,
- DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
- &dpcd_data,
- sizeof(dpcd_data));
+ determine_lttpr_mode(link);
+
+ /* Issue an AUX read to test DPRX responsiveness. If LTTPR is supported, the first read is expected to
+ * be to determine LTTPR capabilities. Otherwise trying to read power state should be an innocuous AUX read.
+ */
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+ status = core_link_read_dpcd(
+ link,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ &dpcd_data,
+ sizeof(dpcd_data));
+ else
+ status = core_link_read_dpcd(
+ link,
+ DP_SET_POWER,
+ &dpcd_data,
+ sizeof(dpcd_data));
if (status != DC_OK) {
DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
uint8_t backlight_adj_cap;
+ uint8_t general_edp_cap;
retrieve_link_cap(link);
link->dpcd_caps.edp_supported_link_rates_count = 0;
link->dpcd_caps.dynamic_backlight_capable_edp =
(backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false;
+ core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1,
+ &general_edp_cap, sizeof(general_edp_cap));
+
+ link->dpcd_caps.set_power_state_capable_edp =
+ (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false;
+
dc_link_set_default_brightness_aux(link);
core_link_read_dpcd(link, DP_EDP_DPCD_REV,
if (is_dp_phy_pattern(test_pattern)) {
/* Set DPCD Lane Settings before running test pattern */
if (p_link_settings != NULL) {
- if (link->dc->debug.apply_vendor_specific_lttpr_wa &&
- (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
+ if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) &&
link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
dp_fixed_vs_pe_set_retimer_lane_settings(
link,
if (allocate) {
avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ /// Validation should filter out modes that exceed link BW
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT);
+ if (req_slot_count > MAX_MTP_SLOT_COUNT)
+ return false;
} else {
/// Leave req_slot_count = 0 if allocate is false.
}
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
+ if (link->aux_access_disabled)
+ return true;
+
/// Write DPCD 2C0 = 1 to start updating
update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
core_link_write_dpcd(
&start_time_slot,
1);
- ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW
core_link_write_dpcd(
link,
DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
// TODO - DP2.0 Payload: Read and log the payload table from downstream branch
}
- proposed_table->stream_count = 1; /// Always 1 stream for SST
- proposed_table->stream_allocations[0].slot_count = req_slot_count;
- proposed_table->stream_allocations[0].vcp_id = vc_id;
-
return result;
}
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated lane_status_updated;
+ if (link->aux_access_disabled)
+ return true;
for (i = 0; i < act_retries; i++) {
get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
unsigned int i;
if (link->connector_signal == SIGNAL_TYPE_EDP) {
- link->dc->hwss.edp_power_control(link, true);
+ if (!link->dc->config.edp_no_power_sequencing)
+ link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}
goto failed;
}
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
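/*
 * Editor's note (not part of this patch): bitmap_to_arr32() copies the
 * unsigned-long-based bitmap into a u32 array, which stays correct on
 * 64-bit and big-endian kernels, unlike the removed bitmap_copy() cast.
 */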
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
return ret;
}
+
+ void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
+ {
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+ #define mmMP1_SMN_C2PMSG_66 0x0282
+ #define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+ #define mmMP1_SMN_C2PMSG_82 0x0292
+ #define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+ #define mmMP1_SMN_C2PMSG_90 0x029a
+ #define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
#define SMU13_VOLTAGE_SCALE 4
#define LINK_WIDTH_MAX 6
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ default:
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ }
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
} else if ((frev == 4) && (crev == 0)) {
smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
feature->feature_num < 64)
return -EINVAL;
- bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+ bitmap_to_arr32(feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
- if (smu->smu_table.thermal_controller_type)
- return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
-
- return 0;
+ return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
}
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- 0,
+ smu_baco->maco_support ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
ret = smu_cmn_send_smc_msg(smu,
SMU_BACO_STATE_EXIT);
}
+ int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+ {
+ uint16_t index;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+
+ return smu_cmn_send_msg_without_waiting(smu, index, 0);
+ }
+
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
smu_table->clocks_table, false);
}
+
+ void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
+ {
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ }
+
+ int smu_v13_0_mode1_reset(struct smu_context *smu)
+ {
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+ }
* alloc_ordered_workqueue - allocate an ordered workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
*
* Allocate an ordered workqueue. An ordered workqueue executes at
* most one work item at any given time in the queued order. They are
struct delayed_work *dwork, unsigned long delay);
extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
extern void drain_workqueue(struct workqueue_struct *wq);
extern int schedule_on_each_cpu(work_func_t func);
int execute_in_process_context(work_func_t fn, struct execute_work *);
extern bool flush_work(struct work_struct *work);
+ extern bool cancel_work(struct work_struct *work);
extern bool cancel_work_sync(struct work_struct *work);
extern bool flush_delayed_work(struct delayed_work *dwork);
return queue_work(system_wq, work);
}
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See the accompanying discussion for reasons and steps for converting
+ * system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+ __compiletime_warning("Please avoid flushing system-wide workqueues.");
+
/**
* flush_scheduled_work - ensure that any scheduled work has run to completion.
*
* Forces execution of the kernel-global workqueue and blocks until its
* completion.
*
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
*
* One of the work items currently on the workqueue needs to acquire
* a lock held by your code or its caller.
* need to know that a particular work item isn't queued and isn't running.
* In such cases you should use cancel_delayed_work_sync() or
* cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
*/
-static inline void flush_scheduled_work(void)
-{
- flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work() \
+({ \
+ if (0) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(system_wq); \
+})
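/*
 * Illustrative sketch (editor's aside, not part of this patch) of the first
 * deadlock scenario documented above for flush_scheduled_work(). The names
 * example_lock, example_work, example_work_fn and example_caller are
 * hypothetical.
 */
static DEFINE_MUTEX(example_lock);
static void example_work_fn(struct work_struct *work);
static DECLARE_WORK(example_work, example_work_fn);

static void example_work_fn(struct work_struct *work)
{
	mutex_lock(&example_lock);	/* blocks: example_caller() holds the lock */
	mutex_unlock(&example_lock);
}

static void example_caller(void)
{
	schedule_work(&example_work);	/* queued on the kernel-global workqueue */
	mutex_lock(&example_lock);
	flush_scheduled_work();		/* waits for example_work_fn() -> deadlock */
	mutex_unlock(&example_lock);
}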
+
+/*
+ * Although there is no longer an in-tree caller, for now just emit a
+ * warning in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq) \
+({ \
+ struct workqueue_struct *_wq = (wq); \
+ \
+ if ((__builtin_constant_p(_wq == system_wq) && \
+ _wq == system_wq) || \
+ (__builtin_constant_p(_wq == system_highpri_wq) && \
+ _wq == system_highpri_wq) || \
+ (__builtin_constant_p(_wq == system_long_wq) && \
+ _wq == system_long_wq) || \
+ (__builtin_constant_p(_wq == system_unbound_wq) && \
+ _wq == system_unbound_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_wq) && \
+ _wq == system_freezable_wq) || \
+ (__builtin_constant_p(_wq == system_power_efficient_wq) && \
+ _wq == system_power_efficient_wq) || \
+ (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+ _wq == system_freezable_power_efficient_wq)) \
+ __warn_flushing_systemwide_wq(); \
+ __flush_workqueue(_wq); \
+})
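/*
 * Illustrative sketch (editor's aside, not part of this patch): what the
 * wrapper above means for callers. example_driver_wq and example_exit are
 * hypothetical names, and detection is best-effort (see the note next to
 * __warn_flushing_systemwide_wq() in kernel/workqueue.c below).
 */
static struct workqueue_struct *example_driver_wq;

static void example_exit(void)
{
	flush_workqueue(system_wq);		/* may trip the __compiletime_warning */
	flush_workqueue(example_driver_wq);	/* no warning; plain __flush_workqueue() */
}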
/**
* schedule_delayed_work_on - queue work in global workqueue on CPU after delay
*
* The main surface is Y-tiled and is at plane index 0 whereas CCS is linear
* and at index 1. The clear color is stored at index 2, and the pitch should
- * be ignored. The clear color structure is 256 bits. The first 128 bits
+ * be 64-byte aligned. The clear color structure is 256 bits. The first 128 bits
* represents Raw Clear Color Red, Green, Blue and Alpha color each represented
* by 32 bits. The raw clear color is consumed by the 3d engine and generates
* the converted clear color of size 64 bits. The first 32 bits store the Lower
* outside of the GEM object in a reserved memory area dedicated for the
* storage of the CCS data for all RC/RC_CC/MC compressible GEM objects. The
* main surface pitch is required to be a multiple of four Tile 4 widths. The
- * clear color is stored at plane index 1 and the pitch should be ignored. The
- * format of the 256 bits of clear color data matches the one used for the
- * I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
+ * clear color is stored at plane index 1 and the pitch should be 64-byte
+ * aligned. The format of the 256 bits of clear color data matches the one used
+ * for the I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC modifier, see its description
* for details.
*/
#define I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC fourcc_mod_code(INTEL, 12)
#define AMD_FMT_MOD_TILE_VER_GFX9 1
#define AMD_FMT_MOD_TILE_VER_GFX10 2
#define AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS 3
+ #define AMD_FMT_MOD_TILE_VER_GFX11 4
/*
* 64K_S is the same for GFX9/GFX10/GFX10_RBPLUS and hence has GFX9 as canonical
#define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25
#define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26
#define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27
+ #define AMD_FMT_MOD_TILE_GFX11_256K_R_X 31
#define AMD_FMT_MOD_DCC_BLOCK_64B 0
#define AMD_FMT_MOD_DCC_BLOCK_128B 1
#define AMD_FMT_MOD_PIPE_MASK 0x7
#define AMD_FMT_MOD_SET(field, value) \
- ((uint64_t)(value) << AMD_FMT_MOD_##field##_SHIFT)
+ ((__u64)(value) << AMD_FMT_MOD_##field##_SHIFT)
#define AMD_FMT_MOD_GET(field, value) \
(((value) >> AMD_FMT_MOD_##field##_SHIFT) & AMD_FMT_MOD_##field##_MASK)
#define AMD_FMT_MOD_CLEAR(field) \
- (~((uint64_t)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
+ (~((__u64)AMD_FMT_MOD_##field##_MASK << AMD_FMT_MOD_##field##_SHIFT))
#if defined(__cplusplus)
}
}
/**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush
*
* This function sleeps until all work items which were queued on entry
* have finished execution, but it is not livelocked by new incoming ones.
*/
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
{
struct wq_flusher this_flusher = {
.list = LIST_HEAD_INIT(this_flusher.list),
out_unlock:
mutex_unlock(&wq->mutex);
}
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
/**
* drain_workqueue - drain a workqueue
wq->flags |= __WQ_DRAINING;
mutex_unlock(&wq->mutex);
reflush:
- flush_workqueue(wq);
+ __flush_workqueue(wq);
mutex_lock(&wq->mutex);
return ret;
}
+ /*
+ * See cancel_delayed_work()
+ */
+ bool cancel_work(struct work_struct *work)
+ {
+ return __cancel_work(work, false);
+ }
+ EXPORT_SYMBOL(cancel_work);
+
/**
* cancel_delayed_work - cancel a delayed work
* @dwork: delayed_work to cancel
for_each_pool_worker(worker, pool) {
kthread_set_per_cpu(worker->task, -1);
- WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+ WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, wq_unbound_cpumask) < 0);
}
mutex_unlock(&wq_pool_attach_mutex);
wq_online = true;
wq_watchdog_init();
}
+
+/*
+ * Despite the naming, this is a no-op function which is here only to avoid
+ * a link error. Since the compile-time warning may fail to catch some cases,
+ * we will need to emit a run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);