/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/string.h>
#include <linux/acpi.h>
#include <linux/version.h>
#include <linux/i2c.h>

#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_edid.h>

#include "dm_services.h"
#include "amdgpu.h"
#include "dc.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_irq.h"
#include "amdgpu_dm_mst_types.h"

#include "dm_helpers.h"

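/*
 * This file implements the dm_helpers_* interface declared in dm_helpers.h:
 * the glue that lets DC (display core) reach Linux DRM facilities such as
 * EDID parsing, DP AUX/DPCD access, I2C transfers and the DP MST topology
 * manager through the amdgpu display manager (amdgpu_dm).
 */
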
/* dm_helpers_parse_edid_caps
 *
 * Parse capabilities out of a raw EDID into DC's edid caps structure.
 *
 * @edid:      [in]  pointer to the raw edid
 * @edid_caps: [out] pointer to the edid caps to fill in
 *
 * @return: EDID_OK on success, otherwise an error status
 */
enum dc_edid_status dm_helpers_parse_edid_caps(
		struct dc_context *ctx,
		const struct dc_edid *edid,
		struct dc_edid_caps *edid_caps)
{
	struct edid *edid_buf = (struct edid *) edid->raw_edid;
	struct cea_sad *sads;
	int sad_count = -1;
	int sadb_count = -1;
	int i = 0;
	int j = 0;
	uint8_t *sadb = NULL;

	enum dc_edid_status result = EDID_OK;

	if (!edid_caps || !edid)
		return EDID_BAD_INPUT;

	if (!drm_edid_is_valid(edid_buf))
		result = EDID_BAD_CHECKSUM;

	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
		((uint16_t) edid_buf->mfg_id[1]) << 8;
	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
		((uint16_t) edid_buf->prod_code[1]) << 8;
	edid_caps->serial_number = edid_buf->serial;
	edid_caps->manufacture_week = edid_buf->mfg_week;
	edid_caps->manufacture_year = edid_buf->mfg_year;

	/* One of the four detailed_timings stores the monitor name. It's
	 * stored in an array of length 13.
	 */
	for (i = 0; i < 4; i++) {
		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
					break;

				edid_caps->display_name[j] =
					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
				j++;
			}
		}
	}

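	/* Sink-type and audio capabilities come from the EDID CEA extension
	 * block: drm_detect_hdmi_monitor() distinguishes HDMI from DVI sinks,
	 * and the Short Audio Descriptors (SADs) list the supported audio
	 * formats.
	 */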
	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
			(struct edid *) edid->raw_edid);

	sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
	if (sad_count <= 0)
		return result;

	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
		struct cea_sad *sad = &sads[i];

		edid_caps->audio_modes[i].format_code = sad->format;
		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
		edid_caps->audio_modes[i].sample_rate = sad->freq;
		edid_caps->audio_modes[i].sample_size = sad->byte2;
	}

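	/* The Speaker Allocation Data Block, if present, maps directly onto
	 * DC's speaker_flags bitmask; fall back to the default speaker
	 * location when the sink does not provide one.
	 */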
	sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);

	if (sadb_count < 0) {
		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
		sadb_count = 0;
	}

	if (sadb_count)
		edid_caps->speaker_flags = sadb[0];
	else
		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;

	kfree(sads);
	kfree(sadb);

	return result;
}

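/*
 * get_payload_table() copies the VCPI/slot allocations tracked by the DRM MST
 * topology manager into DC's dp_mst_stream_allocation_table, under the
 * manager's payload lock.
 */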
static void get_payload_table(
		struct amdgpu_dm_connector *aconnector,
		struct dp_mst_stream_allocation_table *proposed_table)
{
	int i;
	struct drm_dp_mst_topology_mgr *mst_mgr =
			&aconnector->mst_port->mst_mgr;

	mutex_lock(&mst_mgr->payload_lock);

	proposed_table->stream_count = 0;

	/* number of active streams */
	for (i = 0; i < mst_mgr->max_payloads; i++) {
		if (mst_mgr->payloads[i].num_slots == 0)
			break; /* end of vcp_id table */

		ASSERT(mst_mgr->payloads[i].payload_state !=
				DP_PAYLOAD_DELETE_LOCAL);

		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
			mst_mgr->payloads[i].payload_state ==
					DP_PAYLOAD_REMOTE) {

			struct dp_mst_stream_allocation *sa =
					&proposed_table->stream_allocations[
						proposed_table->stream_count];

			sa->slot_count = mst_mgr->payloads[i].num_slots;
			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
			proposed_table->stream_count++;
		}
	}

	mutex_unlock(&mst_mgr->payload_lock);
}

void dm_helpers_dp_update_branch_info(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Writes payload allocation table in immediate downstream device.
 */
bool dm_helpers_dp_mst_write_payload_allocation_table(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		struct dp_mst_stream_allocation_table *proposed_table,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct dm_connector_state *dm_conn_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;
	bool ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
	/* Accessing the connector state is required for vcpi_slots allocation
	 * and relies directly on the commit-check behaviour that blocks before
	 * commit, guaranteeing that the state is not swapped while it is still
	 * in use in commit tail.
	 */
	if (!aconnector || !aconnector->mst_port)
		return false;

	dm_conn_state = to_dm_connector_state(aconnector->base.state);

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	mst_port = aconnector->port;

	if (enable) {
		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
					       dm_conn_state->pbn,
					       dm_conn_state->vcpi_slots);
		if (!ret)
			return false;
	} else {
		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
	}

	/* It's OK for this to fail */
	drm_dp_update_payload_part1(mst_mgr);

	/* mst_mgr->payloads holds the VC payloads that notify the MST branch
	 * device via DPCD or AUX message. Slots 1-63 are allocated
	 * sequentially per stream, and the AMD ASIC stream slot allocation
	 * must follow the same sequence, so copy the DRM MST allocation
	 * into dc's table.
	 */
	get_payload_table(aconnector, proposed_table);

	return true;
}

/*
 * Poll pending down reply
 */
void dm_helpers_dp_mst_poll_pending_down_reply(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Clear payload allocation table before enabling the MST DP link.
 */
void dm_helpers_dp_mst_clear_payload_allocation_table(
	struct dc_context *ctx,
	const struct dc_link *link)
{}

/*
 * Polls until the ACT (allocation change trigger) has been handled so the
 * ALLOCATE_PAYLOAD message can be sent.
 */
enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
		struct dc_context *ctx,
		const struct dc_stream_state *stream)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	int ret;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return ACT_FAILED;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return ACT_FAILED;

	ret = drm_dp_check_act_status(mst_mgr);

	if (ret)
		return ACT_FAILED;

	return ACT_SUCCESS;
}

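/*
 * Finalizes the payload programming (step 2 of the DP MST payload update)
 * and, when a stream is being disabled, releases its VCPI allocation.
 */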
bool dm_helpers_dp_mst_send_payload_allocation(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	struct amdgpu_dm_connector *aconnector;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

	if (!aconnector || !aconnector->mst_port)
		return false;

	mst_port = aconnector->port;

	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!mst_mgr->mst_state)
		return false;

	/* It's OK for this to fail */
	drm_dp_update_payload_part2(mst_mgr);

	if (!enable)
		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);

	return true;
}

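/*
 * DTN log helpers: dm_dtn_log_begin/append_v/end build up a formatted log in
 * the caller-provided dc_log_buffer_ctx, or fall back to dmesg when no buffer
 * context is given.
 */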
void dm_dtn_log_begin(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn begin]\n";

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

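/*
 * Append a printf-style formatted string to the DTN log buffer, growing the
 * buffer as needed; with no log context the message is redirected to dmesg.
 */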
void dm_dtn_log_append_v(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx,
	const char *msg, ...)
{
	va_list args;
	size_t total;
	int n;

	if (!log_ctx) {
		/* No context, redirect to dmesg. */
		struct va_format vaf;

		vaf.fmt = msg;
		vaf.va = &args;

		va_start(args, msg);
		pr_info("%pV", &vaf);
		va_end(args);
		return;
	}

	/* Measure the output. */
	va_start(args, msg);
	n = vsnprintf(NULL, 0, msg, args);
	va_end(args);
	if (n <= 0)
		return;

	/* Reallocate the string buffer as needed. */
	total = log_ctx->pos + n + 1;
	if (total > log_ctx->size) {
		char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL);

		if (buf) {
			memcpy(buf, log_ctx->buf, log_ctx->pos);
			kvfree(log_ctx->buf);
			log_ctx->buf = buf;
			log_ctx->size = total;
		}
	}

	if (!log_ctx->buf)
		return;

	/* Write the formatted string to the log buffer. */
	va_start(args, msg);
	n = vscnprintf(
		log_ctx->buf + log_ctx->pos,
		log_ctx->size - log_ctx->pos,
		msg, args);
	va_end(args);
	log_ctx->pos += n;
}

void dm_dtn_log_end(struct dc_context *ctx,
	struct dc_log_buffer_ctx *log_ctx)
{
	static const char msg[] = "[dtn end]\n";

	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
}

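/*
 * Enable the DRM DP MST topology manager for the connector behind this link.
 * During boot the actual MST start is deferred and the helper only reports
 * that MST is present.
 */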
bool dm_helpers_dp_mst_start_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link,
		bool boot)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	if (boot) {
		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
			 aconnector, aconnector->base.base.id);
		return true;
	}

	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
}

void dm_helpers_dp_mst_stop_top_mgr(
		struct dc_context *ctx,
		const struct dc_link *link)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return;
	}

	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
		 aconnector, aconnector->base.base.id);

	if (aconnector->mst_mgr.mst_state)
		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
}

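/*
 * Thin wrappers around the DRM DP AUX channel: read or write raw DPCD
 * registers for the connector that backs this DC link.
 */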
bool dm_helpers_dp_read_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DC_LOG_DC("Failed to find connector for link!\n");
		return false;
	}

	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
			data, size) > 0;
}

bool dm_helpers_dp_write_dpcd(
		struct dc_context *ctx,
		const struct dc_link *link,
		uint32_t address,
		const uint8_t *data,
		uint32_t size)
{
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
			address, (uint8_t *)data, size) > 0;
}

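/*
 * Submit a DC i2c_command as a set of Linux i2c_msg transfers on the
 * connector's I2C adapter; the command succeeds only if every payload is
 * transferred.
 */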
bool dm_helpers_submit_i2c(
		struct dc_context *ctx,
		const struct dc_link *link,
		struct i2c_command *cmd)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct i2c_msg *msgs;
	int i = 0;
	int num = cmd->number_of_payloads;
	bool result;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return false;
	}

	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);

	if (!msgs)
		return false;

	for (i = 0; i < num; i++) {
		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
		msgs[i].addr = cmd->payloads[i].address;
		msgs[i].len = cmd->payloads[i].length;
		msgs[i].buf = cmd->payloads[i].data;
	}

	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;

	kfree(msgs);

	return result;
}

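/*
 * Enable or disable DSC (Display Stream Compression) at the sink by writing
 * DP_DSC_ENABLE: through the per-port AUX for MST streams, or through the
 * link's DPCD helper for SST.
 */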
bool dm_helpers_dp_write_dsc_enable(
		struct dc_context *ctx,
		const struct dc_stream_state *stream,
		bool enable)
{
	uint8_t enable_dsc = enable ? 1 : 0;
	struct amdgpu_dm_connector *aconnector;

	if (!stream)
		return false;

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector->dsc_aux)
			return false;

		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
	}

	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);

	return false;
}

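/*
 * Query whether a DP sink is physically present on the link, serialized
 * against other users of the connector's AUX hardware.
 */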
bool dm_helpers_is_dp_sink_present(struct dc_link *link)
{
	bool dp_sink_present;
	struct amdgpu_dm_connector *aconnector = link->priv;

	if (!aconnector) {
		DRM_ERROR("Failed to find connector for link!");
		return true;
	}

	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
	dp_sink_present = dc_link_is_dp_sink_present(link);
	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);

	return dp_sink_present;
}

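/*
 * Read the EDID over the connector's DDC (AUX for DP, I2C otherwise), retry
 * on checksum failures, and hand the result to DC as sink->dc_edid plus the
 * parsed edid_caps.
 */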
enum dc_edid_status dm_helpers_read_local_edid(
		struct dc_context *ctx,
		struct dc_link *link,
		struct dc_sink *sink)
{
	struct amdgpu_dm_connector *aconnector = link->priv;
	struct drm_connector *connector = &aconnector->base;
	struct i2c_adapter *ddc;
	int retry = 3;
	enum dc_edid_status edid_status;
	struct edid *edid;

	if (link->aux_mode)
		ddc = &aconnector->dm_dp_aux.aux.ddc;
	else
		ddc = &aconnector->i2c->base;

	/* Some dongles read the EDID incorrectly the first time, so verify the
	 * checksum and retry to make sure a correct EDID was read.
	 */
	do {
		edid = drm_get_edid(&aconnector->base, ddc);

		/* DP Compliance Test 4.2.2.6 */
		if (link->aux_mode && connector->edid_corrupt)
			drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);

		if (!edid && connector->edid_corrupt) {
			connector->edid_corrupt = false;
			return EDID_BAD_CHECKSUM;
		}

		if (!edid)
			return EDID_NO_RESPONSE;

		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);

		/* We don't need the original edid anymore */
		kfree(edid);

		/* connector->display_info is parsed from the EDID and saved
		 * into drm_connector->display_info by the call stack
		 * drm_parse_ycbcr420_deep_color_info
		 * drm_parse_hdmi_forum_vsdb
		 * drm_add_display_info
		 * drm_connector_update_edid_property
		 *
		 * drm_connector->display_info is then used by amdgpu_dm
		 * functions such as fill_stream_properties_from_drm_display_mode.
		 */
		amdgpu_dm_update_connector_after_detect(aconnector);

		edid_status = dm_helpers_parse_edid_caps(
						ctx,
						&sink->dc_edid,
						&sink->edid_caps);

	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);

	if (edid_status != EDID_OK)
		DRM_ERROR("EDID err: %d, on connector: %s",
				edid_status,
				aconnector->base.name);

	/* DP Compliance Test 4.2.2.3 */
	if (link->aux_mode)
		drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length - 1]);

	return edid_status;
}

void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
	/* TODO: something */
}

#ifdef CONFIG_DRM_AMD_DC_DCN3_0

void *dm_helpers_allocate_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,
		size_t size,
		long long *addr)
{
	/* TODO */
	return NULL;
}

void dm_helpers_free_gpu_mem(
		struct dc_context *ctx,
		enum dc_gpu_mem_alloc_type type,