Most drivers can use drm_fbdev_generic_setup(). Drivers have to implement
atomic modesetting and GEM vmap support. Historically, generic fbdev emulation
expected the framebuffer in system memory or system-like memory. By employing
- struct dma_buf_map, drivers with frambuffers in I/O memory can be supported
+ struct iosys_map, drivers with framebuffers in I/O memory can be supported
as well.
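
For orientation, a minimal sketch of the two initialization paths, assuming
the helpers from include/linux/iosys-map.h (the function and its parameters
are placeholders)::

  #include <linux/iosys-map.h>

  static void example_init(void *vaddr, void __iomem *vaddr_iomem)
  {
          struct iosys_map sys_map, io_map;

          /* Framebuffer in system memory, e.g. from vmalloc(): */
          iosys_map_set_vaddr(&sys_map, vaddr);

          /* Framebuffer in I/O memory, e.g. from ioremap_wc(): */
          iosys_map_set_vaddr_iomem(&io_map, vaddr_iomem);
  }

The is_iomem flag in the map records which union member is valid, so
consumers such as the fbdev emulation can choose between memcpy() and
memcpy_toio() without guessing the address space.
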
Contact: Maintainer of the driver you plan to convert
A number of callback functions in drm_fbdev_fb_ops could benefit from
being rewritten without dependencies on the fbdev module. Some of the
- helpers could further benefit from using struct dma_buf_map instead of
+ helpers could further benefit from using struct iosys_map instead of
raw pointers.
Level: Intermediate
- Use struct dma_buf_map throughout codebase
- ------------------------------------------
+ Use struct iosys_map throughout codebase
+ ----------------------------------------
- Pointers to shared device memory are stored in struct dma_buf_map. Each
+ Pointers to shared device memory are stored in struct iosys_map. Each
instance knows whether it refers to system or I/O memory. Most of the DRM-wide
- interface have been converted to use struct dma_buf_map, but implementations
+ interfaces have been converted to use struct iosys_map, but implementations
often still use raw pointers.
- The task is to use struct dma_buf_map where it makes sense.
+ The task is to use struct iosys_map where it makes sense.
- * Memory managers should use struct dma_buf_map for dma-buf-imported buffers.
- * TTM might benefit from using struct dma_buf_map internally.
- * Framebuffer copying and blitting helpers should operate on struct dma_buf_map.
+ * Memory managers should use struct iosys_map for dma-buf-imported buffers.
+ * TTM might benefit from using struct iosys_map internally.
+ * Framebuffer copying and blitting helpers should operate on struct iosys_map, as sketched below.
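+
+ A hedged sketch of what such a blitting helper could look like; the
+ function name and parameters are made up for illustration::
+
+   #include <linux/io.h>
+   #include <linux/iosys-map.h>
+   #include <linux/string.h>
+
+   /* Copy one chunk into a framebuffer described by an iosys_map. */
+   static void example_fb_memcpy(struct iosys_map *dst, size_t offset,
+                                 const void *src, size_t len)
+   {
+           if (dst->is_iomem)
+                   memcpy_toio(dst->vaddr_iomem + offset, src, len);
+           else
+                   memcpy(dst->vaddr + offset, src, len);
+   }
+
+ Callers then pass the same struct iosys_map regardless of whether the
+ framebuffer lives in system or I/O memory.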
Level: Intermediate
+Request memory regions in all drivers
+-------------------------------------
+
+Go through all drivers and add code to request the memory regions that the
+driver uses. This requires adding calls to request_mem_region(),
+pci_request_region() or similar functions. Use helpers for managed cleanup
+where possible.
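+
+A rough sketch for the PCI case; the "example-drm" region name and the
+probe function are illustrative only. With the managed pcim_enable_device(),
+the region request below is released automatically on unbind::
+
+  #include <linux/pci.h>
+
+  static int example_probe(struct pci_dev *pdev,
+                           const struct pci_device_id *id)
+  {
+          int ret;
+
+          ret = pcim_enable_device(pdev);
+          if (ret)
+                  return ret;
+
+          /*
+           * Claim BAR 0 before touching it, so that it shows up in
+           * /proc/iomem and conflicting drivers fail to bind early.
+           */
+          ret = pci_request_region(pdev, 0, "example-drm");
+          if (ret)
+                  return ret;
+
+          return 0;
+  }
+
+For non-PCI devices, request_mem_region() or devm_request_mem_region()
+fills the same role.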
+
+Drivers are pretty bad at doing this and there used to be conflicts between
+DRM and fbdev drivers. Still, it's the correct thing to do.
+
+Level: Starter
+
Core refactorings
=================
static void ast_set_crtthd_reg(struct ast_private *ast)
{
/* Set Threshold */
- if (ast->chip == AST2300 || ast->chip == AST2400 ||
+ if (ast->chip == AST2600) {
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0xe0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0xa0);
+ } else if (ast->chip == AST2300 || ast->chip == AST2400 ||
ast->chip == AST2500) {
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa7, 0x78);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa6, 0x60);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
struct drm_framebuffer *fb = new_state->fb;
struct ast_private *ast = to_ast_private(plane->dev);
- struct dma_buf_map dst_map =
+ struct iosys_map dst_map =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map;
u64 dst_off =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off;
- struct dma_buf_map src_map = shadow_plane_state->data[0];
+ struct iosys_map src_map = shadow_plane_state->data[0];
unsigned int offset_x, offset_y;
u16 x, y;
u8 x_offset, y_offset;
struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
size_t i;
struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
+ struct iosys_map map;
for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
gbo = ast_cursor_plane->hwc[i].gbo;
struct drm_plane *cursor_plane = &ast_cursor_plane->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
- struct dma_buf_map map;
+ struct iosys_map map;
int ret;
s64 off;
# core driver code
i915-y += i915_driver.o \
i915_config.o \
- i915_irq.o \
i915_getparam.o \
+ i915_ioctl.o \
+ i915_irq.o \
i915_mitigations.o \
i915_module.o \
i915_params.o \
i915-y += \
$(gem-y) \
i915_active.o \
- i915_buddy.o \
i915_cmd_parser.o \
i915_deps.o \
i915_gem_evict.o \
*
*/
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
#include "display/intel_gmbus.h"
#include "i915_drv.h"
+ #include "i915_reg.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 :
psr_table->idle_frames > 15 ? 15 : psr_table->idle_frames;
- switch (psr_table->lines_to_wait) {
- case 0:
- i915->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT;
- break;
- case 1:
- i915->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT;
- break;
- case 2:
- i915->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT;
- break;
- case 3:
- i915->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
- break;
- default:
- drm_dbg_kms(&i915->drm,
- "VBT has unknown PSR lines to wait %u\n",
- psr_table->lines_to_wait);
- break;
- }
-
/*
* New psr options 0=500us, 1=100us, 2=2500us, 3=0us
* Old decimal value is wake up time in multiples of 100 us.
i915->vbt.ports[port] = devdata;
}
+ static bool has_ddi_port_info(struct drm_i915_private *i915)
+ {
+ return DISPLAY_VER(i915) >= 5 || IS_G4X(i915);
+ }
+
static void parse_ddi_ports(struct drm_i915_private *i915)
{
struct intel_bios_encoder_data *devdata;
- if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
- return;
-
- if (i915->vbt.version < 155)
+ if (!has_ddi_port_info(i915))
return;
list_for_each_entry(devdata, &i915->vbt.display_devices, node)
return vbt;
}
+ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
+ {
+ u32 count, data, found, store = 0;
+ u32 static_region, oprom_offset;
+ u32 oprom_size = 0x200000;
+ u16 vbt_size;
+ u32 *vbt;
+
+ static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS);
+ static_region &= OPTIONROM_SPI_REGIONID_MASK;
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region);
+
+ oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET);
+ oprom_offset &= OROM_OFFSET_MASK;
+
+ for (count = 0; count < oprom_size; count += 4) {
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, oprom_offset + count);
+ data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+
+ if (data == *((const u32 *)"$VBT")) {
+ found = oprom_offset + count;
+ break;
+ }
+ }
+
+ if (count >= oprom_size)
+ goto err_not_found;
+
+ /* Get VBT size and allocate space for the VBT */
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found +
+ offsetof(struct vbt_header, vbt_size));
+ vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+ vbt_size &= 0xffff;
+
+ vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL);
+ if (!vbt)
+ goto err_not_found;
+
+ for (count = 0; count < vbt_size; count += 4) {
+ intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + count);
+ data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER);
+ *(vbt + store++) = data;
+ }
+
+ if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+ goto err_free_vbt;
+
+ drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
+
+ return (struct vbt_header *)vbt;
+
+ err_free_vbt:
+ kfree(vbt);
+ err_not_found:
+ return NULL;
+ }
+
static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
pci_unmap_rom(pdev, oprom);
+ drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
+
return vbt;
err_free_vbt:
init_vbt_defaults(i915);
- /* If the OpRegion does not have VBT, look in PCI ROM. */
+ /*
+ * If the OpRegion does not have VBT, look in SPI flash through MMIO or
+ * PCI mapping
+ */
+ if (!vbt && IS_DGFX(i915)) {
+ oprom_vbt = spi_oprom_get_vbt(i915);
+ vbt = oprom_vbt;
+ }
+
if (!vbt) {
oprom_vbt = oprom_get_vbt(i915);
- if (!oprom_vbt)
- goto out;
-
vbt = oprom_vbt;
-
- drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n");
}
+ if (!vbt)
+ goto out;
+
bdb = get_bdb_header(vbt);
i915->vbt.version = bdb->version;
*/
bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
{
- const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- static const struct {
- u16 dp, hdmi;
- } port_mapping[] = {
- [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
- [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
- [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
- [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
- [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
- };
-
- if (HAS_DDI(i915))
- return i915->vbt.ports[port];
-
- /* FIXME maybe deal with port A as well? */
- if (drm_WARN_ON(&i915->drm,
- port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
- return false;
-
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
- child = &devdata->child;
-
- if ((child->dvo_port == port_mapping[port].dp ||
- child->dvo_port == port_mapping[port].hdmi) &&
- (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
- DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
- return true;
- }
+ if (WARN_ON(!has_ddi_port_info(i915)))
+ return true;
- return false;
+ return i915->vbt.ports[port];
}
/**
*/
bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port)
{
- const struct intel_bios_encoder_data *devdata;
- const struct child_device_config *child;
- static const short port_mapping[] = {
- [PORT_B] = DVO_PORT_DPB,
- [PORT_C] = DVO_PORT_DPC,
- [PORT_D] = DVO_PORT_DPD,
- [PORT_E] = DVO_PORT_DPE,
- [PORT_F] = DVO_PORT_DPF,
- };
-
- if (HAS_DDI(i915)) {
- const struct intel_bios_encoder_data *devdata;
-
- devdata = intel_bios_encoder_data_lookup(i915, port);
-
- return devdata && intel_bios_encoder_supports_edp(devdata);
- }
-
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
- child = &devdata->child;
-
- if (child->dvo_port == port_mapping[port] &&
- (child->device_type & DEVICE_TYPE_eDP_BITS) ==
- (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
- return true;
- }
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
- return false;
+ return devdata && intel_bios_encoder_supports_edp(devdata);
}
- static bool child_dev_is_dp_dual_mode(const struct child_device_config *child)
+ static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata)
{
- if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) !=
- (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ const struct child_device_config *child = &devdata->child;
+
+ if (!intel_bios_encoder_supports_dp(devdata) ||
+ !intel_bios_encoder_supports_hdmi(devdata))
return false;
if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA)
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915,
enum port port)
{
- static const struct {
- u16 dp, hdmi;
- } port_mapping[] = {
- /*
- * Buggy VBTs may declare DP ports as having
- * HDMI type dvo_port :( So let's check both.
- */
- [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
- [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
- [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
- [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
- [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
- };
- const struct intel_bios_encoder_data *devdata;
-
- if (HAS_DDI(i915)) {
- const struct intel_bios_encoder_data *devdata;
-
- devdata = intel_bios_encoder_data_lookup(i915, port);
-
- return devdata && child_dev_is_dp_dual_mode(&devdata->child);
- }
-
- if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
- return false;
-
- list_for_each_entry(devdata, &i915->vbt.display_devices, node) {
- if ((devdata->child.dvo_port == port_mapping[port].dp ||
- devdata->child.dvo_port == port_mapping[port].hdmi) &&
- child_dev_is_dp_dual_mode(&devdata->child))
- return true;
- }
+ const struct intel_bios_encoder_data *devdata =
+ intel_bios_encoder_data_lookup(i915, port);
- return false;
+ return devdata && intel_bios_encoder_supports_dp_dual_mode(devdata);
}
/**
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
- static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_link_m_n *m_n,
- const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
{
i915_reg_t reg = PIPEDSL(pipe);
u32 line1, line2;
- u32 line_mask;
- if (DISPLAY_VER(dev_priv) == 2)
- line_mask = DSL_LINEMASK_GEN2;
- else
- line_mask = DSL_LINEMASK_GEN3;
-
- line1 = intel_de_read(dev_priv, reg) & line_mask;
+ line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
msleep(5);
- line2 = intel_de_read(dev_priv, reg) & line_mask;
+ line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK;
return line1 != line2;
}
if (DISPLAY_VER(dev_priv) >= 4) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
- i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_de_wait_for_clear(dev_priv, reg,
- I965_PIPECONF_ACTIVE, 100))
- drm_WARN(&dev_priv->drm, 1,
- "pipe_off wait timed out\n");
+ if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder),
+ PIPECONF_STATE_ENABLE, 100))
+ drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
if (IS_BROADWELL(dev_priv)) {
drm_WARN_ON(dev,
- sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->enable_flip_done &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id))
+ update_planes & BIT(plane->id) &&
+ plane_state->do_async_flip)
plane->enable_flip_done(plane);
}
}
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
if (plane->disable_flip_done &&
plane->pipe == crtc->pipe &&
- update_planes & BIT(plane->id))
+ update_planes & BIT(plane->id) &&
+ plane_state->do_async_flip)
plane->disable_flip_done(plane);
}
}
plane->disable_arm(plane, crtc_state);
}
+ static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+ {
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (crtc_state->has_pch_encoder) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->fdi_m_n);
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ ilk_set_pipeconf(crtc_state);
+ }
+
static void ilk_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
+ ilk_configure_cpu_transcoder(new_crtc_state);
- intel_set_transcoder_timings(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
- if (new_crtc_state->has_pch_encoder)
- intel_cpu_transcoder_set_m_n(new_crtc_state,
- &new_crtc_state->fdi_m_n, NULL);
-
- ilk_set_pipeconf(new_crtc_state);
-
crtc->active = true;
intel_encoders_pre_enable(state, crtc);
if (new_crtc_state->has_pch_encoder) {
- /* Note: FDI PLL enabling _must_ be done before we enable the
- * cpu pipes, hence this is separate from all the other fdi/pch
- * enabling. */
- ilk_fdi_pll_enable(new_crtc_state);
+ ilk_pch_pre_enable(state, crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
assert_fdi_rx_disabled(dev_priv, pipe);
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *master_crtc_state;
struct intel_crtc *master_crtc;
struct drm_connector_state *conn_state;
if (crtc_state->bigjoiner_slave)
intel_encoders_pre_enable(state, master_crtc);
+ }
- /* need to enable VDSC, which we skipped in pre-enable */
- intel_dsc_enable(crtc_state);
+ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+ {
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 13)
- intel_uncompressed_joiner_enable(crtc_state);
+ if (crtc_state->has_pch_encoder) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->fdi_m_n);
+ } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ if (cpu_transcoder != TRANSCODER_EDP)
+ intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ crtc_state->pixel_multiplier - 1);
+
+ hsw_set_frame_start_delay(crtc_state);
+
+ hsw_set_transconf(crtc_state);
}
static void hsw_crtc_enable(struct intel_atomic_state *state,
icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
}
+ intel_dsc_enable(new_crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 13)
+ intel_uncompressed_joiner_enable(new_crtc_state);
+
intel_set_pipe_src_size(new_crtc_state);
if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
bdw_set_pipemisc(new_crtc_state);
- if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) {
- intel_set_transcoder_timings(new_crtc_state);
-
- if (cpu_transcoder != TRANSCODER_EDP)
- intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
- new_crtc_state->pixel_multiplier - 1);
-
- if (new_crtc_state->has_pch_encoder)
- intel_cpu_transcoder_set_m_n(new_crtc_state,
- &new_crtc_state->fdi_m_n, NULL);
-
- hsw_set_frame_start_delay(new_crtc_state);
-
- hsw_set_transconf(new_crtc_state);
- }
+ if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
+ hsw_configure_cpu_transcoder(new_crtc_state);
crtc->active = true;
domains);
}
+ static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
+ {
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
+ intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
+ &crtc_state->dp_m_n);
+ intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
+ &crtc_state->dp_m2_n2);
+ }
+
+ intel_set_transcoder_timings(crtc_state);
+
+ i9xx_set_pipeconf(crtc_state);
+ }
+
static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
+ i9xx_configure_cpu_transcoder(new_crtc_state);
- intel_set_transcoder_timings(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
}
- i9xx_set_pipeconf(new_crtc_state);
-
crtc->active = true;
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
+ i9xx_configure_cpu_transcoder(new_crtc_state);
- intel_set_transcoder_timings(new_crtc_state);
intel_set_pipe_src_size(new_crtc_state);
- i9xx_set_pipeconf(new_crtc_state);
-
crtc->active = true;
if (DISPLAY_VER(dev_priv) != 2)
m_n->tu = 64;
compute_m_n(data_clock,
link_clock * nlanes * 8,
- &m_n->gmch_m, &m_n->gmch_n,
+ &m_n->data_m, &m_n->data_n,
constant_n);
compute_m_n(pixel_clock, link_clock,
}
}
- static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_link_m_n *m_n)
+ void intel_zero_m_n(struct intel_link_m_n *m_n)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
+ /* corresponds to 0 register value */
+ memset(m_n, 0, sizeof(*m_n));
+ m_n->tu = 1;
+ }
- intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
- TU_SIZE(m_n->tu) | m_n->gmch_m);
- intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
- intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
- intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+ void intel_set_m_n(struct drm_i915_private *i915,
+ const struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg)
+ {
+ intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
+ intel_de_write(i915, data_n_reg, m_n->data_n);
+ intel_de_write(i915, link_m_reg, m_n->link_m);
+ /*
+ * On BDW+ writing LINK_N arms the double buffered update
+ * of all the M/N registers, so it must be written last.
+ */
+ intel_de_write(i915, link_n_reg, m_n->link_n);
}
- static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
- enum transcoder transcoder)
+ bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
+ enum transcoder transcoder)
{
if (IS_HASWELL(dev_priv))
return transcoder == TRANSCODER_EDP;
- /*
- * Strictly speaking some registers are available before
- * gen7, but we only support DRRS on gen7+
- */
- return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv);
+ return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv);
}
- static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
- const struct intel_link_m_n *m_n,
- const struct intel_link_m_n *m2_n2)
+ void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ const struct intel_link_m_n *m_n)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- enum transcoder transcoder = crtc_state->cpu_transcoder;
- if (DISPLAY_VER(dev_priv) >= 5) {
- intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
- TU_SIZE(m_n->tu) | m_n->gmch_m);
- intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
- m_n->gmch_n);
- intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
- m_n->link_m);
- intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
- m_n->link_n);
- /*
- * M2_N2 registers are set only if DRRS is supported
- * (to make sure the registers are not unnecessarily accessed).
- */
- if (m2_n2 && crtc_state->has_drrs &&
- transcoder_has_m2_n2(dev_priv, transcoder)) {
- intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
- TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
- intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
- m2_n2->gmch_n);
- intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
- m2_n2->link_m);
- intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
- m2_n2->link_n);
- }
- } else {
- intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
- TU_SIZE(m_n->tu) | m_n->gmch_m);
- intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
- intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
- intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
- }
+ if (DISPLAY_VER(dev_priv) >= 5)
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
+ PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
+ else
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
+ PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
- void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
+ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ const struct intel_link_m_n *m_n)
{
- const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
- if (m_n == M1_N1) {
- dp_m_n = &crtc_state->dp_m_n;
- dp_m2_n2 = &crtc_state->dp_m2_n2;
- } else if (m_n == M2_N2) {
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- /*
- * M2_N2 registers are not supported. Hence m2_n2 divider value
- * needs to be programmed into M1_N1.
- */
- dp_m_n = &crtc_state->dp_m2_n2;
- } else {
- drm_err(&i915->drm, "Unsupported divider value\n");
+ if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
return;
- }
- if (crtc_state->has_pch_encoder)
- intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
- else
- intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
+ intel_set_m_n(dev_priv, m_n,
+ PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
+ PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
* always be the user's requested size.
*/
intel_de_write(dev_priv, PIPESRC(pipe),
- ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
+ PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) |
+ PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1));
}
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
u32 tmp;
tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
- pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
- pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
+ pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1;
+ pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1;
}
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
switch (crtc_state->pipe_bpp) {
case 18:
- pipeconf |= PIPECONF_6BPC;
+ pipeconf |= PIPECONF_BPC_6;
break;
case 24:
- pipeconf |= PIPECONF_8BPC;
+ pipeconf |= PIPECONF_BPC_8;
break;
case 30:
- pipeconf |= PIPECONF_10BPC;
+ pipeconf |= PIPECONF_BPC_10;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
} else {
- pipeconf |= PIPECONF_PROGRESSIVE;
+ pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE;
}
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
- if (tmp & DISPPLANE_GAMMA_ENABLE)
+ if (tmp & DISP_PIPE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
if (!HAS_GMCH(dev_priv) &&
- tmp & DISPPLANE_PIPE_CSC_ENABLE)
+ tmp & DISP_PIPE_CSC_ENABLE)
crtc_state->csc_enable = true;
}
if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
IS_CHERRYVIEW(dev_priv)) {
switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_6BPC:
+ case PIPECONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_8BPC:
+ case PIPECONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_10BPC:
+ case PIPECONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
default:
+ MISSING_CASE(tmp);
break;
}
}
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
- pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
- PIPECONF_GAMMA_MODE_SHIFT;
+ pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp);
if (IS_CHERRYVIEW(dev_priv))
pipe_config->cgm_mode = intel_de_read(dev_priv,
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPECONF_6BPC;
+ val |= PIPECONF_BPC_6;
break;
case 24:
- val |= PIPECONF_8BPC;
+ val |= PIPECONF_BPC_8;
break;
case 30:
- val |= PIPECONF_10BPC;
+ val |= PIPECONF_BPC_10;
break;
case 36:
- val |= PIPECONF_12BPC;
+ val |= PIPECONF_BPC_12;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
}
if (crtc_state->dither)
- val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+ val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACED_ILK;
+ val |= PIPECONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_PROGRESSIVE;
+ val |= PIPECONF_INTERLACE_PF_PD_ILK;
/*
* This would end up with an odd purple hue over
u32 val = 0;
if (IS_HASWELL(dev_priv) && crtc_state->dither)
- val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
+ val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP;
if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
- val |= PIPECONF_INTERLACED_ILK;
+ val |= PIPECONF_INTERLACE_IF_ID_ILK;
else
- val |= PIPECONF_PROGRESSIVE;
+ val |= PIPECONF_INTERLACE_PF_PD_ILK;
if (IS_HASWELL(dev_priv) &&
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
switch (crtc_state->pipe_bpp) {
case 18:
- val |= PIPEMISC_6_BPC;
+ val |= PIPEMISC_BPC_6;
break;
case 24:
- val |= PIPEMISC_8_BPC;
+ val |= PIPEMISC_BPC_8;
break;
case 30:
- val |= PIPEMISC_10_BPC;
+ val |= PIPEMISC_BPC_10;
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
if (DISPLAY_VER(dev_priv) > 12)
- val |= PIPEMISC_12_BPC_ADLP;
+ val |= PIPEMISC_BPC_12_ADLP;
break;
default:
MISSING_CASE(crtc_state->pipe_bpp);
}
intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe),
- PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK,
+ PIPE_MISC2_BUBBLE_COUNTER_MASK,
scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN :
PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS);
}
tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
switch (tmp & PIPEMISC_BPC_MASK) {
- case PIPEMISC_6_BPC:
+ case PIPEMISC_BPC_6:
return 18;
- case PIPEMISC_8_BPC:
+ case PIPEMISC_BPC_8:
return 24;
- case PIPEMISC_10_BPC:
+ case PIPEMISC_BPC_10:
return 30;
/*
* PORT OUTPUT 12 BPC defined for ADLP+.
* on older platforms, need to find a workaround for 12 BPC
* MIPI DSI HW readout.
*/
- case PIPEMISC_12_BPC_ADLP:
+ case PIPEMISC_BPC_12_ADLP:
if (DISPLAY_VER(dev_priv) > 12)
return 36;
fallthrough;
return DIV_ROUND_UP(bps, link_bw * 8);
}
- static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
- struct intel_link_m_n *m_n)
+ void intel_get_m_n(struct drm_i915_private *i915,
+ struct intel_link_m_n *m_n,
+ i915_reg_t data_m_reg, i915_reg_t data_n_reg,
+ i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
-
- m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
- m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
- m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
- & ~TU_SIZE_MASK;
- m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
- m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
+ m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK;
+ m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK;
+ m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1;
}
- static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
- enum transcoder transcoder,
- struct intel_link_m_n *m_n,
- struct intel_link_m_n *m2_n2)
+ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (DISPLAY_VER(dev_priv) >= 5) {
- m_n->link_m = intel_de_read(dev_priv,
- PIPE_LINK_M1(transcoder));
- m_n->link_n = intel_de_read(dev_priv,
- PIPE_LINK_N1(transcoder));
- m_n->gmch_m = intel_de_read(dev_priv,
- PIPE_DATA_M1(transcoder))
- & ~TU_SIZE_MASK;
- m_n->gmch_n = intel_de_read(dev_priv,
- PIPE_DATA_N1(transcoder));
- m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
-
- if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
- m2_n2->link_m = intel_de_read(dev_priv,
- PIPE_LINK_M2(transcoder));
- m2_n2->link_n = intel_de_read(dev_priv,
- PIPE_LINK_N2(transcoder));
- m2_n2->gmch_m = intel_de_read(dev_priv,
- PIPE_DATA_M2(transcoder))
- & ~TU_SIZE_MASK;
- m2_n2->gmch_n = intel_de_read(dev_priv,
- PIPE_DATA_N2(transcoder));
- m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- }
- } else {
- m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
- m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
- m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
- & ~TU_SIZE_MASK;
- m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
- m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
- & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
- }
- }
-
- void intel_dp_get_m_n(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
- {
- if (pipe_config->has_pch_encoder)
- intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
+ if (DISPLAY_VER(dev_priv) >= 5)
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
+ PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
else
- intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->dp_m_n,
- &pipe_config->dp_m2_n2);
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
+ PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
- void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
+ enum transcoder transcoder,
+ struct intel_link_m_n *m_n)
{
- intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
- &pipe_config->fdi_m_n, NULL);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
+ return;
+
+ intel_get_m_n(dev_priv, m_n,
+ PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
+ PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state,
goto out;
switch (tmp & PIPECONF_BPC_MASK) {
- case PIPECONF_6BPC:
+ case PIPECONF_BPC_6:
pipe_config->pipe_bpp = 18;
break;
- case PIPECONF_8BPC:
+ case PIPECONF_BPC_8:
pipe_config->pipe_bpp = 24;
break;
- case PIPECONF_10BPC:
+ case PIPECONF_BPC_10:
pipe_config->pipe_bpp = 30;
break;
- case PIPECONF_12BPC:
+ case PIPECONF_BPC_12:
pipe_config->pipe_bpp = 36;
break;
default:
break;
}
- pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
- PIPECONF_GAMMA_MODE_SHIFT;
+ pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp);
pipe_config->csc_mode = intel_de_read(dev_priv,
PIPE_CSC_MODE(crtc->pipe));
active = true;
}
+ if (!active)
+ goto out;
+
intel_dsc_get_config(pipe_config);
if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
intel_uncompressed_joiner_get_config(pipe_config);
- if (!active)
- goto out;
-
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
return (src_w != dst_w || src_h != dst_h);
}
+ static bool intel_plane_do_async_flip(struct intel_plane *plane,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+ {
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+
+ if (!plane->async_flip)
+ return false;
+
+ if (!new_crtc_state->uapi.async_flip)
+ return false;
+
+ /*
+ * In platforms after DISPLAY13, we might need to override
+ * first async flip in order to change watermark levels
+ * as part of optimization.
+ * So for those, we are checking if this is a first async flip.
+ * For platforms earlier than DISPLAY13 we always do async flip.
+ */
+ return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip;
+ }
+
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
needs_scaling(new_plane_state))))
new_crtc_state->disable_lp_wm = true;
+ if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state))
+ new_plane_state->do_async_flip = true;
+
return 0;
}
struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
drm_dbg_kms(&i915->drm,
- "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n",
id, lane_count,
- m_n->gmch_m, m_n->gmch_n,
+ m_n->data_m, m_n->data_n,
m_n->link_m, m_n->link_n, m_n->tu);
}
if (intel_crtc_has_dp_encoder(pipe_config)) {
intel_dump_m_n_config(pipe_config, "dp m_n",
- pipe_config->lane_count, &pipe_config->dp_m_n);
- if (pipe_config->has_drrs)
- intel_dump_m_n_config(pipe_config, "dp m2_n2",
- pipe_config->lane_count,
- &pipe_config->dp_m2_n2);
+ pipe_config->lane_count,
+ &pipe_config->dp_m_n);
+ intel_dump_m_n_config(pipe_config, "dp m2_n2",
+ pipe_config->lane_count,
+ &pipe_config->dp_m2_n2);
}
drm_dbg_kms(&dev_priv->drm,
bool exact)
{
return m_n->tu == m2_n2->tu &&
- intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
- m2_n2->gmch_m, m2_n2->gmch_n, exact) &&
+ intel_compare_m_n(m_n->data_m, m_n->data_n,
+ m2_n2->data_m, m2_n2->data_n, exact) &&
intel_compare_m_n(m_n->link_m, m_n->link_n,
m2_n2->link_m, m2_n2->link_n, exact);
}
&pipe_config->name,\
!fastset)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)", \
+ "(expected tu %i data %i/%i link %i/%i, " \
+ "found tu %i, data %i/%i link %i/%i)", \
current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
+ current_config->name.data_m, \
+ current_config->name.data_n, \
current_config->name.link_m, \
current_config->name.link_n, \
pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
+ pipe_config->name.data_m, \
+ pipe_config->name.data_n, \
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
!intel_compare_link_m_n(&current_config->alt_name, \
&pipe_config->name, !fastset)) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "(expected tu %i gmch %i/%i link %i/%i, " \
- "or tu %i gmch %i/%i link %i/%i, " \
- "found tu %i, gmch %i/%i link %i/%i)", \
+ "(expected tu %i data %i/%i link %i/%i, " \
+ "or tu %i data %i/%i link %i/%i, " \
+ "found tu %i, data %i/%i link %i/%i)", \
current_config->name.tu, \
- current_config->name.gmch_m, \
- current_config->name.gmch_n, \
+ current_config->name.data_m, \
+ current_config->name.data_n, \
current_config->name.link_m, \
current_config->name.link_n, \
current_config->alt_name.tu, \
- current_config->alt_name.gmch_m, \
- current_config->alt_name.gmch_n, \
+ current_config->alt_name.data_m, \
+ current_config->alt_name.data_n, \
current_config->alt_name.link_m, \
current_config->alt_name.link_n, \
pipe_config->name.tu, \
- pipe_config->name.gmch_m, \
- pipe_config->name.gmch_n, \
+ pipe_config->name.data_m, \
+ pipe_config->name.data_n, \
pipe_config->name.link_m, \
pipe_config->name.link_n); \
ret = false; \
PIPE_CONF_CHECK_I(lane_count);
PIPE_CONF_CHECK_X(lane_lat_optim_mask);
- if (DISPLAY_VER(dev_priv) < 8) {
- PIPE_CONF_CHECK_M_N(dp_m_n);
-
- if (current_config->has_drrs)
- PIPE_CONF_CHECK_M_N(dp_m2_n2);
- } else
+ if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) {
PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
+ } else {
+ PIPE_CONF_CHECK_M_N(dp_m_n);
+ PIPE_CONF_CHECK_M_N(dp_m2_n2);
+ }
PIPE_CONF_CHECK_X(output_types);
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
struct intel_crtc *slave_crtc, *master_crtc;
slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
if (!slave_crtc) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
- "CRTC + 1 to be used, doesn't exist\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Big joiner configuration requires "
+ "CRTC + 1 to be used, doesn't exist\n",
+ crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
if (slave_crtc_state->uapi.enable)
goto claimed;
- DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
- slave_crtc->base.base.id, slave_crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Used as slave for big joiner\n",
+ slave_crtc->base.base.id, slave_crtc->base.name);
return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
claimed:
- DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
- "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
- slave_crtc->base.base.id, slave_crtc->base.name,
- master_crtc->base.base.id, master_crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
+ "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
return -EINVAL;
}
cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state);
intel_update_cdclk(i915);
- intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
+ intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK");
cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
}
udelay(150); /* wait for warmup */
}
- intel_de_write(dev_priv, PIPECONF(pipe),
- PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+ intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
pipe_name(pipe));
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
- DISPLAY_PLANE_ENABLE);
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
- DISPLAY_PLANE_ENABLE);
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
- DISPLAY_PLANE_ENABLE);
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK);
drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK);
intel_de_write(dev_priv, PIPECONF(pipe), 0);
intel_de_posting_read(dev_priv, PIPECONF(pipe));
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
+ skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}
}
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ intel_power_domains_sanitize_state(dev_priv);
}
void intel_display_resume(struct drm_device *dev)
#include <linux/pwm.h>
#include <linux/sched/clock.h>
+#include <drm/dp/drm_dp_dual_mode_helper.h>
+#include <drm/dp/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_dual_mode_helper.h>
-#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dsc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
struct intel_fb_view view;
+ /* Indicates if async flip is required */
+ bool do_async_flip;
+
/* Plane pxp decryption state */
bool decrypt;
};
struct intel_dp_mst_encoder;
- /*
- * enum link_m_n_set:
- * When platform provides two set of M_N registers for dp, we can
- * program them and switch between them incase of DRRS.
- * But When only one such register is provided, we have to program the
- * required divider value on that registers itself based on the DRRS state.
- *
- * M1_N1 : Program dp_m_n on M1_N1 registers
- * dp_m2_n2 on M2_N2 registers (If supported)
- *
- * M2_N2 : Program dp_m2_n2 on M1_N1 registers
- * M2_N2 registers are not supported
- */
-
- enum link_m_n_set {
- /* Sets the m1_n1 and m2_n2 */
- M1_N1 = 0,
- M2_N2
- };
struct intel_dp_compliance_data {
unsigned long edid;
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
+#include <drm/dp/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_backlight.h"
+ #include "intel_combo_phy_regs.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
- #define DP_DPRX_ESI_LEN 14
-
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
i915->max_cdclk_freq * 48 /
intel_dp_mode_to_fec_clock(mode_clock);
- DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner);
+ drm_dbg_kms(&i915->drm, "Max big joiner bpp: %u\n", max_bpp_bigjoiner);
bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner);
}
return true;
}
- static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- int bpc)
+ static bool intel_dp_hdmi_bpc_possible(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int bpc)
{
- return intel_hdmi_deep_color_possible(crtc_state, bpc,
- intel_dp->has_hdmi_sink,
- intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
+ return intel_hdmi_bpc_possible(crtc_state, bpc, intel_dp->has_hdmi_sink,
+ intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
}
if (intel_dp->dfp.min_tmds_clock) {
for (; bpc >= 10; bpc -= 2) {
- if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
+ if (intel_dp_hdmi_bpc_possible(intel_dp, crtc_state, bpc))
break;
}
}
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
- pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count;
+ pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count;
if (!HAS_DDI(dev_priv))
g4x_dp_set_clock(encoder, pipe_config);
}
static bool
- intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
+ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
- return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
- sink_irq_vector, DP_DPRX_ESI_LEN) ==
- DP_DPRX_ESI_LEN;
+ return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4;
+ }
+
+ static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
+ {
+ int retry;
+
+ for (retry = 0; retry < 3; retry++) {
+ if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1,
+ &esi[1], 3) == 3)
+ return true;
+ }
+
+ return false;
}
bool
}
static ssize_t
- intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
+ intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915,
+ const struct hdmi_drm_infoframe *drm_infoframe,
struct dp_sdp *sdp,
size_t size)
{
len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
if (len < 0) {
- DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
+ drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n");
return -ENOSPC;
}
if (len != infoframe_size) {
- DRM_DEBUG_KMS("wrong static hdr metadata size\n");
+ drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n");
return -ENOSPC;
}
sizeof(sdp));
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
+ len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv,
+ &crtc_state->infoframes.drm.drm,
&sdp, sizeof(sdp));
break;
default:
switch (data->phy_pattern) {
case DP_PHY_TEST_PATTERN_NONE:
- DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
+ drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
break;
case DP_PHY_TEST_PATTERN_D10_2:
- DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
+ drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
break;
case DP_PHY_TEST_PATTERN_ERROR_COUNT:
- DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
+ drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE |
DDI_DP_COMP_CTL_SCRAMBLED_0);
break;
case DP_PHY_TEST_PATTERN_PRBS7:
- DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
+ drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n");
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
break;
* current firmware of DPR-100 could not set it, so hardcoding
* now for compliance test.
*/
- DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
pattern_val = 0x3e0f83e0;
intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
pattern_val = 0x0f83e0f8;
* current firmware of DPR-100 could not set it, so hardcoding
* now for compliance test.
*/
- DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
+ drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n");
pattern_val = 0xFB;
intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
u8 link_status[DP_LINK_STATUS_SIZE];
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
link_status) < 0) {
- DRM_DEBUG_KMS("failed to get link status\n");
+ drm_dbg_kms(&i915->drm, "failed to get link status\n");
return;
}
static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct drm_dp_phy_test_params *data =
&intel_dp->compliance.test_data.phytest;
if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
- DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
+ drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n");
return DP_TEST_NAK;
}
}
static void
- intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled)
+ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
{
- drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled);
+ bool handled = false;
- if (esi[1] & DP_CP_IRQ) {
- intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
- *handled = true;
- }
+ drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
+ if (handled)
+ ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+
+ if (esi[1] & DP_CP_IRQ) {
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
+ ack[1] |= DP_CP_IRQ;
+ }
+ }
+
+ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
+ {
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 link_status[DP_LINK_STATUS_SIZE] = {};
+ const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
+
+ if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
+ esi_link_status_size) != esi_link_status_size) {
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s] Failed to read link status\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] channel EQ not ok, retraining\n",
+ encoder->base.base.id, encoder->base.name);
+ return false;
+ }
+
+ return true;
}
/**
drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
for (;;) {
- /*
- * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
- * pass in "esi+10" to drm_dp_channel_eq_ok(), which
- * takes a 6-byte array. So we actually need 16 bytes
- * here.
- *
- * Somebody who knows what the limits actually are
- * should check this, but for now this is at least
- * harmless and avoids a valid compiler warning about
- * using more of the array than we have allocated.
- */
- u8 esi[DP_DPRX_ESI_LEN+2] = {};
- bool handled;
- int retry;
+ u8 esi[4] = {};
+ u8 ack[4] = {};
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(&i915->drm,
break;
}
- /* check link status - esi[10] = 0x200c */
+ drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi);
+
if (intel_dp->active_mst_links > 0 && link_ok &&
- !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
- drm_dbg_kms(&i915->drm,
- "channel EQ not ok, retraining\n");
- link_ok = false;
+ esi[3] & LINK_STATUS_CHANGED) {
+ if (!intel_dp_mst_link_status(intel_dp))
+ link_ok = false;
+ ack[3] |= LINK_STATUS_CHANGED;
}
- drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);
+ intel_dp_mst_hpd_irq(intel_dp, esi, ack);
- intel_dp_mst_hpd_irq(intel_dp, esi, &handled);
-
- if (!handled)
+ if (!memchr_inv(ack, 0, sizeof(ack)))
break;
- for (retry = 0; retry < 3; retry++) {
- int wret;
-
- wret = drm_dp_dpcd_write(&intel_dp->aux,
- DP_SINK_COUNT_ESI+1,
- &esi[1], 3);
- if (wret == 3)
- break;
- }
+ if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
+ drm_dbg_kms(&i915->drm, "Failed to ack ESI\n");
}
return link_ok;
mutex_lock(&dev->mode_config.mutex);
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
+ if (!edid) {
+ /* Fallback to EDID from ACPI OpRegion, if any */
+ edid = intel_opregion_get_edid(intel_connector);
+ if (edid)
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] Using OpRegion EDID\n",
+ connector->base.id, connector->name);
+ }
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_connector_update_edid_property(connector, edid);
intel_connector = container_of(work, typeof(*intel_connector),
modeset_retry_work);
connector = &intel_connector->base;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
- connector->name);
+ drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id,
+ connector->name);
/* Grab the locks before changing connector property*/
mutex_lock(&connector->dev->mode_config.mutex);
else
frame->colorspace = HDMI_COLORSPACE_RGB;
- drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
+ drm_hdmi_avi_infoframe_colorimetry(frame, conn_state);
/* nonsense combination */
drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
if (ycbcr420_output)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_36;
else
- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36;
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_36;
case 10:
if (!has_hdmi_sink)
return false;
if (ycbcr420_output)
return hdmi->y420_dc_modes & DRM_EDID_YCBCR420_DC_30;
else
- return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_30;
+ return info->edid_hdmi_rgb444_dc_modes & DRM_EDID_HDMI_DC_30;
case 8:
return true;
default:
return intel_mode_valid_max_plane_size(dev_priv, mode, false);
}
- bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
- int bpc, bool has_hdmi_sink, bool ycbcr420_output)
+ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
+ int bpc, bool has_hdmi_sink, bool ycbcr420_output)
{
struct drm_atomic_state *state = crtc_state->uapi.state;
struct drm_connector_state *connector_state;
struct drm_connector *connector;
int i;
- if (crtc_state->pipe_bpp < bpc * 3)
- return false;
-
for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->uapi.crtc)
continue;
return true;
}
- static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
- int bpc)
+ static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->uapi.crtc->dev);
* HDMI deep color affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
- if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI))
+ if (bpc > 8 && crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI))
return false;
/* Display Wa_1405510057:icl,ehl */
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
- return intel_hdmi_deep_color_possible(crtc_state, bpc,
- crtc_state->has_hdmi_sink,
- intel_hdmi_is_ycbcr420(crtc_state));
+ return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink,
+ intel_hdmi_is_ycbcr420(crtc_state));
}
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- int clock)
+ int clock, bool respect_downstream_limits)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state);
int bpc;
- for (bpc = 12; bpc >= 10; bpc -= 2) {
- if (hdmi_deep_color_possible(crtc_state, bpc) &&
- hdmi_port_clock_valid(intel_hdmi,
- intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output),
- true, crtc_state->has_hdmi_sink) == MODE_OK)
+ /*
+ * pipe_bpp could already be below 8bpc due to FDI
+ * bandwidth constraints. HDMI minimum is 8bpc however.
+ */
+ bpc = max(crtc_state->pipe_bpp / 3, 8);
+
+ /*
+ * We will never exceed downstream TMDS clock limits while
+ * attempting deep color. If the user insists on forcing an
+ * out of spec mode they will have to be satisfied with 8bpc.
+ */
+ if (!respect_downstream_limits)
+ bpc = 8;
+
+ for (; bpc >= 8; bpc -= 2) {
+ int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output);
+
+ if (hdmi_bpc_possible(crtc_state, bpc) &&
+ hdmi_port_clock_valid(intel_hdmi, tmds_clock,
+ respect_downstream_limits,
+ crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
- return 8;
+ return -EINVAL;
}
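
For reference, the bpc-to-clock relation the loop relies on follows standard
HDMI scaling: YCbCr 4:2:0 output halves the TMDS character rate and deep color
scales it by bpc/8. A sketch, assuming intel_hdmi_tmds_clock() implements the
spec formula (rounding details may differ in-tree):

static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output)
{
	/* YCbCr 4:2:0 carries half as many TMDS characters per pixel clock */
	if (ycbcr420_output)
		clock /= 2;

	/* Deep color scales the TMDS clock by bpc/8 relative to 8bpc */
	return DIV_ROUND_CLOSEST(clock * bpc, 8);
}

With clock = 594000 kHz and bpc = 10, this yields 742500 kHz, which the
downstream limits then have to admit for the loop to settle on 10bpc.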
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state,
+ bool respect_downstream_limits)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
clock *= 2;
- bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock,
+ respect_downstream_limits);
+ if (bpc < 0)
+ return bpc;
- crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc,
- intel_hdmi_is_ycbcr420(crtc_state));
+ crtc_state->port_clock =
+ intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state));
/*
* pipe_bpp could already be below 8bpc due to
* FDI bandwidth constraints. We shouldn't bump it
- * back up to 8bpc in that case.
+ * back up to the HDMI minimum 8bpc in that case.
*/
- if (crtc_state->pipe_bpp > bpc * 3)
- crtc_state->pipe_bpp = bpc * 3;
+ crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3);
drm_dbg_kms(&i915->drm,
"picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);
- if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
- false, crtc_state->has_hdmi_sink) != MODE_OK) {
- drm_dbg_kms(&i915->drm,
- "unsupported HDMI clock (%d kHz), rejecting mode\n",
- crtc_state->port_clock);
- return -EINVAL;
- }
-
return 0;
}
static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB;
}
- ret = intel_hdmi_compute_clock(encoder, crtc_state);
+ ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
if (ret) {
if (intel_hdmi_is_ycbcr420(crtc_state) ||
!connector->base.ycbcr_420_allowed ||
return ret;
crtc_state->output_format = intel_hdmi_output_format(connector, true);
- ret = intel_hdmi_compute_clock(encoder, crtc_state);
+ ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits);
}
return ret;
pipe_config->has_audio =
intel_hdmi_has_audio(encoder, pipe_config, conn_state);
- ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state);
+ /*
+ * Try to respect downstream TMDS clock limits first, if
+ * that fails assume the user might know something we don't.
+ */
+ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, true);
if (ret)
+ ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported HDMI clock (%d kHz), rejecting mode\n",
+ pipe_config->hw.adjusted_mode.crtc_clock);
return ret;
+ }
if (intel_hdmi_is_ycbcr420(pipe_config)) {
ret = intel_panel_fitting(pipe_config, conn_state);
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
+
+ /* Older VBTs are often buggy and can't be trusted :( Play it safe. */
+ if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ !intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
+ hdmi->dp_dual_mode.max_tmds_clock = 0;
+ }
}
static bool
*/
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/dp/drm_dp_dual_mode_helper.h>
#include <drm/drm_edid.h>
#include "intel_de.h"
static bool lspcon_detect_vendor(struct intel_lspcon *lspcon)
{
struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(dp);
struct drm_dp_dpcd_ident *ident;
u32 vendor_oui;
if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) {
- DRM_ERROR("Can't read description\n");
+ drm_err(&i915->drm, "Can't read description\n");
return false;
}
switch (vendor_oui) {
case LSPCON_VENDOR_MCA_OUI:
lspcon->vendor = LSPCON_VENDOR_MCA;
- DRM_DEBUG_KMS("Vendor: Mega Chips\n");
+ drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n");
break;
case LSPCON_VENDOR_PARADE_OUI:
lspcon->vendor = LSPCON_VENDOR_PARADE;
- DRM_DEBUG_KMS("Vendor: Parade Tech\n");
+ drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n");
break;
default:
- DRM_ERROR("Invalid/Unknown vendor OUI\n");
+ drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n");
return false;
}
void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
{
- struct intel_digital_port *dig_port =
- container_of(lspcon, struct intel_digital_port, lspcon);
- struct drm_device *dev = dig_port->base.base.dev;
- struct intel_dp *dp = lspcon_to_intel_dp(lspcon);
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 hdr_caps;
int ret;
- ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon),
+ ret = drm_dp_dpcd_read(&intel_dp->aux, get_hdr_status_reg(lspcon),
&hdr_caps, 1);
if (ret < 0) {
- drm_dbg_kms(dev, "HDR capability detection failed\n");
+ drm_dbg_kms(&i915->drm, "HDR capability detection failed\n");
lspcon->hdr_supported = false;
} else if (hdr_caps & 0x1) {
- drm_dbg_kms(dev, "LSPCON capable of HDR\n");
+ drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n");
lspcon->hdr_supported = true;
}
}
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &intel_dp->aux.ddc;
if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode)) {
- DRM_DEBUG_KMS("Error reading LSPCON mode\n");
+ drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
return current_mode;
static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
enum drm_lspcon_mode current_mode;
current_mode = lspcon_get_current_mode(lspcon);
if (current_mode == mode)
goto out;
- DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n",
- lspcon_mode_name(mode));
+ drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n",
+ lspcon_mode_name(mode));
wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400);
if (current_mode != mode)
- DRM_ERROR("LSPCON mode hasn't settled\n");
+ drm_err(&i915->drm, "LSPCON mode hasn't settled\n");
out:
- DRM_DEBUG_KMS("Current LSPCON mode %s\n",
- lspcon_mode_name(current_mode));
+ drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n",
+ lspcon_mode_name(current_mode));
return current_mode;
}
enum drm_lspcon_mode mode)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int err;
enum drm_lspcon_mode current_mode;
struct i2c_adapter *adapter = &intel_dp->aux.ddc;
err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode);
if (err) {
- DRM_ERROR("Error reading LSPCON mode\n");
+ drm_err(&i915->drm, "Error reading LSPCON mode\n");
return err;
}
if (current_mode == mode) {
- DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n");
+ drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n");
return 0;
}
err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode);
if (err < 0) {
- DRM_ERROR("LSPCON mode change failed\n");
+ drm_err(&i915->drm, "LSPCON mode change failed\n");
return err;
}
lspcon->mode = mode;
- DRM_DEBUG_KMS("LSPCON mode changed done\n");
+ drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n");
return 0;
}
static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 rev;
if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV,
&rev) != 1) {
- DRM_DEBUG_KMS("Native AUX CH down\n");
+ drm_dbg_kms(&i915->drm, "Native AUX CH down\n");
return false;
}
- DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n",
- rev >> 4, rev & 0xf);
+ drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n",
+ rev >> 4, rev & 0xf);
return true;
}
int retry;
enum drm_dp_dual_mode_type adaptor_type;
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct i2c_adapter *adapter = &intel_dp->aux.ddc;
enum drm_lspcon_mode expected_mode;
}
if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) {
- DRM_DEBUG_KMS("No LSPCON detected, found %s\n",
- drm_dp_get_dual_mode_type_name(adaptor_type));
+ drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n",
+ drm_dp_get_dual_mode_type_name(adaptor_type));
return false;
}
/* Yay ... got a LSPCON device */
- DRM_DEBUG_KMS("LSPCON detected\n");
+ drm_dbg_kms(&i915->drm, "LSPCON detected\n");
lspcon->mode = lspcon_wait_mode(lspcon, expected_mode);
/*
*/
if (lspcon->mode != DRM_LSPCON_MODE_PCON) {
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) {
- DRM_ERROR("LSPCON mode change to PCON failed\n");
+ drm_err(&i915->drm, "LSPCON mode change to PCON failed\n");
return false;
}
}
static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
{
struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
unsigned long start = jiffies;
while (1) {
if (intel_digital_port_connected(&dig_port->base)) {
- DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
- jiffies_to_msecs(jiffies - start));
+ drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n",
+ jiffies_to_msecs(jiffies - start));
return;
}
usleep_range(10000, 15000);
}
- DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n");
+ drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n");
}
static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux)
ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL,
&avi_if_ctrl, 1);
if (ret < 0) {
- DRM_ERROR("Failed to read AVI IF control\n");
+ drm_err(aux->drm_dev, "Failed to read AVI IF control\n");
return false;
}
return true;
}
- DRM_ERROR("Parade FW not ready to accept AVI IF\n");
+ drm_err(aux->drm_dev, "Parade FW not ready to accept AVI IF\n");
return false;
}
while (block_count < 4) {
if (!lspcon_parade_fw_ready(aux)) {
- DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n",
- block_count);
+ drm_dbg_kms(aux->drm_dev, "LSPCON FW not ready, block %d\n",
+ block_count);
return false;
}
data = avi_buf + block_count * 8;
ret = drm_dp_dpcd_write(aux, reg, data, 8);
if (ret < 0) {
- DRM_ERROR("Failed to write AVI IF block %d\n",
- block_count);
+ drm_err(aux->drm_dev, "Failed to write AVI IF block %d\n",
+ block_count);
return false;
}
avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count;
ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1);
if (ret < 0) {
- DRM_ERROR("Failed to update (0x%x), block %d\n",
- reg, block_count);
+ drm_err(aux->drm_dev, "Failed to update (0x%x), block %d\n",
+ reg, block_count);
return false;
}
block_count++;
}
- DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n");
+ drm_dbg_kms(aux->drm_dev, "Wrote AVI IF blocks successfully\n");
return true;
}
*/
if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) {
- DRM_ERROR("Invalid length of infoframes\n");
+ drm_err(aux->drm_dev, "Invalid length of infoframes\n");
return false;
}
memcpy(&avi_if[1], frame, len);
if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) {
- DRM_DEBUG_KMS("Failed to write infoframe blocks\n");
+ drm_dbg_kms(aux->drm_dev, "Failed to write infoframe blocks\n");
return false;
}
mdelay(50);
continue;
} else {
- DRM_ERROR("DPCD write failed at:0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD write failed at:0x%x\n", reg);
return false;
}
}
reg = LSPCON_MCA_AVI_IF_CTRL;
ret = drm_dp_dpcd_read(aux, reg, &val, 1);
if (ret < 0) {
- DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
return false;
}
ret = drm_dp_dpcd_write(aux, reg, &val, 1);
if (ret < 0) {
- DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
return false;
}
val = 0;
ret = drm_dp_dpcd_read(aux, reg, &val, 1);
if (ret < 0) {
- DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
return false;
}
if (val == LSPCON_MCA_AVI_IF_HANDLED)
- DRM_DEBUG_KMS("AVI IF handled by FW\n");
+ drm_dbg_kms(aux->drm_dev, "AVI IF handled by FW\n");
return true;
}
{
bool ret = true;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
switch (type) {
frame, len);
break;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n");
+ drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n");
/* It uses the legacy hsw implementation for the same */
hsw_write_infoframe(encoder, crtc_state, type, frame, len);
break;
}
if (!ret) {
- DRM_ERROR("Failed to write infoframes\n");
+ drm_err(&i915->drm, "Failed to write infoframes\n");
return;
}
}
u8 buf[VIDEO_DIP_DATA_SIZE];
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_lspcon *lspcon = &dig_port->lspcon;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
if (!lspcon->active) {
- DRM_ERROR("Writing infoframes while LSPCON disabled ?\n");
+ drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n");
return;
}
conn_state->connector,
adjusted_mode);
if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
+ drm_err(&i915->drm, "couldn't fill AVI infoframe\n");
return;
}
frame.avi.colorspace = HDMI_COLORSPACE_RGB;
/* Set the Colorspace as per the HDMI spec */
- drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state);
+ drm_hdmi_avi_infoframe_colorimetry(&frame.avi, conn_state);
/* nonsense combination */
drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf));
if (ret < 0) {
- DRM_ERROR("Failed to pack AVI IF\n");
+ drm_err(&i915->drm, "Failed to pack AVI IF\n");
return;
}
ret = drm_dp_dpcd_read(aux, reg, &val, 1);
if (ret < 0) {
- DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
return false;
}
ret = drm_dp_dpcd_read(aux, reg, &val, 1);
if (ret < 0) {
- DRM_ERROR("DPCD read failed, address 0x%x\n", reg);
+ drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg);
return false;
}
bool lspcon_init(struct intel_digital_port *dig_port)
{
- struct intel_dp *dp = &dig_port->dp;
+ struct intel_dp *intel_dp = &dig_port->dp;
struct intel_lspcon *lspcon = &dig_port->lspcon;
- struct drm_connector *connector = &dp->attached_connector->base;
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct drm_connector *connector = &intel_dp->attached_connector->base;
lspcon->active = false;
lspcon->mode = DRM_LSPCON_MODE_INVALID;
if (!lspcon_probe(lspcon)) {
- DRM_ERROR("Failed to probe lspcon\n");
+ drm_err(&i915->drm, "Failed to probe lspcon\n");
return false;
}
- if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) {
- DRM_ERROR("LSPCON DPCD read failed\n");
+ if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) {
+ drm_err(&i915->drm, "LSPCON DPCD read failed\n");
return false;
}
if (!lspcon_detect_vendor(lspcon)) {
- DRM_ERROR("LSPCON vendor detection failed\n");
+ drm_err(&i915->drm, "LSPCON vendor detection failed\n");
return false;
}
connector->ycbcr_420_allowed = true;
lspcon->active = true;
- DRM_DEBUG_KMS("Success: LSPCON init\n");
+ drm_dbg_kms(&i915->drm, "Success: LSPCON init\n");
return true;
}
{
struct intel_lspcon *lspcon = &dig_port->lspcon;
struct drm_device *dev = dig_port->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
enum drm_lspcon_mode expected_mode;
- if (!intel_bios_is_lspcon_present(dev_priv, dig_port->base.port))
+ if (!intel_bios_is_lspcon_present(i915, dig_port->base.port))
return;
if (!lspcon->active) {
if (!lspcon_init(dig_port)) {
- DRM_ERROR("LSPCON init failed on port %c\n",
- port_name(dig_port->base.port));
+ drm_err(&i915->drm, "LSPCON init failed on port %c\n",
+ port_name(dig_port->base.port));
return;
}
}
return;
if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON))
- DRM_ERROR("LSPCON resume failed\n");
+ drm_err(&i915->drm, "LSPCON resume failed\n");
else
- DRM_DEBUG_KMS("LSPCON resume success\n");
+ drm_dbg_kms(&i915->drm, "LSPCON resume success\n");
}
*/
#include <linux/delay.h>
- #include <linux/dma-buf-map.h>
+ #include <linux/iosys-map.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
WREG_GFX(3, 0x00);
WREG_GFX(4, 0x00);
WREG_GFX(5, 0x40);
- WREG_GFX(6, 0x05);
+ /* GCTL6 should be 0x05, but we configure memmapsl to 0xb8000 (text mode)
+ * so that the chip doesn't hang when running kexec/kdump on G200_SE rev42.
+ */
+ WREG_GFX(6, 0x0d);
WREG_GFX(7, 0x0f);
WREG_GFX(8, 0x0f);
static void
mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
- struct drm_rect *clip, const struct dma_buf_map *map)
+ struct drm_rect *clip, const struct iosys_map *map)
{
void __iomem *dst = mdev->vram;
void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
*/
- #include <linux/dma-buf-map.h>
+ #include <linux/iosys-map.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
return 0;
}
- static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_map *map,
+ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+ const struct iosys_map *map,
struct drm_rect *rect)
{
struct cirrus_device *cirrus = to_cirrus(fb->dev);
return 0;
}
- static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
+ static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb,
+ const struct iosys_map *map)
{
struct drm_rect fullscreen = {
.x1 = 0,
.remove = cirrus_pci_remove,
};
-static int __init cirrus_init(void)
-{
- if (drm_firmware_drivers_only())
- return -EINVAL;
-
- return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
- pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
+drm_module_pci_driver(cirrus_pci_driver)
MODULE_DEVICE_TABLE(pci, pciidlist);
MODULE_LICENSE("GPL");
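
The drm_module_pci_driver() conversion is behavior-preserving: the helper
registers the PCI driver at module init and honors drm_firmware_drivers_only().
Roughly equivalent to the removed boilerplate (a sketch; see
<drm/drm_module.h> for the actual macro, whose errno may differ):

/* Sketch only -- approximates what the helper macro expands to */
static int __init cirrus_init(void)
{
	if (drm_firmware_drivers_only())
		return -ENODEV;

	return pci_register_driver(&cirrus_pci_driver);
}

static void __exit cirrus_exit(void)
{
	pci_unregister_driver(&cirrus_pci_driver);
}

module_init(cirrus_init);
module_exit(cirrus_exit);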
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
- #include <linux/dma-buf-map.h>
+ #include <linux/iosys-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
{
const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
- struct dma_buf_map src_map, dst_map;
+ struct iosys_map src_map, dst_map;
pgoff_t i;
/* Single TTM move. NOP */
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ }
+
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
}
EXPORT_SYMBOL(ttm_bo_kunmap);
- int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
int ret;
if (!vaddr_iomem)
return -ENOMEM;
- dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);
+ iosys_map_set_vaddr_iomem(map, vaddr_iomem);
} else {
struct ttm_operation_ctx ctx = {
if (!vaddr)
return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
+ iosys_map_set_vaddr(map, vaddr);
}
return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);
- void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
+ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
{
struct ttm_resource *mem = bo->resource;
- if (dma_buf_map_is_null(map))
+ if (iosys_map_is_null(map))
return;
if (!map->is_iomem)
vunmap(map->vaddr);
else if (!mem->bus.addr)
iounmap(map->vaddr_iomem);
- dma_buf_map_clear(map);
+ iosys_map_clear(map);
ttm_mem_io_free(bo->bdev, bo->resource);
}
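
A hypothetical caller of the converted interface; the iosys_map fields used
below (vaddr, vaddr_iomem, is_iomem) are the same ones ttm_bo_vunmap()
inspects, so the copy works for both system- and I/O-memory BOs. The names
'bo', 'data' and 'size' are illustrative:

	struct iosys_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	/* The map knows whether it points at system or I/O memory */
	if (map.is_iomem)
		memcpy_toio(map.vaddr_iomem, data, size);
	else
		memcpy(map.vaddr, data, size);

	ttm_bo_vunmap(bo, &map);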
ghost_obj->ttm = NULL;
else
bo->ttm = NULL;
- bo->resource = NULL;
dma_resv_unlock(&ghost_obj->base._resv);
ttm_bo_put(ghost_obj);
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
- bo->resource = NULL;
ttm_bo_assign_mem(bo, sys_res);
return 0;
* Authors: Christian König
*/
- #include <linux/dma-buf-map.h>
+ #include <linux/iosys-map.h>
#include <linux/io-mapping.h>
#include <linux/scatterlist.h>
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_bo_driver.h>
+/**
+ * ttm_resource_init - resource object constructor
+ * @bo: buffer object this resource is allocated for
+ * @place: placement of the resource
+ * @res: the resource object to initialize
+ *
+ * Initialize a new resource object. Counterpart of &ttm_resource_fini.
+ */
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res)
res->bus.offset = 0;
res->bus.is_iomem = false;
res->bus.caching = ttm_cached;
+ res->bo = bo;
}
EXPORT_SYMBOL(ttm_resource_init);
+/**
+ * ttm_resource_fini - resource destructor
+ * @man: the resource manager this resource belongs to
+ * @res: the resource to clean up
+ *
+ * Should be used by resource manager backends to clean up the TTM resource
+ * objects before freeing the underlying structure. Counterpart of
+ * &ttm_resource_init.
+ */
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res)
+{
+}
+EXPORT_SYMBOL(ttm_resource_fini);
+
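
A hypothetical backend free path using the new destructor; struct my_resource
is an illustrative wrapper, not an in-tree type:

struct my_resource {
	struct ttm_resource base;
	/* backend-private allocator state would live here */
};

static void my_manager_free(struct ttm_resource_manager *man,
			    struct ttm_resource *res)
{
	struct my_resource *my_res = container_of(res, struct my_resource, base);

	ttm_resource_fini(man, res);	/* clean up the base before freeing */
	kfree(my_res);
}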
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res_ptr)
}
EXPORT_SYMBOL(ttm_resource_compat);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo)
+{
+ spin_lock(&bo->bdev->lru_lock);
+ res->bo = bo;
+ spin_unlock(&bo->bdev->lru_lock);
+}
+
/**
* ttm_resource_manager_init
*
* @man: memory manager object to init
+ * @bdev: ttm device this manager belongs to
 * @p_size: size of the managed area in pages.
*
* Initialise core parts of a manager object.
*/
void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
unsigned long p_size)
{
unsigned i;
spin_lock_init(&man->move_lock);
+ man->bdev = bdev;
man->size = p_size;
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
EXPORT_SYMBOL(ttm_resource_manager_debug);
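
Callers of ttm_resource_manager_init() gain the new @bdev argument; a
hypothetical VRAM manager setup would now read (illustrative names):

	/* p_size is in pages; drv->bdev is the driver's ttm_device */
	ttm_resource_manager_init(man, &drv->bdev, vram_size >> PAGE_SHIFT);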
static void ttm_kmap_iter_iomap_map_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *dmap,
+ struct iosys_map *dmap,
pgoff_t i)
{
struct ttm_kmap_iter_iomap *iter_io =
addr = io_mapping_map_local_wc(iter_io->iomap, iter_io->cache.offs +
(((resource_size_t)i - iter_io->cache.i)
<< PAGE_SHIFT));
- dma_buf_map_set_vaddr_iomem(dmap, addr);
+ iosys_map_set_vaddr_iomem(dmap, addr);
}
static void ttm_kmap_iter_iomap_unmap_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *map)
+ struct iosys_map *map)
{
io_mapping_unmap_local(map->vaddr_iomem);
}
*/
static void ttm_kmap_iter_linear_io_map_local(struct ttm_kmap_iter *iter,
- struct dma_buf_map *dmap,
+ struct iosys_map *dmap,
pgoff_t i)
{
struct ttm_kmap_iter_linear_io *iter_io =
container_of(iter, typeof(*iter_io), base);
*dmap = iter_io->dmap;
- dma_buf_map_incr(dmap, i * PAGE_SIZE);
+ iosys_map_incr(dmap, i * PAGE_SIZE);
}
static const struct ttm_kmap_iter_ops ttm_kmap_iter_linear_io_ops = {
}
if (mem->bus.addr) {
- dma_buf_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
+ iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr);
iter_io->needs_unmap = false;
} else {
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
iter_io->needs_unmap = true;
memset(&iter_io->dmap, 0, sizeof(iter_io->dmap));
if (mem->bus.caching == ttm_write_combined)
- dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
- ioremap_wc(mem->bus.offset,
- bus_size));
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap_wc(mem->bus.offset,
+ bus_size));
else if (mem->bus.caching == ttm_cached)
- dma_buf_map_set_vaddr(&iter_io->dmap,
- memremap(mem->bus.offset, bus_size,
- MEMREMAP_WB |
- MEMREMAP_WT |
- MEMREMAP_WC));
+ iosys_map_set_vaddr(&iter_io->dmap,
+ memremap(mem->bus.offset, bus_size,
+ MEMREMAP_WB |
+ MEMREMAP_WT |
+ MEMREMAP_WC));
/* If uncached requested or if mapping cached or wc failed */
- if (dma_buf_map_is_null(&iter_io->dmap))
- dma_buf_map_set_vaddr_iomem(&iter_io->dmap,
- ioremap(mem->bus.offset,
- bus_size));
+ if (iosys_map_is_null(&iter_io->dmap))
+ iosys_map_set_vaddr_iomem(&iter_io->dmap,
+ ioremap(mem->bus.offset,
+ bus_size));
- if (dma_buf_map_is_null(&iter_io->dmap)) {
+ if (iosys_map_is_null(&iter_io->dmap)) {
ret = -ENOMEM;
goto out_io_free;
}
struct ttm_device *bdev,
struct ttm_resource *mem)
{
- if (iter_io->needs_unmap && dma_buf_map_is_set(&iter_io->dmap)) {
+ if (iter_io->needs_unmap && iosys_map_is_set(&iter_io->dmap)) {
if (iter_io->dmap.is_iomem)
iounmap(iter_io->dmap.vaddr_iomem);
else
#define XRES_MAX 8192
#define YRES_MAX 8192
+#define NUM_OVERLAY_PLANES 8
+
struct vkms_writeback_job {
- struct dma_buf_map map[DRM_FORMAT_MAX_PLANES];
- struct dma_buf_map data[DRM_FORMAT_MAX_PLANES];
+ struct iosys_map map[DRM_FORMAT_MAX_PLANES];
+ struct iosys_map data[DRM_FORMAT_MAX_PLANES];
};
struct vkms_composer {
struct drm_framebuffer fb;
struct drm_rect src, dst;
- struct dma_buf_map map[4];
+ struct iosys_map map[4];
unsigned int offset;
unsigned int pitch;
unsigned int cpp;
* Lookup table for converting pixel data after the color conversion
* matrix @ctm. See drm_crtc_enable_color_mgmt(). The blob (if not
* NULL) is an array of &struct drm_color_lut.
+ *
+ * Note that for mostly historical reasons stemming from Xorg heritage,
+ * this is also used to store the color map (also sometimes color lut,
+ * CLUT or color palette) for indexed formats like DRM_FORMAT_C8.
*/
struct drm_property_blob *gamma_lut;
/**
* @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
* by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint32_t gamma_size;
/**
* @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
* GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ *
+ * Note that atomic drivers need to instead use
+ * &drm_crtc_state.gamma_lut. See drm_crtc_enable_color_mgmt().
*/
uint16_t *gamma_store;
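
For atomic drivers the notes above boil down to: advertise the LUT through
color management and keep the legacy ramp size in sync. A sketch, with the
crtc pointer and the 256-entry size as illustrative assumptions:

	/* Legacy SETGAMMA path: report a 256-entry ramp */
	drm_mode_crtc_set_gamma_size(crtc, 256);

	/*
	 * Atomic path: no degamma, no CTM, 256-entry gamma LUT; the
	 * atomic state then carries it in drm_crtc_state.gamma_lut.
	 */
	drm_crtc_enable_color_mgmt(crtc, 0, false, 256);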
*/
spinlock_t commit_lock;
- #ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
*
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
- #endif
/**
* @crc:
#include <linux/types.h>
#include <linux/mutex.h>
- #include <linux/dma-buf-map.h>
+ #include <linux/iosys-map.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_caching.h>
struct ttm_place;
struct ttm_buffer_object;
struct ttm_placement;
- struct dma_buf_map;
+ struct iosys_map;
struct io_mapping;
struct sg_table;
struct scatterlist;
* @use_type: The memory type is enabled.
* @use_tt: If a TT object should be used for the backing store.
* @size: Size of the managed region.
+ * @bdev: ttm device this manager belongs to
* @func: structure pointer implementing the range manager. See above
* @move_lock: lock for move fence
- * static information. bdev::driver::io_mem_free is never used.
- * @lru: The lru list for this memory type.
* @move: The fence of the last pipelined move operation.
+ * @lru: The lru list for this memory type.
*
* This structure is used to identify and manage memory types for a device.
*/
*/
bool use_type;
bool use_tt;
+ struct ttm_device *bdev;
uint64_t size;
const struct ttm_resource_manager_func *func;
spinlock_t move_lock;
/*
- * Protected by the global->lru_lock.
+ * Protected by @move_lock.
*/
-
- struct list_head lru[TTM_MAX_BO_PRIORITY];
+ struct dma_fence *move;
/*
- * Protected by @move_lock.
+ * Protected by the global->lru_lock.
*/
- struct dma_fence *move;
+
+ struct list_head lru[TTM_MAX_BO_PRIORITY];
};
/**
* @mem_type: Resource type of the allocation.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
+ * @bo: weak reference to the BO, protected by ttm_device::lru_lock
*
* Structure indicating the placement and space resources used by a
* buffer object.
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
+ struct ttm_buffer_object *bo;
};
/**
*/
struct ttm_kmap_iter_linear_io {
struct ttm_kmap_iter base;
- struct dma_buf_map dmap;
+ struct iosys_map dmap;
bool needs_unmap;
};
void ttm_resource_init(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *res);
+void ttm_resource_fini(struct ttm_resource_manager *man,
+ struct ttm_resource *res);
+
int ttm_resource_alloc(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource **res);
void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res);
bool ttm_resource_compat(struct ttm_resource *res,
struct ttm_placement *placement);
+void ttm_resource_set_bo(struct ttm_resource *res,
+ struct ttm_buffer_object *bo);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
+ struct ttm_device *bdev,
unsigned long p_size);
int ttm_resource_manager_evict_all(struct ttm_device *bdev,