# Please keep these build lists sorted!
# core driver code
-i915-y += i915_drv.o \
+i915-y += i915_driver.o \
i915_config.o \
i915_irq.o \
i915_getparam.o \
i915_active.o \
i915_buddy.o \
i915_cmd_parser.o \
+ i915_deps.o \
i915_gem_evict.o \
i915_gem_gtt.o \
i915_gem_ww.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
display/intel_overlay.o \
+ display/intel_pch_display.o \
+ display/intel_pch_refclk.o \
display/intel_plane_initial.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
+ display/intel_display_trace.o \
display/intel_dp.o \
display/intel_dp_aux.o \
display/intel_dp_aux_backlight.o \
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
-#include "i915_trace.h"
+#include "gt/intel_rps.h"
+
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
+#include "intel_display_trace.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
-#include "gt/intel_rps.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
const struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
const struct intel_plane_state *new_master_plane_state;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, plane->pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, plane->pipe);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
return NULL;
}
-void intel_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+void intel_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_update_plane(&plane->base, crtc);
+ trace_intel_plane_update_noarm(&plane->base, crtc);
+
+ if (plane->update_noarm)
+ plane->update_noarm(plane, crtc_state, plane_state);
+}
+
+void intel_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ trace_intel_plane_update_arm(&plane->base, crtc);
if (crtc_state->uapi.async_flip && plane->async_flip)
plane->async_flip(plane, crtc_state, plane_state, true);
else
- plane->update_plane(plane, crtc_state, plane_state);
+ plane->update_arm(plane, crtc_state, plane_state);
}
-void intel_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+void intel_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc_state);
+ trace_intel_plane_disable_arm(&plane->base, crtc);
+ plane->disable_arm(plane, crtc_state);
+}
+
+void intel_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 update_mask = new_crtc_state->update_planes;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane *plane;
+ int i;
+
+ if (new_crtc_state->uapi.async_flip)
+ return;
+
+ /*
+ * Since we only write non-arming registers here,
+ * the order does not matter even for skl+.
+ */
+ for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
+ if (crtc->pipe != plane->pipe ||
+ !(update_mask & BIT(plane->id)))
+ continue;
+
+ /* TODO: for mailbox updates this should be skipped */
+ if (new_plane_state->uapi.visible ||
+ new_plane_state->planar_slave)
+ intel_plane_update_noarm(plane, new_crtc_state, new_plane_state);
+ }
}
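
The split works because most plane registers can be written at any time, while a final "arming" write latches the whole set at the next vblank. A sketch of the intended call order (the two helpers are from this patch; the framing is illustrative, assuming the arm phase runs inside the vblank-evasion critical section):

    /* Phase 1: non-arming writes; per the comment above, ordering
     * across planes does not matter, so this can run before the
     * vblank-evasion critical section. */
    intel_update_planes_on_crtc(state, crtc);

    /* Phase 2: arming writes, kept as short as possible inside
     * the critical section (skl+ shown; i9xx has an equivalent). */
    skl_arm_planes_on_crtc(state, crtc);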
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void skl_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state =
intel_atomic_get_new_plane_state(state, plane);
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
if (new_plane_state->uapi.visible ||
- new_plane_state->planar_slave) {
- intel_update_plane(plane, new_crtc_state, new_plane_state);
- } else {
- intel_disable_plane(plane, new_crtc_state);
- }
+ new_plane_state->planar_slave)
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
+ else
+ intel_plane_disable_arm(plane, new_crtc_state);
}
}
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
!(update_mask & BIT(plane->id)))
continue;
+ /*
+ * TODO: for mailbox updates intel_plane_update_noarm()
+ * would have to be called here as well.
+ */
if (new_plane_state->uapi.visible)
- intel_update_plane(plane, new_crtc_state, new_plane_state);
+ intel_plane_update_arm(plane, new_crtc_state, new_plane_state);
else
- intel_disable_plane(plane, new_crtc_state);
+ intel_plane_disable_arm(plane, new_crtc_state);
}
}
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, true);
state->rps_interactive = true;
}
return;
if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ intel_rps_mark_interactive(&to_gt(dev_priv)->rps, false);
state->rps_interactive = false;
}
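
For context, the dev_priv->gt to to_gt(dev_priv) conversions in these hunks target the GT accessor introduced around this series; at the time it amounted to roughly the following (a sketch of its initial form; the real definition lives in i915_drv.h and has since evolved):

    static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
    {
            return &i915->gt;
    }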
#include <linux/module.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_privacy_screen_consumer.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include "gt/gen8_ppgtt.h"
-#include "pxp/intel_pxp.h"
-
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
+#include "icl_dsi.h"
#include "intel_acpi.h"
#include "intel_atomic.h"
#include "intel_atomic_plane.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
#include "intel_panel.h"
+#include "intel_pch_display.h"
+#include "intel_pch_refclk.h"
#include "intel_pcode.h"
#include "intel_pipe_crc.h"
#include "intel_plane_initial.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_dsi_pll.h"
#include "vlv_sideband.h"
-
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-static void ilk_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+#include "vlv_dsi.h"
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
is_trans_port_sync_slave(crtc_state);
}
+static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->bigjoiner_slave)
+ return crtc_state->bigjoiner_linked_crtc;
+ else
+ return to_intel_crtc(crtc_state->uapi.crtc);
+}
+
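The new helper centralizes a bigjoiner master lookup that other hunks in this patch previously open-coded; a usage sketch (illustrative):

    /* before: */
    if (crtc_state->bigjoiner_slave)
            master = crtc_state->bigjoiner_linked_crtc;

    /* after: */
    struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);
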
static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
assert_plane_disabled(plane);
}
-void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 val;
- bool enabled;
-
- val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
- enabled = !!(val & TRANS_ENABLE);
- I915_STATE_WARN(enabled,
- "transcoder assertion failed, should be off on pipe %c but is still active\n",
- pipe_name(pipe));
-}
-
-static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- i915_reg_t dp_reg)
-{
- enum pipe port_pipe;
- bool state;
-
- state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
-
- I915_STATE_WARN(state && port_pipe == pipe,
- "PCH DP %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
-
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH DP %c still using transcoder B\n",
- port_name(port));
-}
-
-static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum port port,
- i915_reg_t hdmi_reg)
-{
- enum pipe port_pipe;
- bool state;
-
- state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
-
- I915_STATE_WARN(state && port_pipe == pipe,
- "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
- port_name(port), pipe_name(pipe));
-
- I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
- "IBX PCH HDMI %c still using transcoder B\n",
- port_name(port));
-}
-
-static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- enum pipe port_pipe;
-
- assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
- assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
-
- I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
- port_pipe == pipe,
- "PCH VGA enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
-
- I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
- port_pipe == pipe,
- "PCH LVDS enabled on transcoder %c, should be disabled\n",
- pipe_name(pipe));
-
- /* PCH SDVOB multiplex with HDMIB */
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
- assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
-}
-
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
struct intel_digital_port *dig_port,
unsigned int expected_mask)
expected_mask);
}
-static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
- u32 val, pipeconf_val;
-
- /* Make sure PCH DPLL is enabled */
- assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
-
- /* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, pipe);
- assert_fdi_rx_enabled(dev_priv, pipe);
-
- if (HAS_PCH_CPT(dev_priv)) {
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- /*
- * Workaround: Set the timing override bit
- * before enabling the pch transcoder.
- */
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, reg, val);
- }
-
- reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
-
- if (HAS_PCH_IBX(dev_priv)) {
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_FRAME_START_DELAY_MASK;
- val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
-
- /*
- * Make the BPC in transcoder be consistent with
- * that in pipeconf reg. For HDMI we must use 8bpc
- * here for both 8bpc and 12bpc.
- */
- val &= ~PIPECONF_BPC_MASK;
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- val |= PIPECONF_8BPC;
- else
- val |= pipeconf_val & PIPECONF_BPC_MASK;
- }
-
- val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
- if (HAS_PCH_IBX(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
- val |= TRANS_LEGACY_INTERLACED_ILK;
- else
- val |= TRANS_INTERLACED;
- } else {
- val |= TRANS_PROGRESSIVE;
- }
-
- intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
- if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
- pipe_name(pipe));
-}
-
-static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
-{
- u32 val, pipeconf_val;
-
- /* FDI must be feeding us bits for PCH ports */
- assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
- assert_fdi_rx_enabled(dev_priv, PIPE_A);
-
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- /* Workaround: set timing override bit. */
- val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
- /* Configure frame start delay to match the CPU */
- val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
- val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1);
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
-
- val = TRANS_ENABLE;
- pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
-
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
- PIPECONF_INTERLACED_ILK)
- val |= TRANS_INTERLACED;
- else
- val |= TRANS_PROGRESSIVE;
-
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 100))
- drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
-}
-
-static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- i915_reg_t reg;
- u32 val;
-
- /* FDI relies on the transcoder */
- assert_fdi_tx_disabled(dev_priv, pipe);
- assert_fdi_rx_disabled(dev_priv, pipe);
-
- /* Ports must be off as well */
- assert_pch_ports_disabled(dev_priv, pipe);
-
- reg = PCH_TRANSCONF(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, reg, val);
- /* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
- pipe_name(pipe));
-
- if (HAS_PCH_CPT(dev_priv)) {
- /* Workaround: Clear the timing override chicken bit again. */
- reg = TRANS_CHICKEN2(pipe);
- val = intel_de_read(dev_priv, reg);
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, reg, val);
- }
-}
-
-void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = intel_de_read(dev_priv, LPT_TRANSCONF);
- val &= ~TRANS_ENABLE;
- intel_de_write(dev_priv, LPT_TRANSCONF, val);
- /* wait for PCH transcoder off, transcoder state */
- if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
- TRANS_STATE_ENABLE, 50))
- drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
-
- /* Workaround: clear timing override bit. */
- val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
- val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
-}
-
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
intel_wait_for_pipe_off(old_crtc_state);
}
-bool
-intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
- u64 modifier)
-{
- return info->is_yuv &&
- info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
-}
-
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
unsigned int plane_size;
- plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+ if (rem_info->plane[i].linear)
+ plane_size = rem_info->plane[i].size;
+ else
+ plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height;
+
if (plane_size == 0)
continue;
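
A worked illustration of the new branch, with hypothetical numbers: a tiled remap with dst_stride = 4 and height = 8 still yields plane_size = 4 * 8 = 32, whereas a linear remap has no tile grid for that product to describe, so its precomputed size is reported directly:

    /* hypothetical values, for illustration only */
    plane_size = 4 * 8;                    /* tiled: dst_stride * height */
    plane_size = rem_info->plane[i].size;  /* linear: explicit .size */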
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return DISPLAY_VER(dev_priv) < 4 ||
- (plane->has_fbc &&
+ (plane->fbc &&
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
{
const struct drm_framebuffer *fb = state->hw.fb;
unsigned int cpp = fb->format->cpp[color_plane];
- unsigned int pitch = state->view.color_plane[color_plane].stride;
+ unsigned int pitch = state->view.color_plane[color_plane].mapping_stride;
return y * pitch + x * cpp;
}
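
A quick worked example of the offset math (hypothetical values): with mapping_stride = 4096 bytes, cpp = 4 and (x, y) = (10, 2), the function returns 2 * 4096 + 10 * 4 = 8232 bytes into the mapping:

    /* hypothetical: pitch = 4096, cpp = 4, x = 10, y = 2 */
    offset = 2 * 4096 + 10 * 4;    /* = 8232 */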
*y += state->view.color_plane[color_plane].y;
}
-/*
- * From the Sky Lake PRM:
- * "The Color Control Surface (CCS) contains the compression status of
- * the cache-line pairs. The compression state of the cache-line pair
- * is specified by 2 bits in the CCS. Each CCS cache-line represents
- * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
- * cache-line-pairs. CCS is always Y tiled."
- *
- * Since cache line pairs refers to horizontally adjacent cache lines,
- * each cache line in the CCS corresponds to an area of 32x16 cache
- * lines on the main surface. Since each pixel is 4 bytes, this gives
- * us a ratio of one byte in the CCS for each 8x16 pixels in the
- * main surface.
- */
-static const struct drm_format_info skl_ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
- .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
-};
-
-/*
- * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the
- * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles
- * in the main surface. With 4 byte pixels and each Y-tile having dimensions of
- * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in
- * the main surface.
- */
-static const struct drm_format_info gen12_ccs_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_YUYV, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_YVYU, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_UYVY, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_VYUY, .num_planes = 2,
- .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 2, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_XYUV8888, .num_planes = 2,
- .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 },
- .hsub = 1, .vsub = 1, .is_yuv = true },
- { .format = DRM_FORMAT_NV12, .num_planes = 4,
- .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P010, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P012, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
- { .format = DRM_FORMAT_P016, .num_planes = 4,
- .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 },
- .hsub = 2, .vsub = 2, .is_yuv = true },
-};
-
-/*
- * Same as gen12_ccs_formats[] above, but with additional surface used
- * to pass Clear Color information in plane 2 with 64 bits of data.
- */
-static const struct drm_format_info gen12_ccs_cc_formats[] = {
- { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, },
- { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
- { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3,
- .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 },
- .hsub = 1, .vsub = 1, .has_alpha = true },
-};
-
-static const struct drm_format_info *
-lookup_format_info(const struct drm_format_info formats[],
- int num_formats, u32 format)
-{
- int i;
-
- for (i = 0; i < num_formats; i++) {
- if (formats[i].format == format)
- return &formats[i];
- }
-
- return NULL;
-}
-
-static const struct drm_format_info *
-intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
-{
- switch (cmd->modifier[0]) {
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- return lookup_format_info(skl_ccs_formats,
- ARRAY_SIZE(skl_ccs_formats),
- cmd->pixel_format);
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return lookup_format_info(gen12_ccs_formats,
- ARRAY_SIZE(gen12_ccs_formats),
- cmd->pixel_format);
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return lookup_format_info(gen12_ccs_cc_formats,
- ARRAY_SIZE(gen12_ccs_cc_formats),
- cmd->pixel_format);
- default:
- return NULL;
- }
-}
-
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
* the highest stride limits of them all,
* if in case pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_first_crtc(dev_priv);
+ crtc = intel_first_crtc(dev_priv);
if (!crtc)
return 0;
*/
if (HAS_GMCH(dev_priv) &&
intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* Gen2 reports pipe underruns whenever all planes are disabled.
if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
- intel_disable_plane(plane, crtc_state);
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_plane_disable_arm(plane, crtc_state);
+ intel_crtc_wait_for_next_vblank(crtc);
}
unsigned int
static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
{
return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
- intel_has_gpu_reset(&dev_priv->gt));
+ intel_has_gpu_reset(to_gt(dev_priv)));
}
void intel_display_prepare_reset(struct drm_i915_private *dev_priv)
return;
/* We have a modeset vs reset deadlock, defensively unbreak it. */
- set_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ set_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
smp_mb__after_atomic();
- wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
+ wake_up_bit(&to_gt(dev_priv)->reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
drm_dbg_kms(&dev_priv->drm,
"Modeset potentially stuck, unbreaking through wedging\n");
- intel_gt_set_wedged(&dev_priv->gt);
+ intel_gt_set_wedged(to_gt(dev_priv));
}
/*
return;
/* reset doesn't touch the display */
- if (!test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ if (!test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
return;
state = fetch_and_zero(&dev_priv->modeset_restore_state);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
- clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
+ clear_bit_unlock(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags);
}
-static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
-{
- if (crtc_state->pch_pfit.enabled &&
- (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
- crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
- return false;
-
- if (crtc_state->dsc.compression_enable)
- return false;
-
- if (crtc_state->has_psr2)
- return false;
-
- if (crtc_state->splitter.enable)
- return false;
-
- return true;
-}
-
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
- if (IS_DG2(dev_priv)) {
- /*
- * Underrun recovery must always be disabled on DG2. However
- * the chicken bit meaning is inverted compared to other
- * platforms.
- */
+ /*
+ * Underrun recovery must always be disabled on display 13+.
+ * DG2 chicken bit meaning is inverted compared to other platforms.
+ */
+ if (IS_DG2(dev_priv))
tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
- } else if (DISPLAY_VER(dev_priv) >= 13) {
- if (underrun_recovery_supported(crtc_state))
- tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
- else
- tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
- }
+ else if (DISPLAY_VER(dev_priv) >= 13)
+ tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+
+ /* Wa_14010547955:dg2 */
+ if (IS_DG2_DISPLAY_STEP(dev_priv, STEP_B0, STEP_FOREVER))
+ tmp |= DG2_RENDER_CCSTAG_4_3_EN;
intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
if (cleanup_done)
continue;
- drm_crtc_wait_one_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));
return true;
}
return false;
}
-void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
-{
- u32 temp;
-
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
-
- mutex_lock(&dev_priv->sb_lock);
-
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- temp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int clock = crtc_state->hw.adjusted_mode.crtc_clock;
- u32 divsel, phaseinc, auxdiv, phasedir = 0;
- u32 temp;
-
- lpt_disable_iclkip(dev_priv);
-
- /* The iCLK virtual clock root frequency is in MHz,
- * but the adjusted_mode->crtc_clock in in KHz. To get the
- * divisors, it is necessary to divide one by another, so we
- * convert the virtual clock precision to KHz here for higher
- * precision.
- */
- for (auxdiv = 0; auxdiv < 2; auxdiv++) {
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
-
- desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- clock << auxdiv);
- divsel = (desired_divisor / iclk_pi_range) - 2;
- phaseinc = desired_divisor % iclk_pi_range;
-
- /*
- * Near 20MHz is a corner case which is
- * out of range for the 7-bit divisor
- */
- if (divsel <= 0x7f)
- break;
- }
-
- /* This should not happen with any sane values */
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
- drm_dbg_kms(&dev_priv->drm,
- "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock, auxdiv, divsel, phasedir, phaseinc);
-
- mutex_lock(&dev_priv->sb_lock);
-
- /* Program SSCDIVINTPHASE6 */
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
- temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
- temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
- temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
- temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
- temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
-
- /* Program SSCAUXDIV */
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
- temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
- intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
-
- /* Enable modulator and associated divider */
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- temp &= ~SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-
- /* Wait for initialization time */
- udelay(24);
-
- intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
-}
-
-int lpt_get_iclkip(struct drm_i915_private *dev_priv)
-{
- u32 divsel, phaseinc, auxdiv;
- u32 iclk_virtual_root_freq = 172800 * 1000;
- u32 iclk_pi_range = 64;
- u32 desired_divisor;
- u32 temp;
-
- if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
- return 0;
-
- mutex_lock(&dev_priv->sb_lock);
-
- temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
- if (temp & SBI_SSCCTL_DISABLE) {
- mutex_unlock(&dev_priv->sb_lock);
- return 0;
- }
-
- temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
- divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
- SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
- phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
- SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
-
- temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
- auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
- SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
-
- mutex_unlock(&dev_priv->sb_lock);
-
- desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
-
- return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
- desired_divisor << auxdiv);
-}
-
-static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
- enum pipe pch_transcoder)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
- intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
- intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
- intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
-
- intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
- intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
- intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
- intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
- intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
-}
-
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
const struct drm_connector_state *connector_state;
const struct drm_connector *connector;
struct intel_encoder *encoder = NULL;
+ struct intel_crtc *master_crtc;
int num_encoders = 0;
int i;
+ master_crtc = intel_master_crtc(crtc_state);
+
for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
- if (connector_state->crtc != &crtc->base)
+ if (connector_state->crtc != &master_crtc->base)
continue;
encoder = to_intel_encoder(connector_state->best_encoder);
drm_WARN(encoder->base.dev, num_encoders != 1,
"%d encoders for pipe %c\n",
- num_encoders, pipe_name(crtc->pipe));
+ num_encoders, pipe_name(master_crtc->pipe));
return encoder;
}
-/*
- * Enable PCH resources required for PCH ports:
- * - PCH PLLs
- * - FDI training & RX/TX
- * - update transcoder timings
- * - DP transcoding bits
- * - transcoder
- */
-static void ilk_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- u32 temp;
-
- assert_pch_transcoder_disabled(dev_priv, pipe);
-
- /* For PCH output, training FDI link */
- intel_fdi_link_train(crtc, crtc_state);
-
- /* We need to program the right clock selection before writing the pixel
- * mutliplier into the DPLL. */
- if (HAS_PCH_CPT(dev_priv)) {
- u32 sel;
-
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp |= TRANS_DPLL_ENABLE(pipe);
- sel = TRANS_DPLLB_SEL(pipe);
- if (crtc_state->shared_dpll ==
- intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
- temp |= sel;
- else
- temp &= ~sel;
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
- }
-
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that to have
- * the right LVDS enable sequence. */
- intel_enable_shared_dpll(crtc_state);
-
- /* set transcoder timing, panel must allow it */
- assert_pps_unlocked(dev_priv, pipe);
- ilk_pch_transcoder_set_timings(crtc_state, pipe);
-
- intel_fdi_normal_train(crtc);
-
- /* For PCH DP, enable TRANS_DP_CTL */
- if (HAS_PCH_CPT(dev_priv) &&
- intel_crtc_has_dp_encoder(crtc_state)) {
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
- i915_reg_t reg = TRANS_DP_CTL(pipe);
- enum port port;
-
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK |
- TRANS_DP_BPC_MASK);
- temp |= TRANS_DP_OUTPUT_ENABLE;
- temp |= bpc << 9; /* same format but at 11:9 */
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
-
- port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
- temp |= TRANS_DP_PORT_SEL(port);
-
- intel_de_write(dev_priv, reg, temp);
- }
-
- ilk_enable_pch_transcoder(crtc_state);
-}
-
-void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
-
- assert_pch_transcoder_disabled(dev_priv, PIPE_A);
-
- lpt_program_iclkip(crtc_state);
-
- /* Set transcoder timing. */
- ilk_pch_transcoder_set_timings(crtc_state, PIPE_A);
-
- lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
-}
-
static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
}
/* We need to wait for a vblank before we can disable the plane. */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
}
if (need_vbl_wait)
- intel_wait_for_vblank(i915, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
+ intel_psr_pre_plane_update(state, crtc);
+
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
if (intel_fbc_pre_update(state, crtc))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
if (!needs_async_flip_vtd_wa(old_crtc_state) &&
needs_async_flip_vtd_wa(new_crtc_state))
*/
if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* IVB workaround: must disable low power watermarks for at least
*/
if (old_crtc_state->hw.active &&
new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
/*
* If we're doing a modeset we don't need to do any
!(update_mask & BIT(plane->id)))
continue;
- intel_disable_plane(plane, new_crtc_state);
+ intel_plane_disable_arm(plane, new_crtc_state);
if (old_plane_state->uapi.visible)
fb_bits |= plane->frontbuffer_bit;
static void intel_encoders_update_prepare(struct intel_atomic_state *state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc_state *new_crtc_state, *old_crtc_state;
+ struct intel_crtc *crtc;
struct drm_connector_state *new_conn_state;
struct drm_connector *connector;
int i;
+ /*
+ * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits.
+ * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook.
+ */
+ if (i915->dpll.mgr) {
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_crtc_state->shared_dpll = old_crtc_state->shared_dpll;
+ new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state;
+ }
+ }
+
+ if (!state->modeset)
+ return;
+
for_each_new_connector_in_state(&state->base, connector, new_conn_state,
i) {
struct intel_connector *intel_connector;
struct drm_connector *connector;
int i;
+ if (!state->modeset)
+ return;
+
for_each_new_connector_in_state(&state->base, connector, new_conn_state,
i) {
struct intel_connector *intel_connector;
}
}
-static void intel_encoders_pre_disable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- const struct drm_connector_state *old_conn_state;
- struct drm_connector *conn;
- int i;
-
- for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
- struct intel_encoder *encoder =
- to_intel_encoder(old_conn_state->best_encoder);
-
- if (old_conn_state->crtc != &crtc->base)
- continue;
-
- if (encoder->pre_disable)
- encoder->pre_disable(state, encoder, old_crtc_state,
- old_conn_state);
- }
-}
-
static void intel_encoders_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
- plane->disable_plane(plane, crtc_state);
+ plane->disable_arm(plane, crtc_state);
}
static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_enable_transcoder(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
- ilk_pch_enable(state, new_crtc_state);
+ ilk_pch_enable(state, crtc);
intel_crtc_vblank_on(new_crtc_state);
* in case there are more corner cases we don't know about.
*/
if (new_crtc_state->has_pch_encoder) {
- intel_wait_for_vblank(dev_priv, pipe);
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
+ intel_crtc_wait_for_next_vblank(crtc);
}
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(master->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *master_crtc_state;
+ struct intel_crtc *master_crtc;
struct drm_connector_state *conn_state;
struct drm_connector *conn;
struct intel_encoder *encoder = NULL;
int i;
- if (crtc_state->bigjoiner_slave)
- master = crtc_state->bigjoiner_linked_crtc;
-
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
+ master_crtc = intel_master_crtc(crtc_state);
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
- if (conn_state->crtc != &master->base)
+ if (conn_state->crtc != &master_crtc->base)
continue;
encoder = to_intel_encoder(conn_state->best_encoder);
break;
}
- if (!crtc_state->bigjoiner_slave) {
- /* need to enable VDSC, which we skipped in pre-enable */
- intel_dsc_enable(encoder, crtc_state);
- } else {
- /*
- * Enable sequence steps 1-7 on bigjoiner master
- */
- intel_encoders_pre_pll_enable(state, master);
- if (master_crtc_state->shared_dpll)
- intel_enable_shared_dpll(master_crtc_state);
- intel_encoders_pre_enable(state, master);
+ /*
+ * Enable sequence steps 1-7 on bigjoiner master
+ */
+ if (crtc_state->bigjoiner_slave)
+ intel_encoders_pre_pll_enable(state, master_crtc);
- /* and DSC on slave */
- intel_dsc_enable(NULL, crtc_state);
- }
+ if (crtc_state->shared_dpll)
+ intel_enable_shared_dpll(crtc_state);
+
+ if (crtc_state->bigjoiner_slave)
+ intel_encoders_pre_enable(state, master_crtc);
+
+ /* need to enable VDSC, which we skipped in pre-enable */
+ intel_dsc_enable(crtc_state);
if (DISPLAY_VER(dev_priv) >= 13)
intel_uncompressed_joiner_enable(crtc_state);
intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
}
* to change the workaround. */
hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
- intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
- intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
+ struct intel_crtc *wa_crtc;
+
+ wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);
+
+ intel_crtc_wait_for_next_vblank(wa_crtc);
+ intel_crtc_wait_for_next_vblank(wa_crtc);
}
}
ilk_pfit_disable(old_crtc_state);
if (old_crtc_state->has_pch_encoder)
- ilk_fdi_disable(crtc);
+ ilk_pch_disable(state, crtc);
intel_encoders_post_disable(state, crtc);
- if (old_crtc_state->has_pch_encoder) {
- ilk_disable_pch_transcoder(dev_priv, pipe);
-
- if (HAS_PCH_CPT(dev_priv)) {
- i915_reg_t reg;
- u32 temp;
-
- /* disable TRANS_DP_CTL */
- reg = TRANS_DP_CTL(pipe);
- temp = intel_de_read(dev_priv, reg);
- temp &= ~(TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_PORT_SEL_MASK);
- temp |= TRANS_DP_PORT_SEL_NONE;
- intel_de_write(dev_priv, reg, temp);
-
- /* disable DPLL_SEL */
- temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
- }
-
- ilk_fdi_pll_disable(crtc);
- }
+ if (old_crtc_state->has_pch_encoder)
+ ilk_pch_post_disable(state, crtc);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
static void hsw_crtc_disable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+
/*
* FIXME collapse everything to one hook.
* Need care with mst->ddi interactions.
*/
- intel_encoders_disable(state, crtc);
- intel_encoders_post_disable(state, crtc);
+ if (!old_crtc_state->bigjoiner_slave) {
+ intel_encoders_disable(state, crtc);
+ intel_encoders_post_disable(state, crtc);
+ }
}
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
/* prevents spurious underruns */
if (DISPLAY_VER(dev_priv) == 2)
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
* wait for planes to fully turn off before disabling the pipe.
*/
if (DISPLAY_VER(dev_priv) == 2)
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
intel_encoders_disable(state, crtc);
chv_crtc_clock_get(crtc, pipe_config);
else if (IS_VALLEYVIEW(dev_priv))
vlv_crtc_clock_get(crtc, pipe_config);
- else
- i9xx_crtc_clock_get(crtc, pipe_config);
-
- /*
- * Normally the dotclock is filled in by the encoder .get_config()
- * but in case the pipe is enabled w/o any ports we need a sane
- * default.
- */
- pipe_config->hw.adjusted_mode.crtc_clock =
- pipe_config->port_clock / pipe_config->pixel_multiplier;
-
- ret = true;
-
-out:
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return ret;
-}
-
-static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- int i;
- u32 val, final;
- bool has_lvds = false;
- bool has_cpu_edp = false;
- bool has_panel = false;
- bool has_ck505 = false;
- bool can_ssc = false;
- bool using_ssc_source = false;
-
- /* We need to take the global config into account */
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_LVDS:
- has_panel = true;
- has_lvds = true;
- break;
- case INTEL_OUTPUT_EDP:
- has_panel = true;
- if (encoder->port == PORT_A)
- has_cpu_edp = true;
- break;
- default:
- break;
- }
- }
-
- if (HAS_PCH_IBX(dev_priv)) {
- has_ck505 = dev_priv->vbt.display_clock_mode;
- can_ssc = has_ck505;
- } else {
- has_ck505 = false;
- can_ssc = true;
- }
-
- /* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
- u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
-
- if (!(temp & DPLL_VCO_ENABLE))
- continue;
-
- if ((temp & PLL_REF_INPUT_MASK) ==
- PLLB_REF_INPUT_SPREADSPECTRUMIN) {
- using_ssc_source = true;
- break;
- }
- }
-
- drm_dbg_kms(&dev_priv->drm,
- "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
- has_panel, has_lvds, has_ck505, using_ssc_source);
-
- /* Ironlake: try to setup display ref clock before DPLL
- * enabling. This is only under driver's control after
- * PCH B stepping, previous chipset stepping should be
- * ignoring this setting.
- */
- val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
-
- /* As we must carefully and slowly disable/enable each source in turn,
- * compute the final state we want first and check if we need to
- * make any changes at all.
- */
- final = val;
- final &= ~DREF_NONSPREAD_SOURCE_MASK;
- if (has_ck505)
- final |= DREF_NONSPREAD_CK505_ENABLE;
- else
- final |= DREF_NONSPREAD_SOURCE_ENABLE;
-
- final &= ~DREF_SSC_SOURCE_MASK;
- final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
- final &= ~DREF_SSC1_ENABLE;
-
- if (has_panel) {
- final |= DREF_SSC_SOURCE_ENABLE;
-
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
- final |= DREF_SSC1_ENABLE;
-
- if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc)
- final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- else
- final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else
- final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- } else if (using_ssc_source) {
- final |= DREF_SSC_SOURCE_ENABLE;
- final |= DREF_SSC1_ENABLE;
- }
-
- if (final == val)
- return;
-
- /* Always enable nonspread source */
- val &= ~DREF_NONSPREAD_SOURCE_MASK;
-
- if (has_ck505)
- val |= DREF_NONSPREAD_CK505_ENABLE;
- else
- val |= DREF_NONSPREAD_SOURCE_ENABLE;
-
- if (has_panel) {
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_ENABLE;
-
- /* SSC must be turned on before enabling the CPU output */
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
- val |= DREF_SSC1_ENABLE;
- } else
- val &= ~DREF_SSC1_ENABLE;
-
- /* Get SSC going before enabling the outputs */
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
-
- val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Enable CPU source on CPU attached eDP */
- if (has_cpu_edp) {
- if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- drm_dbg_kms(&dev_priv->drm,
- "Using SSC on eDP\n");
- val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
- } else
- val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
- } else
- val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
- } else {
- drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
-
- val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
-
- /* Turn off CPU output */
- val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
-
- if (!using_ssc_source) {
- drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
-
- /* Turn off the SSC source */
- val &= ~DREF_SSC_SOURCE_MASK;
- val |= DREF_SSC_SOURCE_DISABLE;
-
- /* Turn off SSC1 */
- val &= ~DREF_SSC1_ENABLE;
-
- intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
- intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
- udelay(200);
- }
- }
-
- BUG_ON(val != final);
-}
-
-/* Implements 3 different sequences from BSpec chapter "Display iCLK
- * Programming" based on the parameters passed:
- * - Sequence to enable CLKOUT_DP
- * - Sequence to enable CLKOUT_DP without spread
- * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
- */
-static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
- bool with_spread, bool with_fdi)
-{
- u32 reg, tmp;
-
- if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
- "FDI requires downspread\n"))
- with_spread = true;
- if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
- with_fdi = false;
-
- mutex_lock(&dev_priv->sb_lock);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_DISABLE;
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- udelay(24);
-
- if (with_spread) {
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- if (with_fdi)
- lpt_fdi_program_mphy(dev_priv);
- }
-
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
- tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-/* Sequence to disable CLKOUT_DP */
-void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
-{
- u32 reg, tmp;
-
- mutex_lock(&dev_priv->sb_lock);
-
- reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
- tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
- tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
- intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- if (!(tmp & SBI_SSCCTL_DISABLE)) {
- if (!(tmp & SBI_SSCCTL_PATHALT)) {
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- udelay(32);
- }
- tmp |= SBI_SSCCTL_DISABLE;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- }
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-#define BEND_IDX(steps) ((50 + (steps)) / 5)
-
-static const u16 sscdivintphase[] = {
- [BEND_IDX( 50)] = 0x3B23,
- [BEND_IDX( 45)] = 0x3B23,
- [BEND_IDX( 40)] = 0x3C23,
- [BEND_IDX( 35)] = 0x3C23,
- [BEND_IDX( 30)] = 0x3D23,
- [BEND_IDX( 25)] = 0x3D23,
- [BEND_IDX( 20)] = 0x3E23,
- [BEND_IDX( 15)] = 0x3E23,
- [BEND_IDX( 10)] = 0x3F23,
- [BEND_IDX( 5)] = 0x3F23,
- [BEND_IDX( 0)] = 0x0025,
- [BEND_IDX( -5)] = 0x0025,
- [BEND_IDX(-10)] = 0x0125,
- [BEND_IDX(-15)] = 0x0125,
- [BEND_IDX(-20)] = 0x0225,
- [BEND_IDX(-25)] = 0x0225,
- [BEND_IDX(-30)] = 0x0325,
- [BEND_IDX(-35)] = 0x0325,
- [BEND_IDX(-40)] = 0x0425,
- [BEND_IDX(-45)] = 0x0425,
- [BEND_IDX(-50)] = 0x0525,
-};
-
-/*
- * Bend CLKOUT_DP
- * steps -50 to 50 inclusive, in steps of 5
- * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
- * change in clock period = -(steps / 10) * 5.787 ps
- */
-static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
-{
- u32 tmp;
- int idx = BEND_IDX(steps);
-
- if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
- return;
-
- if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
- return;
-
- mutex_lock(&dev_priv->sb_lock);
-
- if (steps % 10 != 0)
- tmp = 0xAAAAAAAB;
- else
- tmp = 0x00000000;
- intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
- tmp &= 0xffff0000;
- tmp |= sscdivintphase[idx];
- intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
-
- mutex_unlock(&dev_priv->sb_lock);
-}
-
-#undef BEND_IDX
-
-static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
-{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
-
- if ((ctl & SPLL_PLL_ENABLE) == 0)
- return false;
-
- if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
- (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
- return true;
-
- if (IS_BROADWELL(dev_priv) &&
- (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
- return true;
-
- return false;
-}
-
-static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
- enum intel_dpll_id id)
-{
- u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
- u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
-
- if ((ctl & WRPLL_PLL_ENABLE) == 0)
- return false;
-
- if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
- return true;
-
- if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
- (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
- (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
- return true;
-
- return false;
-}
-
-static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- struct intel_encoder *encoder;
- bool has_fdi = false;
-
- for_each_intel_encoder(&dev_priv->drm, encoder) {
- switch (encoder->type) {
- case INTEL_OUTPUT_ANALOG:
- has_fdi = true;
- break;
- default:
- break;
- }
- }
-
- /*
- * The BIOS may have decided to use the PCH SSC
- * reference so we must not disable it until the
- * relevant PLLs have stopped relying on it. We'll
- * just leave the PCH SSC reference enabled in case
- * any active PLL is using it. It will get disabled
- * after runtime suspend if we don't have FDI.
- *
- * TODO: Move the whole reference clock handling
- * to the modeset sequence proper so that we can
- * actually enable/disable/reconfigure these things
- * safely. To do that we need to introduce a real
- * clock hierarchy. That would also allow us to do
- * clock bending finally.
- */
- dev_priv->pch_ssc_use = 0;
-
- if (spll_uses_pch_ssc(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
- }
-
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
- }
+ else
+ i9xx_crtc_clock_get(crtc, pipe_config);
- if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
- dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
- }
+ /*
+ * Normally the dotclock is filled in by the encoder .get_config()
+ * but in case the pipe is enabled w/o any ports we need a sane
+ * default.
+ */
+ pipe_config->hw.adjusted_mode.crtc_clock =
+ pipe_config->port_clock / pipe_config->pixel_multiplier;
- if (dev_priv->pch_ssc_use)
- return;
+ ret = true;
- if (has_fdi) {
- lpt_bend_clkout_dp(dev_priv, 0);
- lpt_enable_clkout_dp(dev_priv, true, true);
- } else {
- lpt_disable_clkout_dp(dev_priv);
- }
-}
+out:
+ intel_display_power_put(dev_priv, power_domain, wakeref);
-/*
- * Initialize reference clocks when the driver loads
- */
-void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
- ilk_init_pch_refclk(dev_priv);
- else if (HAS_PCH_LPT(dev_priv))
- lpt_init_pch_refclk(dev_priv);
+ return ret;
}
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
&pipe_config->dp_m2_n2);
}
-static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void ilk_get_fdi_m_n_config(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
&pipe_config->fdi_m_n, NULL);
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
- struct intel_shared_dpll *pll;
- enum intel_dpll_id pll_id;
- bool pll_active;
-
- pipe_config->has_pch_encoder = true;
-
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ilk_get_fdi_m_n_config(crtc, pipe_config);
-
- if (HAS_PCH_IBX(dev_priv)) {
- /*
- * The pipe->pch transcoder and pch transcoder->pll
- * mapping is fixed.
- */
- pll_id = (enum intel_dpll_id) crtc->pipe;
- } else {
- tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
- if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
- pll_id = DPLL_ID_PCH_PLL_B;
- else
- pll_id = DPLL_ID_PCH_PLL_A;
- }
-
- pipe_config->shared_dpll =
- intel_get_shared_dpll_by_id(dev_priv, pll_id);
- pll = pipe_config->shared_dpll;
-
- pll_active = intel_dpll_get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state);
- drm_WARN_ON(dev, !pll_active);
-
- tmp = pipe_config->dpll_hw_state.dpll;
- pipe_config->pixel_multiplier =
- ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
- >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->pixel_multiplier = 1;
- ilk_pch_clock_get(crtc, pipe_config);
- } else {
- pipe_config->pixel_multiplier = 1;
- }
+ ilk_pch_get_config(pipe_config);
intel_get_transcoder_timings(crtc, pipe_config);
intel_get_pipe_src_size(crtc, pipe_config);
return ret;
}
+static u8 bigjoiner_pipes(struct drm_i915_private *i915)
+{
+ if (DISPLAY_VER(i915) >= 12)
+ return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D);
+ else if (DISPLAY_VER(i915) >= 11)
+ return BIT(PIPE_B) | BIT(PIPE_C);
+ else
+ return 0;
+}
+
static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
return tmp & TRANS_DDI_FUNC_ENABLE;
}
+static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv)
+{
+ u8 master_pipes = 0, slave_pipes = 0;
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ enum intel_display_power_domain power_domain;
+ enum pipe pipe = crtc->pipe;
+ intel_wakeref_t wakeref;
+
+ if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0)
+ continue;
+
+ power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (!(tmp & BIG_JOINER_ENABLE))
+ continue;
+
+ if (tmp & MASTER_BIG_JOINER_ENABLE)
+ master_pipes |= BIT(pipe);
+ else
+ slave_pipes |= BIT(pipe);
+ }
+
+ if (DISPLAY_VER(dev_priv) < 13)
+ continue;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) {
+ u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+
+ if (tmp & UNCOMPRESSED_JOINER_MASTER)
+ master_pipes |= BIT(pipe);
+ if (tmp & UNCOMPRESSED_JOINER_SLAVE)
+ slave_pipes |= BIT(pipe);
+ }
+ }
+
+ /* Bigjoiner pipes should always be consecutive master and slave */
+ drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1,
+ "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n",
+ master_pipes, slave_pipes);
+
+ return slave_pipes;
+}
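To illustrate the consecutive master/slave invariant warned about above (hypothetical masks):

	/* master_pipes = BIT(PIPE_A) | BIT(PIPE_C) = 0b0101 */
	/* slave_pipes  = BIT(PIPE_B) | BIT(PIPE_D) = 0b1010 = master_pipes << 1 */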
+
static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
{
u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
enabled_transcoders |= BIT(cpu_transcoder);
}
+ /* single pipe or bigjoiner master */
cpu_transcoder = (enum transcoder) crtc->pipe;
if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
enabled_transcoders |= BIT(cpu_transcoder);
+ /* bigjoiner slave -> consider the master pipe's transcoder as well */
+ if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) {
+ cpu_transcoder = (enum transcoder) crtc->pipe - 1;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+ }
+
return enabled_transcoders;
}
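Note that the slave case above leans on the same invariant: since bigjoiner master and slave pipes are always consecutive, a slave on e.g. pipe B can assume its master drives (enum transcoder)(PIPE_B - 1), i.e. transcoder A.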
return transcoder_is_dsi(pipe_config->cpu_transcoder);
}
-static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
- enum port port;
- u32 tmp;
-
- if (transcoder_is_dsi(cpu_transcoder)) {
- port = (cpu_transcoder == TRANSCODER_DSI_A) ?
- PORT_A : PORT_B;
- } else {
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- if (!(tmp & TRANS_DDI_FUNC_ENABLE))
- return;
- if (DISPLAY_VER(dev_priv) >= 12)
- port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- else
- port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
- }
-
- /*
- * Haswell has only FDI/PCH transcoder A. It is connected to
- * DDI E. So just check whether this pipe is wired to DDI E and whether
- * the PCH transcoder is on.
- */
- if (DISPLAY_VER(dev_priv) < 9 &&
- (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
- pipe_config->has_pch_encoder = true;
-
- tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
- pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
- FDI_DP_PORT_WIDTH_SHIFT) + 1;
-
- ilk_get_fdi_m_n_config(crtc, pipe_config);
- }
-}
-
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
intel_uncompressed_joiner_get_config(pipe_config);
- if (!active) {
- /* bigjoiner slave doesn't enable transcoder */
- if (!pipe_config->bigjoiner_slave)
- goto out;
-
- active = true;
- pipe_config->pixel_multiplier = 1;
+ if (!active)
+ goto out;
- /* we cannot read out most state, so don't bother.. */
- pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE;
- } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- DISPLAY_VER(dev_priv) >= 11) {
- hsw_get_ddi_port_state(crtc, pipe_config);
+ if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
+ DISPLAY_VER(dev_priv) >= 11)
intel_get_transcoder_timings(crtc, pipe_config);
- }
if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder))
intel_vrr_get_config(crtc, pipe_config);
}
}
- if (pipe_config->bigjoiner_slave) {
- /* Cannot be read out as a slave, set to 0. */
- pipe_config->pixel_multiplier = 0;
- } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
+ if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
intel_de_read(dev_priv,
drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
- intel_wait_for_vblank(dev_priv, crtc->pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
+
return true;
fail:
}
/* Returns the clock of the currently programmed mode of the given pipe. */
-static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
}
-static void ilk_pch_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- /* read out port_clock from the DPLL */
- i9xx_crtc_clock_get(crtc, pipe_config);
-
- /*
- * In case there is an active pipe without active ports,
- * we may need some idea for the dotclock anyway.
- * Calculate one based on the FDI configuration.
- */
- pipe_config->hw.adjusted_mode.crtc_clock =
- intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
- &pipe_config->fdi_m_n);
-}
-
/* Returns the currently programmed mode of the given encoder. */
struct drm_display_mode *
intel_encoder_current_mode(struct intel_encoder *encoder)
if (!encoder->get_hw_state(encoder, &pipe))
return NULL;
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
linked_state->view = plane_state->view;
+ linked_state->decrypt = plane_state->decrypt;
intel_plane_copy_hw_state(linked_state, plane_state);
linked_state->uapi.src = plane_state->uapi.src;
if (icl_is_hdr_plane(dev_priv, plane->id)) {
if (linked->id == PLANE_SPRITE5)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_7;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_7_ICL;
else if (linked->id == PLANE_SPRITE4)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_6;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_6_ICL;
else if (linked->id == PLANE_SPRITE3)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_5_RKL;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_5_RKL;
else if (linked->id == PLANE_SPRITE2)
- plane_state->cus_ctl |= PLANE_CUS_PLANE_4_RKL;
+ plane_state->cus_ctl |= PLANE_CUS_Y_PLANE_4_RKL;
else
MISSING_CASE(linked->id);
}
crtc_state->update_wm_post = true;
if (mode_changed && crtc_state->hw.enable &&
- dev_priv->dpll_funcs &&
- !crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
if (ret)
intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state)
{
- const struct intel_crtc_state *from_crtc_state = crtc_state;
-
- if (crtc_state->bigjoiner_slave) {
- from_crtc_state = intel_atomic_get_new_crtc_state(state,
- crtc_state->bigjoiner_linked_crtc);
+ const struct intel_crtc_state *master_crtc_state;
+ struct intel_crtc *master_crtc;
- /* No need to copy state if the master state is unchanged */
- if (!from_crtc_state)
- return;
- }
+ master_crtc = intel_master_crtc(crtc_state);
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
- intel_crtc_copy_color_blobs(crtc_state, from_crtc_state);
+ /* No need to copy state if the master state is unchanged */
+ if (master_crtc_state)
+ intel_crtc_copy_color_blobs(crtc_state, master_crtc_state);
}
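For reference, a minimal sketch of what the intel_master_crtc() helper used above presumably resolves to, given how the removed code walked bigjoiner_linked_crtc:

static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state)
{
	if (crtc_state->bigjoiner_slave)
		return crtc_state->bigjoiner_linked_crtc;
	else
		return to_intel_crtc(crtc_state->uapi.crtc);
}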
static void
const struct intel_crtc_state *from_crtc_state)
{
struct intel_crtc_state *saved_state;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL);
if (!saved_state)
crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0;
crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc);
crtc_state->bigjoiner_slave = true;
- crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe;
- crtc_state->has_audio = false;
+ crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder;
+ crtc_state->has_audio = from_crtc_state->has_audio;
return 0;
}
PIPE_CONF_CHECK_X(output_types);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
-
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
- PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
-
- PIPE_CONF_CHECK_I(pixel_multiplier);
-
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end);
+
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end);
+
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start);
+ PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end);
+
+ PIPE_CONF_CHECK_I(pixel_multiplier);
+
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_INTERLACE);
+
+ if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_INTERLACE);
-
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_PHSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_NHSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_PVSYNC);
- PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
- DRM_MODE_FLAG_NVSYNC);
- }
+ DRM_MODE_FLAG_PHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NHSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_PVSYNC);
+ PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags,
+ DRM_MODE_FLAG_NVSYNC);
}
PIPE_CONF_CHECK_I(output_format);
PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_BOOL(has_infoframe);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
- PIPE_CONF_CHECK_BOOL(fec_enable);
+ PIPE_CONF_CHECK_BOOL(fec_enable);
PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
- /* FIXME do the readout properly and get rid of this quirk */
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE))
- PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
PIPE_CONF_CHECK_X(gamma_mode);
if (IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_BOOL(double_wide);
- if (dev_priv->dpll.mgr)
+ if (dev_priv->dpll.mgr) {
PIPE_CONF_CHECK_P(shared_dpll);
- /* FIXME do the readout properly and get rid of this quirk */
- if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
}
- if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
- PIPE_CONF_CHECK_X(dsi_pll.ctrl);
- PIPE_CONF_CHECK_X(dsi_pll.div);
+ PIPE_CONF_CHECK_X(dsi_pll.ctrl);
+ PIPE_CONF_CHECK_X(dsi_pll.div);
- if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
- PIPE_CONF_CHECK_I(pipe_bpp);
+ if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5)
+ PIPE_CONF_CHECK_I(pipe_bpp);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
- PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
- PIPE_CONF_CHECK_I(min_voltage_level);
- }
+ PIPE_CONF_CHECK_I(min_voltage_level);
if (current_config->has_psr || pipe_config->has_psr)
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
struct intel_encoder *encoder;
struct intel_crtc_state *pipe_config = old_crtc_state;
struct drm_atomic_state *state = old_crtc_state->uapi.state;
- struct intel_crtc *master = crtc;
+ struct intel_crtc *master_crtc;
__drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi);
intel_crtc_free_hw_state(old_crtc_state);
"(expected %i, found %i)\n",
new_crtc_state->hw.active, crtc->active);
- if (new_crtc_state->bigjoiner_slave)
- master = new_crtc_state->bigjoiner_linked_crtc;
+ master_crtc = intel_master_crtc(new_crtc_state);
- for_each_encoder_on_crtc(dev, &master->base, encoder) {
+ for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) {
enum pipe pipe;
bool active;
encoder->base.base.id, active,
new_crtc_state->hw.active);
- I915_STATE_WARN(active && master->pipe != pipe,
+ I915_STATE_WARN(active && master_crtc->pipe != pipe,
"Encoder connected to wrong pipe %c\n",
pipe_name(pipe));
if (!new_crtc_state->hw.active)
return;
- if (new_crtc_state->bigjoiner_slave)
- /* No PLLs set for slave */
- pipe_config->shared_dpll = NULL;
-
intel_pipe_config_sanity_check(dev_priv, pipe_config);
if (!intel_pipe_config_compare(new_crtc_state,
if (!new_crtc_state->hw.active)
return;
- if (new_crtc_state->bigjoiner_slave)
- return;
-
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
return 0;
}
-static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
-}
-
-static bool pxp_is_borked(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
-}
-
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
- struct intel_plane_state *new_plane_state;
- struct intel_plane_state *old_plane_state;
struct intel_crtc *crtc;
- const struct drm_framebuffer *fb;
int i, ret;
ret = icl_add_linked_planes(state);
return ret;
}
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- new_plane_state = intel_atomic_get_new_plane_state(state, plane);
- old_plane_state = intel_atomic_get_old_plane_state(state, plane);
- fb = new_plane_state->hw.fb;
- if (fb) {
- new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
- new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
- } else {
- new_plane_state->decrypt = old_plane_state->decrypt;
- new_plane_state->force_black = old_plane_state->force_black;
- }
- }
-
- return 0;
-}
-
-static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
- bool *need_cdclk_calc)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_cdclk_state *old_cdclk_state;
- const struct intel_cdclk_state *new_cdclk_state;
- struct intel_plane_state *plane_state;
- struct intel_bw_state *new_bw_state;
- struct intel_plane *plane;
- int min_cdclk = 0;
- enum pipe pipe;
- int ret;
- int i;
- /*
- * active_planes bitmask has been updated, and potentially
- * affected planes are part of the state. We can now
- * compute the minimum cdclk for each plane.
- */
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
- if (ret)
- return ret;
- }
-
- old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
- new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
-
- if (new_cdclk_state &&
- old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
- *need_cdclk_calc = true;
-
- ret = intel_cdclk_bw_calc_min_cdclk(state);
- if (ret)
- return ret;
-
- new_bw_state = intel_atomic_get_new_bw_state(state);
-
- if (!new_cdclk_state || !new_bw_state)
- return 0;
-
- for_each_pipe(dev_priv, pipe) {
- min_cdclk = max(new_cdclk_state->min_cdclk[pipe], min_cdclk);
-
- /*
- * Currently do this change only if we need to increase
- */
- if (new_bw_state->min_cdclk > min_cdclk)
- *need_cdclk_calc = true;
- }
-
return 0;
}
struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc_state *slave_crtc_state, *master_crtc_state;
- struct intel_crtc *slave, *master;
+ struct intel_crtc *slave_crtc, *master_crtc;
/* slave being enabled, is the master still claiming this crtc? */
if (old_crtc_state->bigjoiner_slave) {
- slave = crtc;
- master = old_crtc_state->bigjoiner_linked_crtc;
- master_crtc_state = intel_atomic_get_new_crtc_state(state, master);
+ slave_crtc = crtc;
+ master_crtc = old_crtc_state->bigjoiner_linked_crtc;
+ master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc);
if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state))
goto claimed;
}
if (!new_crtc_state->bigjoiner)
return 0;
- slave = intel_dsc_get_bigjoiner_secondary(crtc);
- if (!slave) {
+ slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
+ if (!slave_crtc) {
DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires "
"CRTC + 1 to be used, doesn't exist\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
- new_crtc_state->bigjoiner_linked_crtc = slave;
- slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave);
- master = crtc;
+ new_crtc_state->bigjoiner_linked_crtc = slave_crtc;
+ slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc);
+ master_crtc = crtc;
if (IS_ERR(slave_crtc_state))
return PTR_ERR(slave_crtc_state);
goto claimed;
DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
- slave->base.base.id, slave->base.name);
+ slave_crtc->base.base.id, slave_crtc->base.name);
return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state);
claimed:
DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but "
"[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n",
- slave->base.base.id, slave->base.name,
- master->base.base.id, master->base.name);
+ slave_crtc->base.base.id, slave_crtc->base.name,
+ master_crtc->base.base.id, master_crtc->base.name);
return -EINVAL;
}
* correspond to the last vblank and have no relation to the actual time when
* the flip done event was sent.
*/
-static int intel_atomic_check_async(struct intel_atomic_state *state)
+static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
const struct intel_plane_state *new_plane_state, *old_plane_state;
- struct intel_crtc *crtc;
struct intel_plane *plane;
int i;
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (intel_crtc_needs_modeset(new_crtc_state)) {
- drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
- return -EINVAL;
- }
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
- if (!new_crtc_state->hw.active) {
- drm_dbg_kms(&i915->drm, "CRTC inactive\n");
- return -EINVAL;
- }
- if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
- drm_dbg_kms(&i915->drm,
- "Active planes cannot be changed during async flip\n");
- return -EINVAL;
- }
+ if (intel_crtc_needs_modeset(new_crtc_state)) {
+ drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n");
+ return -EINVAL;
+ }
+
+ if (!new_crtc_state->hw.active) {
+ drm_dbg_kms(&i915->drm, "CRTC inactive\n");
+ return -EINVAL;
+ }
+ if (old_crtc_state->active_planes != new_crtc_state->active_planes) {
+ drm_dbg_kms(&i915->drm,
+ "Active planes cannot be changed during async flip\n");
+ return -EINVAL;
}
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
+ if (plane->pipe != crtc->pipe)
+ continue;
+
/*
* TODO: Async flip is only supported through the page flip IOCTL
* as of now. So support currently added for primary plane only.
return -EINVAL;
}
- if (old_plane_state->view.color_plane[0].stride !=
- new_plane_state->view.color_plane[0].stride) {
+ if (new_plane_state->hw.fb->format->num_planes > 1) {
+ drm_dbg_kms(&i915->drm,
+ "Planar formats not supported with async flips\n");
+ return -EINVAL;
+ }
+
+ if (old_plane_state->view.color_plane[0].mapping_stride !=
+ new_plane_state->view.color_plane[0].mapping_stride) {
drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n");
return -EINVAL;
}
if (ret)
goto fail;
- intel_fbc_choose_crtc(dev_priv, state);
ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
if (ret)
goto fail;
- ret = intel_atomic_check_cdclk(state, &any_ms);
+ ret = intel_cdclk_atomic_check(state, &any_ms);
if (ret)
goto fail;
if (ret)
goto fail;
+ ret = intel_fbc_atomic_check(state);
+ if (ret)
+ goto fail;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip) {
- ret = intel_atomic_check_async(state);
+ ret = intel_atomic_check_async(state, crtc);
if (ret)
goto fail;
}
intel_fbc_update(state, crtc);
+ intel_update_planes_on_crtc(state, crtc);
+
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
commit_pipe_pre_planes(state, crtc);
if (DISPLAY_VER(dev_priv) >= 9)
- skl_update_planes_on_crtc(state, crtc);
+ skl_arm_planes_on_crtc(state, crtc);
else
- i9xx_update_planes_on_crtc(state, crtc);
+ i9xx_arm_planes_on_crtc(state, crtc);
commit_pipe_post_planes(state, crtc);
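Putting the new hooks together, the per-CRTC plane commit above now has two phases: a noarm phase before vblank evasion and an arm phase inside it. A condensed sketch of the ordering as it appears in this hunk:

	intel_update_planes_on_crtc(state, crtc);	/* noarm: double-buffered registers only */
	intel_pipe_update_start(new_crtc_state);	/* enter vblank evasion */
	commit_pipe_pre_planes(state, crtc);
	skl_arm_planes_on_crtc(state, crtc);		/* arm: PLANE_CTL + PLANE_SURF writes */
	commit_pipe_post_planes(state, crtc);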
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
-
- intel_encoders_pre_disable(state, crtc);
-
- intel_crtc_disable_planes(state, crtc);
-
- /*
- * We still need special handling for disabling bigjoiner master
- * and slaves since for slave we do not have encoder or plls
- * so we don't need to disable those.
- */
- if (old_crtc_state->bigjoiner) {
- intel_crtc_disable_planes(state,
- old_crtc_state->bigjoiner_linked_crtc);
- old_crtc_state->bigjoiner_linked_crtc->active = false;
- }
-
/*
* We need to disable pipe CRC before disabling the pipe,
* or we race against vblank off.
u32 handled = 0;
int i;
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (!old_crtc_state->hw.active)
+ continue;
+
+ intel_pre_plane_update(state, crtc);
+ intel_crtc_disable_planes(state, crtc);
+ }
+
/* Only disable port sync and MST slaves */
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner)
+ if (!intel_crtc_needs_modeset(new_crtc_state))
continue;
if (!old_crtc_state->hw.active)
* Slave vblanks are masked till Master Vblanks.
*/
if (!is_trans_port_sync_slave(old_crtc_state) &&
- !intel_dp_mst_is_slave_trans(old_crtc_state))
+ !intel_dp_mst_is_slave_trans(old_crtc_state) &&
+ !old_crtc_state->bigjoiner_slave)
continue;
- intel_pre_plane_update(state, crtc);
intel_old_crtc_state_disables(state, old_crtc_state,
new_crtc_state, crtc);
handled |= BIT(crtc->pipe);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!intel_crtc_needs_modeset(new_crtc_state) ||
- (handled & BIT(crtc->pipe)) ||
- old_crtc_state->bigjoiner_slave)
+ (handled & BIT(crtc->pipe)))
continue;
- intel_pre_plane_update(state, crtc);
- if (old_crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- old_crtc_state->bigjoiner_linked_crtc;
-
- intel_pre_plane_update(state, slave);
- }
+ if (!old_crtc_state->hw.active)
+ continue;
- if (old_crtc_state->hw.active)
- intel_old_crtc_state_disables(state, old_crtc_state,
- new_crtc_state, crtc);
+ intel_old_crtc_state_disables(state, old_crtc_state,
+ new_crtc_state, crtc);
}
}
if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb,
&old_crtc_state->wm.skl.ddb) &&
(update_pipes | modeset_pipes))
- intel_wait_for_vblank(dev_priv, pipe);
+ intel_crtc_wait_for_next_vblank(crtc);
}
}
for (;;) {
prepare_to_wait(&intel_state->commit_ready.wait,
&wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset, TASK_UNINTERRUPTIBLE);
if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &dev_priv->gt.reset.flags))
+ test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
break;
schedule();
}
finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&dev_priv->gt.reset.flags,
+ finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
I915_RESET_MODESET),
&wait_reset);
}
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
struct drm_framebuffer *fb = plane_state->hw.fb;
+ int cc_plane;
int ret;
- if (!fb ||
- fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
+ if (!fb)
+ continue;
+
+ cc_plane = intel_fb_rc_ccs_cc_plane(fb);
+ if (cc_plane < 0)
continue;
/*
* GPU write on it.
*/
ret = i915_gem_object_read_from_page(intel_fb_obj(fb),
- fb->offsets[2] + 16,
+ fb->offsets[cc_plane] + 16,
&plane_state->ccval,
sizeof(plane_state->ccval));
/* The above could only fail if the FB obj has an unexpected backing store type. */
}
}
- if (state->modeset)
- intel_encoders_update_prepare(state);
+ intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
- intel_psr_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display->commit_modeset_enables(state);
- if (state->modeset) {
- intel_encoders_update_complete(state);
+ intel_encoders_update_complete(state);
+ if (state->modeset)
intel_set_cdclk_post_plane_update(state);
- }
+
+ intel_wait_for_vblank_workers(state);
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
intel_crtc_disable_flip_done(state, crtc);
-
- if (new_crtc_state->hw.active &&
- !intel_crtc_needs_modeset(new_crtc_state) &&
- !new_crtc_state->preload_luts &&
- (new_crtc_state->uapi.color_mgmt_changed ||
- new_crtc_state->update_pipe))
- intel_color_load_luts(new_crtc_state);
}
/*
struct intel_plane *plane;
for_each_intel_plane(&dev_priv->drm, plane) {
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
- plane->pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv,
+ plane->pipe);
plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
}
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = intel_user_framebuffer_create,
- .get_format_info = intel_get_format_info,
+ .get_format_info = intel_fb_get_format_info,
.output_poll_changed = intel_fbdev_output_poll_changed,
.mode_valid = intel_mode_valid,
.atomic_check = intel_atomic_check,
return;
intel_init_cdclk_hooks(dev_priv);
- intel_init_audio_hooks(dev_priv);
+ intel_audio_hooks_init(dev_priv);
intel_dpll_init_clock_hook(dev_priv);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
/* 640x480@60Hz, ~25175 kHz */
struct dpll clock = {
.m1 = 18,
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
pipe_name(pipe));
"[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
plane->base.base.id, plane->base.name);
- plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ plane_crtc = intel_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
}
}
visible = plane->get_hw_state(plane, &pipe);
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
intel_set_plane_visible(crtc_state, plane_state, visible);
pipe = 0;
if (encoder->get_hw_state(encoder, &pipe)) {
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ crtc = intel_crtc_for_pipe(dev_priv, pipe);
crtc_state = to_intel_crtc_state(crtc->base.state);
encoder->base.crtc = &crtc->base;
struct intel_plane *plane;
int min_cdclk = 0;
- if (crtc_state->bigjoiner_slave)
- continue;
-
if (crtc_state->hw.active) {
/*
* The initial mode needs to be set in order to keep
intel_bw_crtc_update(bw_state, crtc_state);
intel_pipe_config_sanity_check(dev_priv, crtc_state);
-
- /* discard our incomplete slave state, copy it from master */
- if (crtc_state->bigjoiner && crtc_state->hw.active) {
- struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc;
- struct intel_crtc_state *slave_crtc_state =
- to_intel_crtc_state(slave->base.state);
-
- copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state);
- slave->base.mode = crtc->base.mode;
-
- cdclk_state->min_cdclk[slave->pipe] = min_cdclk;
- cdclk_state->min_voltage_level[slave->pipe] =
- crtc_state->min_voltage_level;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) {
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- /*
- * FIXME don't have the fb yet, so can't
- * use intel_plane_data_rate() :(
- */
- if (plane_state->uapi.visible)
- crtc_state->data_rate[plane->id] =
- 4 * crtc_state->pixel_rate;
- else
- crtc_state->data_rate[plane->id] = 0;
- }
-
- intel_bw_crtc_update(bw_state, slave_crtc_state);
- drm_calc_timestamping_constants(&slave->base,
- &slave_crtc_state->hw.adjusted_mode);
- }
}
}
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
- intel_fbc_cleanup_cfb(i915);
+ intel_fbc_cleanup(i915);
}
/* part #3: call after gem init */
intel_bios_driver_remove(i915);
}
+bool intel_modeset_probe_defer(struct pci_dev *pdev)
+{
+ struct drm_privacy_screen *privacy_screen;
+
+ /*
+ * apple-gmux is needed on dual GPU MacBook Pro
+ * to probe the panel if we're the inactive GPU.
+ */
+ if (vga_switcheroo_client_probe_defer(pdev))
+ return true;
+
+ /* If the LCD panel has a privacy-screen, wait for it */
+ privacy_screen = drm_privacy_screen_get(&pdev->dev, NULL);
+ if (IS_ERR(privacy_screen) && PTR_ERR(privacy_screen) == -EPROBE_DEFER)
+ return true;
+
+ drm_privacy_screen_put(privacy_screen);
+
+ return false;
+}
+
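A plausible call site for the new helper is early in PCI probe, before any hardware is touched (a sketch; i915_pci_probe as the exact caller, and the elided surrounding logic, are assumptions):

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* ... */
	if (intel_modeset_probe_defer(pdev))
		return -EPROBE_DEFER;
	/* ... */
}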
void intel_display_driver_register(struct drm_i915_private *i915)
{
if (!HAS_DISPLAY(i915))
i915_vma_put(dpt->vma);
}
+/**
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * @i915: device instance
+ *
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The contents of these page
+ * tables are not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so here we reprogram the PTE entries in those tables.
+ *
+ * This function must be called after the mappings in GGTT have been restored by calling
+ * i915_ggtt_resume().
+ */
+void intel_dpt_resume(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_resume_vm(fb->dpt_vm);
+ }
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
+/**
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * @i915: device instance
+ *
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
+ *
+ * This function must be called before the mappings in GGTT are suspended by calling
+ * i915_ggtt_suspend().
+ */
+void intel_dpt_suspend(struct drm_i915_private *i915)
+{
+ struct drm_framebuffer *drm_fb;
+
+ if (!HAS_DISPLAY(i915))
+ return;
+
+ mutex_lock(&i915->drm.mode_config.fb_lock);
+
+ drm_for_each_fb(drm_fb, &i915->drm) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+
+ if (fb->dpt_vm)
+ i915_ggtt_suspend_vm(fb->dpt_vm);
+ }
+
+ mutex_unlock(&i915->drm.mode_config.fb_lock);
+}
+
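Sketch of the ordering the kernel-docs above demand on the system suspend/resume paths (exact placement inside the i915 suspend/resume handlers is an assumption):

	/* suspend: save the DPT state before the GGTT mappings go away */
	intel_dpt_suspend(i915);
	i915_ggtt_suspend(ggtt);

	/* resume: restore the GGTT first, then rewrite the DPT PTEs */
	i915_ggtt_resume(ggtt);
	intel_dpt_resume(i915);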
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
vm = &dpt->vm;
- vm->gt = &i915->gt;
+ vm->gt = to_gt(i915);
vm->i915 = i915;
vm->dma = i915->drm.dev;
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
vm->vma_ops.bind_vma = dpt_bind_vma;
vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
vm->pte_encode = gen8_ggtt_pte_encode;
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fb.h"
+#include "intel_fbc.h"
#include "intel_pm.h"
#include "intel_psr.h"
#include "intel_sprite.h"
DRM_FORMAT_XVYU16161616,
};
-static const u64 skl_plane_format_modifiers_noccs[] = {
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 skl_plane_format_modifiers_ccs[] = {
- I915_FORMAT_MOD_Yf_TILED_CCS,
- I915_FORMAT_MOD_Y_TILED_CCS,
- I915_FORMAT_MOD_Yf_TILED,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_mc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 gen12_plane_format_modifiers_rc_ccs[] = {
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS,
- I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC,
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
-static const u64 adlp_step_a_plane_format_modifiers[] = {
- I915_FORMAT_MOD_Y_TILED,
- I915_FORMAT_MOD_X_TILED,
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
-};
-
int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
switch (format) {
}
}
-static int icl_plane_max_width(const struct drm_framebuffer *fb,
- int color_plane,
- unsigned int rotation)
+static int icl_hdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
+{
+ if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier))
+ return 4096;
+ else
+ return 5120;
+}
+
+static int icl_sdr_plane_max_width(const struct drm_framebuffer *fb,
+ int color_plane,
+ unsigned int rotation)
{
return 5120;
}
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
- u32 stride = plane_state->view.color_plane[color_plane].stride;
+ u32 stride = plane_state->view.color_plane[color_plane].scanout_stride;
if (color_plane >= fb->format->num_planes)
return 0;
}
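A note on the two stride fields this series distinguishes (semantics inferred from the call sites, so treat as an assumption): scanout_stride is what gets programmed into PLANE_STRIDE here, i.e. the stride the display engine walks, while mapping_stride (used for the X-tile offset search and the async-flip stride check elsewhere in this patch) describes the stride of the possibly remapped GTT mapping; the two can differ for remapped or rotated views.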
static void
-skl_disable_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
+skl_plane_disable_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
case DRM_FORMAT_XYUV8888:
return PLANE_CTL_FORMAT_XYUV;
case DRM_FORMAT_YUYV:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YUYV;
case DRM_FORMAT_YVYU:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_YVYU;
case DRM_FORMAT_UYVY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_UYVY;
case DRM_FORMAT_VYUY:
- return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
+ return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_ORDER_VYUY;
case DRM_FORMAT_NV12:
return PLANE_CTL_FORMAT_NV12;
case DRM_FORMAT_P010:
plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
}
+ if (plane_state->force_black)
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+
return plane_color_ctl;
}
}
}
-static void intel_load_plane_csc_black(struct intel_plane *intel_plane)
+static u32 skl_plane_surf(const struct intel_plane_state *plane_state,
+ int color_plane)
{
- struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane = intel_plane->id;
- u16 postoff = 0;
+ u32 plane_surf;
- drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n",
- intel_plane->base.name, plane);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0);
+ plane_surf = intel_plane_ggtt_offset(plane_state) +
+ skl_surf_address(plane_state, color_plane);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0);
+ if (plane_state->decrypt)
+ plane_surf |= PLANE_SURF_DECRYPT;
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0);
+ return plane_surf;
+}
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+static void icl_plane_csc_load_black(struct intel_plane *plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+
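+ /*
+ * All-zero coefficients and pre/post offsets make the CSC emit
+ * black regardless of the input pixels.
+ */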
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0);
+ intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff);
- intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0);
+
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0);
}
static void
-skl_program_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
+skl_program_plane_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- u32 surf_addr = skl_surf_address(plane_state, color_plane);
u32 stride = skl_plane_stride(plane_state, color_plane);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- int aux_plane = skl_main_to_aux_plane(fb, color_plane);
int crtc_x = plane_state->uapi.dst.x1;
int crtc_y = plane_state->uapi.dst.y1;
- u32 x = plane_state->view.color_plane[color_plane].x;
- u32 y = plane_state->view.color_plane[color_plane].y;
u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- u8 alpha = plane_state->hw.alpha >> 8;
- u32 plane_color_ctl = 0, aux_dist = 0;
unsigned long irqflags;
- u32 keymsk, keymax, plane_surf;
- u32 plane_ctl = plane_state->ctl;
-
- plane_ctl |= skl_plane_ctl_crtc(crtc_state);
-
- if (DISPLAY_VER(dev_priv) >= 10)
- plane_color_ctl = plane_state->color_ctl |
- glk_plane_color_ctl_crtc(crtc_state);
-
- /* Sizes are 0 based */
- src_w--;
- src_h--;
-
- keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
-
- keymsk = key->channel_mask & 0x7ffffff;
- if (alpha < 0xff)
- keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
/* The scaler will handle the output position */
if (plane_state->scaler_id >= 0) {
crtc_y = 0;
}
- if (aux_plane) {
- aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr;
-
- if (DISPLAY_VER(dev_priv) < 12)
- aux_dist |= skl_plane_stride(plane_state, aux_plane);
- }
-
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ /*
+ * FIXME: a PXP session invalidation can hit at any time, even during or
+ * after the commit; display content will then be garbage.
+ */
+ if (plane_state->force_black)
+ icl_plane_csc_load_black(plane);
+
intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
(crtc_y << 16) | crtc_x);
intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
- (src_h << 16) | src_w);
+ ((src_h - 1) << 16) | (src_w - 1));
- intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+ if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) {
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0),
+ lower_32_bits(plane_state->ccval));
+ intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 1),
+ upper_32_bits(plane_state->ccval));
+ }
if (icl_is_hdr_plane(dev_priv, plane_id))
intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
plane_state->cus_ctl);
- if (DISPLAY_VER(dev_priv) >= 10)
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
-
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
- if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)
- intel_uncore_write64_fw(&dev_priv->uncore,
- PLANE_CC_VAL(pipe, plane_id), plane_state->ccval);
-
skl_write_plane_wm(plane, crtc_state);
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static void
+skl_program_plane_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum plane_id plane_id = plane->id;
+ enum pipe pipe = plane->pipe;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ int aux_plane = skl_main_to_aux_plane(fb, color_plane);
+ u32 x = plane_state->view.color_plane[color_plane].x;
+ u32 y = plane_state->view.color_plane[color_plane].y;
+ u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0;
+ u8 alpha = plane_state->hw.alpha >> 8;
+ u32 plane_ctl = plane_state->ctl;
+ unsigned long irqflags;
+
+ plane_ctl |= skl_plane_ctl_crtc(crtc_state);
+
+ if (DISPLAY_VER(dev_priv) >= 10)
+ plane_color_ctl = plane_state->color_ctl |
+ glk_plane_color_ctl_crtc(crtc_state);
+
+ keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha);
+
+ keymsk = key->channel_mask & 0x7ffffff;
+ if (alpha < 0xff)
+ keymsk |= PLANE_KEYMSK_ALPHA_ENABLE;
+
+ if (aux_plane) {
+ aux_dist = skl_surf_address(plane_state, aux_plane) -
+ skl_surf_address(plane_state, color_plane);
+
+ if (DISPLAY_VER(dev_priv) < 12)
+ aux_dist |= skl_plane_stride(plane_state, aux_plane);
+ }
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
key->min_value);
intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
(y << 16) | x);
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+
if (DISPLAY_VER(dev_priv) < 11)
intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
(plane_state->view.color_plane[1].y << 16) |
plane_state->view.color_plane[1].x);
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ if (DISPLAY_VER(dev_priv) >= 10)
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
/*
* Enable the scaler before the plane so that we don't
* get a catastrophic underrun even if the two operations
* end up happening in two different frames.
+ *
+ * TODO: split into noarm+arm pair
*/
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr;
- plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
-
- /*
- * FIXME: pxp session invalidation can hit any time even at time of commit
- * or after the commit, display content will be garbage.
- */
- if (plane_state->decrypt) {
- plane_surf |= PLANE_SURF_DECRYPT;
- } else if (plane_state->force_black) {
- intel_load_plane_csc_black(plane);
- plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
- }
-
- intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
- plane_color_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ skl_plane_surf(plane_state, color_plane));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
unsigned long irqflags;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
- u32 surf_addr = plane_state->view.color_plane[0].offset;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ skl_plane_surf(plane_state, 0));
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
-skl_update_plane(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+skl_plane_update_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
int color_plane = 0;
/* Program the UV plane on planar master */
color_plane = 1;
- skl_program_plane(plane, crtc_state, plane_state, color_plane);
+ skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane);
+}
+
+static void
+skl_plane_update_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ int color_plane = 0;
+
+ if (plane_state->planar_linked_plane && !plane_state->planar_slave)
+ /* Program the UV plane on planar master */
+ color_plane = 1;
+
+ skl_program_plane_arm(plane, crtc_state, plane_state, color_plane);
}
static bool intel_format_is_p01x(u32 format)
return 0;
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
- is_ccs_modifier(fb->modifier)) {
+ intel_fb_is_ccs_modifier(fb->modifier)) {
drm_dbg_kms(&dev_priv->drm,
"RC support only with 0/180 degree rotation (%x)\n",
rotation);
/* Y-tiling is not supported in IF-ID Interlace mode */
if (crtc_state->hw.enable &&
crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE &&
- (fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) {
+ fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+ fb->modifier != I915_FORMAT_MOD_X_TILED) {
drm_dbg_kms(&dev_priv->drm,
"Y/Yf tiling not supported in IF-ID mode\n");
return -EINVAL;
if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
int cpp = fb->format->cpp[0];
- while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) {
+ while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) {
if (*offset == 0) {
drm_dbg_kms(&dev_priv->drm,
"Unable to find suitable display surface offset due to X-tiling\n");
* CCS AUX surface doesn't have its own x/y offsets, we must make sure
* they match with the main surface x/y offsets.
*/
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
while (!skl_check_main_ccs_coordinates(plane_state, x, y,
offset, aux_plane)) {
if (offset == 0)
offset = intel_plane_compute_aligned_offset(&x, &y,
plane_state, uv_plane);
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
int ccs_plane = main_to_ccs_plane(fb, uv_plane);
u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset;
u32 alignment = intel_surf_alignment(fb, uv_plane);
int hsub, vsub;
int x, y;
- if (!is_ccs_plane(fb, ccs_plane) ||
- is_gen12_ccs_cc_plane(fb, ccs_plane))
+ if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane))
continue;
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb,
* Handle the AUX surface first since the main surface setup depends on
* it.
*/
- if (is_ccs_modifier(fb->modifier)) {
+ if (intel_fb_is_ccs_modifier(fb->modifier)) {
ret = skl_check_ccs_aux_surface(plane_state);
if (ret)
return ret;
}
}
- return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&to_gt(i915)->pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
static int skl_plane_check(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
if (ret)
return ret;
+ if (DISPLAY_VER(dev_priv) >= 11) {
+ plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ }
+
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
plane_state->uapi.visible = false;
return pipe == PIPE_A && plane_id == PLANE_PRIMARY;
}
+static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ if (skl_plane_has_fbc(dev_priv, pipe, plane_id))
+ return dev_priv->fbc;
+ else
+ return NULL;
+}
+
static bool skl_plane_has_planar(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
{
}
}
-static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id)
-{
- if (plane_id == PLANE_CURSOR)
- return false;
-
- if (DISPLAY_VER(dev_priv) >= 11)
- return true;
-
- if (IS_GEMINILAKE(dev_priv))
- return pipe != PIPE_C;
-
- return pipe != PIPE_C &&
- (plane_id == PLANE_PRIMARY ||
- plane_id == PLANE_SPRITE0);
-}
-
static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
struct intel_plane *plane = to_intel_plane(_plane);
- switch (modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (!plane->has_ccs)
- return false;
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
+ if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
}
}
-static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
- if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
- IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0))
- return false;
-
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return false;
-
- return plane_id < PLANE_SPRITE4;
-}
-
static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
u32 format, u64 modifier)
{
- struct drm_i915_private *dev_priv = to_i915(_plane->dev);
struct intel_plane *plane = to_intel_plane(_plane);
- switch (modifier) {
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id))
- return false;
- fallthrough;
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return false;
- break;
- default:
+ if (!intel_fb_plane_supports_modifier(plane, modifier))
return false;
- }
switch (format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
- if (is_ccs_modifier(modifier))
+ if (intel_fb_is_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_P010:
case DRM_FORMAT_P012:
case DRM_FORMAT_P016:
- if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)
+ if (intel_fb_is_mc_ccs_modifier(modifier))
return true;
fallthrough;
case DRM_FORMAT_RGB565:
}
}
-static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
- enum plane_id plane_id)
-{
- /* Wa_22011186057 */
- if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
- return adlp_step_a_plane_format_modifiers;
- else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
- return gen12_plane_format_modifiers_mc_ccs;
- else
- return gen12_plane_format_modifiers_rc_ccs;
-}
-
static const struct drm_plane_funcs skl_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
spin_unlock_irq(&i915->irq_lock);
}
+static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ if (DISPLAY_VER(i915) >= 11)
+ return true;
+
+ if (IS_GEMINILAKE(i915))
+ return pipe != PIPE_C;
+
+ return pipe != PIPE_C &&
+ (plane_id == PLANE_PRIMARY ||
+ plane_id == PLANE_SPRITE0);
+}
+
+static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915,
+ enum plane_id plane_id)
+{
+ if (DISPLAY_VER(i915) < 12)
+ return false;
+
+ /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
+ if (IS_DG1(i915) || IS_ROCKETLAKE(i915) ||
+ IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0))
+ return false;
+
+ /* Wa_22011186057 */
+ if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0))
+ return false;
+
+ return plane_id < PLANE_SPRITE4;
+}
+
+static u8 skl_get_plane_caps(struct drm_i915_private *i915,
+ enum pipe pipe, enum plane_id plane_id)
+{
+ u8 caps = INTEL_PLANE_CAP_TILING_X;
+
+ if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915))
+ caps |= INTEL_PLANE_CAP_TILING_Y;
+ if (DISPLAY_VER(i915) < 12)
+ caps |= INTEL_PLANE_CAP_TILING_Yf;
+
+ if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
+ caps |= INTEL_PLANE_CAP_CCS_RC;
+ if (DISPLAY_VER(i915) >= 12)
+ caps |= INTEL_PLANE_CAP_CCS_RC_CC;
+ }
+
+ if (gen12_plane_has_mc_ccs(i915, plane_id))
+ caps |= INTEL_PLANE_CAP_CCS_MC;
+
+ return caps;
+}
+
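Editor's note: these caps are what intel_fb_plane_get_modifiers() consumes when building the per-plane modifier list. A minimal sketch of that kind of gating, assuming the INTEL_PLANE_CAP_* bits introduced in this series (the helper itself is hypothetical, and the real table also ties the CCS modifiers to their base tiling caps):

static bool plane_caps_allow(u8 caps, u64 modifier)
{
	switch (modifier) {
	case DRM_FORMAT_MOD_LINEAR:
		return true;				/* always supported */
	case I915_FORMAT_MOD_X_TILED:
		return caps & INTEL_PLANE_CAP_TILING_X;
	case I915_FORMAT_MOD_Y_TILED:
		return caps & INTEL_PLANE_CAP_TILING_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return caps & INTEL_PLANE_CAP_TILING_Yf;
	case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
		return caps & INTEL_PLANE_CAP_CCS_MC;
	default:
		return false;
	}
}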
struct intel_plane *
skl_universal_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe, enum plane_id plane_id)
plane->id = plane_id;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id);
- plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id);
- if (plane->has_fbc) {
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
- }
+ intel_fbc_add_plane(skl_plane_fbc(dev_priv, pipe, plane_id), plane);
if (DISPLAY_VER(dev_priv) >= 11) {
plane->min_width = icl_plane_min_width;
- plane->max_width = icl_plane_max_width;
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ plane->max_width = icl_hdr_plane_max_width;
+ else
+ plane->max_width = icl_sdr_plane_max_width;
plane->max_height = icl_plane_max_height;
plane->min_cdclk = icl_plane_min_cdclk;
} else if (DISPLAY_VER(dev_priv) >= 10) {
}
plane->max_stride = skl_plane_max_stride;
- plane->update_plane = skl_update_plane;
- plane->disable_plane = skl_disable_plane;
+ plane->update_noarm = skl_plane_update_noarm;
+ plane->update_arm = skl_plane_update_arm;
+ plane->disable_arm = skl_plane_disable_arm;
plane->get_hw_state = skl_plane_get_hw_state;
plane->check_plane = skl_plane_check;
formats = skl_get_plane_formats(dev_priv, pipe,
plane_id, &num_formats);
- plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id);
- if (DISPLAY_VER(dev_priv) >= 12) {
- modifiers = gen12_get_plane_modifiers(dev_priv, plane_id);
+ if (DISPLAY_VER(dev_priv) >= 12)
plane_funcs = &gen12_plane_funcs;
- } else {
- if (plane->has_ccs)
- modifiers = skl_plane_format_modifiers_ccs;
- else
- modifiers = skl_plane_format_modifiers_noccs;
+ else
plane_funcs = &skl_plane_funcs;
- }
if (plane_id == PLANE_PRIMARY)
plane_type = DRM_PLANE_TYPE_PRIMARY;
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
+ modifiers = intel_fb_plane_get_modifiers(dev_priv,
+ skl_get_plane_caps(dev_priv, pipe, plane_id));
+
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
pipe_name(pipe));
+
+ kfree(modifiers);
+
if (ret)
goto fail;
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&i915->gt))
+ if (!intel_has_reset_engine(to_gt(i915)))
return -ENODEV;
pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
if (!protected) {
pc->uses_protected_content = false;
- } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+ } else if (!intel_pxp_is_enabled(&to_gt(i915)->pxp)) {
ret = -ENODEV;
} else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
!(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
*/
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- if (!intel_pxp_is_active(&i915->gt.pxp))
- ret = intel_pxp_start(&i915->gt.pxp);
+ if (!intel_pxp_is_active(&to_gt(i915)->pxp))
+ ret = intel_pxp_start(&to_gt(i915)->pxp);
}
return ret;
if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
!IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
drm_dbg(&i915->drm,
- "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
+ "Bonding not supported on this platform\n");
return -ENODEV;
}
intel_engine_mask_t prev_mask;
/* FIXME: This is NIY for execlists */
- if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+ if (!(intel_uc_uses_guc_submission(&to_gt(i915)->uc)))
return -ENODEV;
if (get_user(slot, &ext->engine_index))
sseu = &pc->legacy_rcs_sseu;
}
- ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+ ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu);
if (ret)
return ret;
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
struct intel_sseu rcs_sseu)
{
- const struct intel_gt *gt = &ctx->i915->gt;
+ const struct intel_gt *gt = to_gt(ctx->i915);
struct intel_engine_cs *engine;
struct i915_gem_engines *e, *err;
enum intel_engine_id id;
* collateral damage, and we should not pretend we can by
* exposing the interface.
*/
- if (!intel_has_reset_engine(&ctx->i915->gt))
+ if (!intel_has_reset_engine(to_gt(ctx->i915)))
return -ENODEV;
i915_gem_context_clear_persistence(ctx);
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(&i915->gt, 0);
+ ppgtt = i915_ppgtt_create(to_gt(i915), 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
return -EINVAL;
- ret = intel_gt_terminally_wedged(&i915->gt);
+ ret = intel_gt_terminally_wedged(to_gt(i915));
if (ret)
return ret;
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_gem_mman.h"
+#include "i915_mm.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
#include "i915_gem_ttm.h"
if (args->flags & ~(I915_MMAP_WC))
return -EINVAL;
- if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
+ if (args->flags & I915_MMAP_WC && !pat_enabled())
return -ENODEV;
obj = i915_gem_object_lookup(file, args->handle);
goto insert;
/* Attempt to reap some mmap space from dead objects */
- err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+ err = intel_gt_retire_requests_timeout(to_gt(i915), MAX_SCHEDULE_TIMEOUT,
NULL);
if (err)
goto err;
if (HAS_LMEM(to_i915(dev)))
mmap_type = I915_MMAP_TYPE_FIXED;
- else if (boot_cpu_has(X86_FEATURE_PAT))
+ else if (pat_enabled())
mmap_type = I915_MMAP_TYPE_WC;
else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
return -ENODEV;
break;
case I915_MMAP_OFFSET_WC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_WC;
break;
break;
case I915_MMAP_OFFSET_UC:
- if (!boot_cpu_has(X86_FEATURE_PAT))
+ if (!pat_enabled())
return -ENODEV;
type = I915_MMAP_TYPE_UC;
break;
* Copyright © 2020 Intel Corporation
*/
+#include <linux/agp_backend.h>
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
#include <asm/smp.h>
#include <drm/i915_drm.h>
+#include <drm/intel-gtt.h>
#include "gem/i915_gem_lmem.h"
#include "intel_gtt.h"
#include "gen8_ppgtt.h"
- static int
- i915_get_ggtt_vma_pages(struct i915_vma *vma);
-
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
unsigned long color,
u64 *start,
return false;
}
-void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to suspend the mappings for
+ *
+ * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ */
+void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
int open;
- mutex_lock(&ggtt->vm.mutex);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
+
+ mutex_lock(&vm->mutex);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
- list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
i915_vma_wait_for_bind(vma);
}
}
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
- ggtt->invalidate(ggtt);
+ vm->clear_range(vm, 0, vm->total);
- atomic_set(&ggtt->vm.open, open);
+ atomic_set(&vm->open, open);
+
- mutex_unlock(&ggtt->vm.mutex);
+ mutex_unlock(&vm->mutex);
+}
+
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
+{
+ i915_ggtt_suspend_vm(&ggtt->vm);
+ ggtt->invalidate(ggtt);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
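Editor's note: worth calling out in the helper above is the atomic_xchg()/atomic_set() pair around the loop. Dropping vm->open to zero makes any VMA unbind performed during the walk skip its individual PTE rewrite, because the whole range is cleared in one go afterwards; the old count is then restored for resume. Reduced to the bare pattern (a sketch under the vm->open semantics of this series, not driver code):

	open = atomic_xchg(&vm->open, 0);  /* unbinds now skip PTE writes */
	/* ... wait for / tear down the bound VMAs ... */
	vm->clear_range(vm, 0, vm->total); /* one bulk clear instead */
	atomic_set(&vm->open, open);       /* restore for resume */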
return 0;
}
- int ggtt_set_pages(struct i915_vma *vma)
- {
- int ret;
-
- GEM_BUG_ON(vma->pages);
-
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
-
- vma->page_sizes = vma->obj->mm.page_sizes;
-
- return 0;
- }
-
static void gen6_gmch_remove(struct i915_address_space *vm)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
}
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
if (needs_idle_maps(i915)) {
drm_notice(&i915->drm,
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
- ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
- ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
{
int ret;
- ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
+ ret = ggtt_probe_hw(&i915->ggtt, to_gt(i915));
if (ret)
return ret;
ggtt->invalidate(ggtt);
}
-void i915_ggtt_resume(struct i915_ggtt *ggtt)
+/**
+ * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
+ * @vm: The VM to restore the mappings for
+ *
+ * Restore the memory mappings for all objects mapped to HW via the GGTT or a
+ * DPT page table.
+ *
+ * Returns %true if restoring the mapping for any object that was in a write
+ * domain before suspend.
+ */
+bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
struct i915_vma *vma;
- bool flush = false;
+ bool write_domain_objs = false;
int open;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
/* First fill our portion of the GTT with scratch pages */
- ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
+ vm->clear_range(vm, 0, vm->total);
/* Skip rewriting PTE on VMA unbind. */
- open = atomic_xchg(&ggtt->vm.open, 0);
+ open = atomic_xchg(&vm->open, 0);
/* clflush objects bound into the GGTT and rebind them. */
- list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
+ list_for_each_entry(vma, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
unsigned int was_bound =
atomic_read(&vma->flags) & I915_VMA_BIND_MASK;
GEM_BUG_ON(!was_bound);
- vma->ops->bind_vma(&ggtt->vm, NULL, vma,
+ vma->ops->bind_vma(vm, NULL, vma,
obj ? obj->cache_level : 0,
was_bound);
if (obj) { /* only used during resume => exclusive access */
- flush |= fetch_and_zero(&obj->write_domain);
+ write_domain_objs |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
}
}
- atomic_set(&ggtt->vm.open, open);
+ atomic_set(&vm->open, open);
+
+ return write_domain_objs;
+}
+
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
+{
+ bool flush;
+
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
+
+ flush = i915_ggtt_resume_vm(&ggtt->vm);
+
ggtt->invalidate(ggtt);
if (flush)
intel_ggtt_restore_fences(ggtt);
}
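Editor's note: the reason for factoring out the _vm variants is that a display DPT can now be suspended and resumed with the same walk as the GGTT. A hedged sketch of the expected consumer (intel_dpt_resume() in intel_dpt.c; its exact body is assumed here):

static void example_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	/* fb_lock protects the framebuffer list during the walk */
	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}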
-
- static struct scatterlist *
- rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg)
- {
- unsigned int column, row;
- unsigned int src_idx;
-
- for (column = 0; column < width; column++) {
- unsigned int left;
-
- src_idx = src_stride * (height - 1) + column + offset;
- for (row = 0; row < height; row++) {
- st->nents++;
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
- sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) =
- i915_gem_object_get_dma_address(obj, src_idx);
- sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
- src_idx -= src_stride;
- }
-
- left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, left, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = left;
- sg = sg_next(sg);
- }
-
- return sg;
- }
-
- static noinline struct sg_table *
- intel_rotate_pages(struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
- {
- unsigned int size = intel_rotation_info_size(rot_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
- sg = rotate_pages(obj, rot_info->plane[i].offset,
- rot_info->plane[i].width, rot_info->plane[i].height,
- rot_info->plane[i].src_stride,
- rot_info->plane[i].dst_stride,
- st, sg);
-
- return st;
-
- err_sg_alloc:
- kfree(st);
- err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width,
- rot_info->plane[0].height, size);
-
- return ERR_PTR(ret);
- }
-
- static struct scatterlist *
- add_padding_pages(unsigned int count,
- struct sg_table *st, struct scatterlist *sg)
- {
- st->nents++;
-
- /*
- * The DE ignores the PTEs for the padding tiles, the sg entry
- * here is just a convenience to indicate how many padding PTEs
- * to insert at this spot.
- */
- sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
- sg_dma_address(sg) = 0;
- sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
- sg = sg_next(sg);
-
- return sg;
- }
-
- static struct scatterlist *
- remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int offset, unsigned int alignment_pad,
- unsigned int width, unsigned int height,
- unsigned int src_stride, unsigned int dst_stride,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
- {
- unsigned int row;
-
- if (!width || !height)
- return sg;
-
- if (alignment_pad)
- sg = add_padding_pages(alignment_pad, st, sg);
-
- for (row = 0; row < height; row++) {
- unsigned int left = width * I915_GTT_PAGE_SIZE;
-
- while (left) {
- dma_addr_t addr;
- unsigned int length;
-
- /*
- * We don't need the pages, but need to initialize
- * the entries so the sg list can be happily traversed.
- * The only thing we need are DMA addresses.
- */
-
- addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
-
- length = min(left, length);
-
- st->nents++;
-
- sg_set_page(sg, NULL, length, 0);
- sg_dma_address(sg) = addr;
- sg_dma_len(sg) = length;
- sg = sg_next(sg);
-
- offset += length / I915_GTT_PAGE_SIZE;
- left -= length;
- }
-
- offset += src_stride - width;
-
- left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
-
- if (!left)
- continue;
-
- sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
- }
-
- *gtt_offset += alignment_pad + dst_stride * height;
-
- return sg;
- }
-
- static struct scatterlist *
- remap_contiguous_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset,
- unsigned int count,
- struct sg_table *st, struct scatterlist *sg)
- {
- struct scatterlist *iter;
- unsigned int offset;
-
- iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
- GEM_BUG_ON(!iter);
-
- do {
- unsigned int len;
-
- len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
- count << PAGE_SHIFT);
- sg_set_page(sg, NULL, len, 0);
- sg_dma_address(sg) =
- sg_dma_address(iter) + (offset << PAGE_SHIFT);
- sg_dma_len(sg) = len;
-
- st->nents++;
- count -= len >> PAGE_SHIFT;
- if (count == 0)
- return sg;
-
- sg = __sg_next(sg);
- iter = __sg_next(iter);
- offset = 0;
- } while (1);
- }
-
- static struct scatterlist *
- remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
- unsigned int obj_offset, unsigned int alignment_pad,
- unsigned int size,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
- {
- if (!size)
- return sg;
-
- if (alignment_pad)
- sg = add_padding_pages(alignment_pad, st, sg);
-
- sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
- sg = sg_next(sg);
-
- *gtt_offset += alignment_pad + size;
-
- return sg;
- }
-
- static struct scatterlist *
- remap_color_plane_pages(const struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj,
- int color_plane,
- struct sg_table *st, struct scatterlist *sg,
- unsigned int *gtt_offset)
- {
- unsigned int alignment_pad = 0;
-
- if (rem_info->plane_alignment)
- alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
-
- if (rem_info->plane[color_plane].linear)
- sg = remap_linear_color_plane_pages(obj,
- rem_info->plane[color_plane].offset,
- alignment_pad,
- rem_info->plane[color_plane].size,
- st, sg,
- gtt_offset);
-
- else
- sg = remap_tiled_color_plane_pages(obj,
- rem_info->plane[color_plane].offset,
- alignment_pad,
- rem_info->plane[color_plane].width,
- rem_info->plane[color_plane].height,
- rem_info->plane[color_plane].src_stride,
- rem_info->plane[color_plane].dst_stride,
- st, sg,
- gtt_offset);
-
- return sg;
- }
-
- static noinline struct sg_table *
- intel_remap_pages(struct intel_remapped_info *rem_info,
- struct drm_i915_gem_object *obj)
- {
- unsigned int size = intel_remapped_info_size(rem_info);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int gtt_offset = 0;
- int ret = -ENOMEM;
- int i;
-
- /* Allocate target SG list. */
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, size, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
- sg = st->sgl;
-
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
- sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
-
- i915_sg_trim(st);
-
- return st;
-
- err_sg_alloc:
- kfree(st);
- err_st_alloc:
-
- drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width,
- rem_info->plane[0].height, size);
-
- return ERR_PTR(ret);
- }
-
- static noinline struct sg_table *
- intel_partial_pages(const struct i915_ggtt_view *view,
- struct drm_i915_gem_object *obj)
- {
- struct sg_table *st;
- struct scatterlist *sg;
- unsigned int count = view->partial.size;
- int ret = -ENOMEM;
-
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st)
- goto err_st_alloc;
-
- ret = sg_alloc_table(st, count, GFP_KERNEL);
- if (ret)
- goto err_sg_alloc;
-
- st->nents = 0;
-
- sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
-
- sg_mark_end(sg);
- i915_sg_trim(st); /* Drop any unused tail entries. */
-
- return st;
-
- err_sg_alloc:
- kfree(st);
- err_st_alloc:
- return ERR_PTR(ret);
- }
-
- static int
- i915_get_ggtt_vma_pages(struct i915_vma *vma)
- {
- int ret;
-
- /*
- * The vma->pages are only valid within the lifespan of the borrowed
- * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
- * must be the vma->pages. A simple rule is that vma->pages must only
- * be accessed when the obj->mm.pages are pinned.
- */
- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
-
- switch (vma->ggtt_view.type) {
- default:
- GEM_BUG_ON(vma->ggtt_view.type);
- fallthrough;
- case I915_GGTT_VIEW_NORMAL:
- vma->pages = vma->obj->mm.pages;
- return 0;
-
- case I915_GGTT_VIEW_ROTATED:
- vma->pages =
- intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
- break;
-
- case I915_GGTT_VIEW_REMAPPED:
- vma->pages =
- intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
- break;
-
- case I915_GGTT_VIEW_PARTIAL:
- vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- break;
- }
-
- ret = 0;
- if (IS_ERR(vma->pages)) {
- ret = PTR_ERR(vma->pages);
- vma->pages = NULL;
- drm_err(&vma->vm->i915->drm,
- "Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
- }
- return ret;
- }
* Copyright © 2019 Intel Corporation
*/
+#include <drm/intel-gtt.h>
+
#include "intel_gt_debugfs.h"
#include "gem/i915_gem_lmem.h"
#include "shmem_utils.h"
#include "pxp/intel_pxp.h"
- void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
- gt->i915 = i915;
- gt->uncore = &i915->uncore;
-
spin_lock_init(&gt->irq_lock);
INIT_LIST_HEAD(&gt->closed_vma);
intel_rps_init_early(&gt->rps);
}
+ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
+ {
+ gt->i915 = i915;
+ gt->uncore = &i915->uncore;
+ }
+
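Editor's note: the split matters for probe ordering. The backpointer half has to run before anything takes a gt reference (notably uncore early init), while the heavier half can wait until allocations are possible. Condensed from the i915_driver_early_probe() changes later in this series:

	intel_gt_init_early(to_gt(dev_priv), dev_priv);    /* gt->i915, gt->uncore */
	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
	/* ... workqueues, vlv suspend state, TTM, wopcm ... */
	__intel_gt_init_early(to_gt(dev_priv), dev_priv); /* locks, lists, rps */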
int intel_gt_probe_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
+#include <linux/sched/mm.h>
#include <drm/drm_cache.h>
INIT_LIST_HEAD(&vm->bound_list);
}
- void clear_pages(struct i915_vma *vma)
- {
- GEM_BUG_ON(!vma->pages);
-
- if (vma->pages != vma->obj->mm.pages) {
- sg_free_table(vma->pages);
- kfree(vma->pages);
- }
- vma->pages = NULL;
-
- memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
- }
-
void *__px_vaddr(struct drm_i915_gem_object *p)
{
enum i915_map_type type;
do {
struct drm_i915_gem_object *obj;
- obj = vm->alloc_pt_dma(vm, size);
+ obj = vm->alloc_scratch_dma(vm, size);
if (IS_ERR(obj))
goto skip;
if (size == I915_GTT_PAGE_SIZE_4K)
return -ENOMEM;
+ /*
+ * If we need 64K minimum GTT pages for device local-memory,
+ * like on XEHPSDV, then we need to fail the allocation here,
+ * otherwise we can't safely support the insertion of
+ * local-memory pages for this vm, since the HW expects the
+ * correct physical alignment and size when the page-table is
+ * operating in 64K GTT mode, which includes any scratch PTEs,
+ * since userspace can still touch them.
+ */
+ if (HAS_64K_PAGES(vm->i915))
+ return -ENOMEM;
+
size = I915_GTT_PAGE_SIZE_4K;
} while (1);
}
*/
void (*unbind_vma)(struct i915_address_space *vm,
struct i915_vma *vma);
-
- int (*set_pages)(struct i915_vma *vma);
- void (*clear_pages)(struct i915_vma *vma);
};
struct i915_address_space {
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
+ struct drm_i915_gem_object *
+ (*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
u64 (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
unsigned long lmem_pt_obj_flags);
+void i915_ggtt_suspend_vm(struct i915_address_space *vm);
+bool i915_ggtt_resume_vm(struct i915_address_space *vm);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
const struct drm_i915_gem_object * const scratch);
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt);
- int ggtt_set_pages(struct i915_vma *vma);
- int ppgtt_set_pages(struct i915_vma *vma);
- void clear_pages(struct i915_vma *vma);
-
void ppgtt_bind_vma(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash,
struct i915_vma *vma,
int i;
for (i = 0; i < 5; i++) {
- preempt_disable();
+ local_irq_disable();
cycles[i] = -ENGINE_READ_FW(engine, RING_TIMESTAMP);
dt[i] = ktime_get();
dt[i] = ktime_sub(ktime_get(), dt[i]);
cycles[i] += ENGINE_READ_FW(engine, RING_TIMESTAMP);
- preempt_enable();
+ local_irq_enable();
}
/* Use the median of both cycle/dt; close enough */
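Editor's note: the switch from preempt_disable() to local_irq_disable() is about measurement integrity. An interrupt landing between the two RING_TIMESTAMP reads would be charged to the elapsed time and skew the inferred clock frequency. The bracketing pattern, reduced to essentials (illustrative; read_hw_ts() is a stand-in for ENGINE_READ_FW(engine, RING_TIMESTAMP)):

	local_irq_disable();
	cycles = -read_hw_ts();          /* HW counter at start */
	dt = ktime_get();
	dt = ktime_sub(ktime_get(), dt); /* cost of one ktime_get() */
	cycles += read_hw_ts();          /* HW delta over the same window */
	local_irq_enable();
	/* frequency ~= cycles / dt; the caller medians five such samples */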
SUBTEST(live_gt_resume),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
int intel_gt_pm_late_selftests(struct drm_i915_private *i915)
SUBTEST(live_rc6_ctx_wa),
};
- if (intel_gt_is_wedged(&i915->gt))
+ if (intel_gt_is_wedged(to_gt(i915)))
return 0;
- return intel_gt_live_subtests(tests, &i915->gt);
+ return intel_gt_live_subtests(tests, to_gt(i915));
}
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
+ #include "gt/intel_rps.h"
#include "intel_uc.h"
#include "i915_drv.h"
}
/* Intermediate platforms are HuC authentication only */
- if (IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_S(i915) && !IS_ADLS_RPLS(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
else
attempts = 1;
+ intel_rps_raise_unslice(&uc_to_gt(uc)->rps);
+
while (attempts--) {
/*
* Always reset the GuC just before (re)loading, so
ret = intel_guc_slpc_enable(&guc->slpc);
if (ret)
goto err_submission;
+ } else {
+ /* Restore GT back to RPn for non-SLPC path */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
}
drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
err_log_capture:
__uc_capture_load_err_log(uc);
err_out:
+ /* Return GT back to RPn */
+ intel_rps_lower_unslice(&uc_to_gt(uc)->rps);
+
__uc_sanitize(uc);
if (!ret) {
#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_scheduler.h"
-#include "i915_trace.h"
#include "intel_pm.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
intel_device_info_print_static(INTEL_INFO(i915), &p);
intel_device_info_print_runtime(RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
- intel_gt_info_print(&i915->gt.info, &p);
+ intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
gpu = NULL;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(&i915->gt, ALL_ENGINES);
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES);
if (IS_ERR(gpu))
return PTR_ERR(gpu);
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
struct drm_printer p = drm_seq_file_printer(m);
intel_gt_pm_frequency_dump(gt, &p);
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_rps *rps = &dev_priv->gt.rps;
+ struct intel_rps *rps = &to_gt(dev_priv)->rps;
seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps)));
seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps)));
- seq_printf(m, "GPU busy? %s\n", yesno(dev_priv->gt.awake));
+ seq_printf(m, "GPU busy? %s\n", yesno(to_gt(dev_priv)->awake));
seq_printf(m, "Boosts outstanding? %d\n",
atomic_read(&rps->num_waiters));
seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.init_wakeref));
- seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
+ seq_printf(m, "GPU idle: %s\n", yesno(!to_gt(dev_priv)->awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "GT awake? %s [%d], %llums\n",
- yesno(i915->gt.awake),
- atomic_read(&i915->gt.wakeref.count),
- ktime_to_ms(intel_gt_get_awake_time(&i915->gt)));
+ yesno(to_gt(i915)->awake),
+ atomic_read(&to_gt(i915)->wakeref.count),
+ ktime_to_ms(intel_gt_get_awake_time(to_gt(i915))));
seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n",
- i915->gt.clock_frequency,
- i915->gt.clock_period_ns);
+ to_gt(i915)->clock_frequency,
+ to_gt(i915)->clock_period_ns);
p = drm_seq_file_printer(m);
for_each_uabi_engine(engine, i915)
intel_engine_dump(engine, &p, "%s\n", engine->name);
- intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule);
+ intel_gt_show_timelines(to_gt(i915), &p, i915_request_show_with_schedule);
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_show(&i915->gt, val);
+ return intel_gt_debugfs_reset_show(to_gt(i915), val);
}
static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- return intel_gt_debugfs_reset_store(&i915->gt, val);
+ return intel_gt_debugfs_reset_store(to_gt(i915), val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
* This would lead to infinite waits as we're doing timestamp
* difference on the CS with only 32bits.
*/
- if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX)
+ if (intel_gt_ns_to_clock_interval(to_gt(i915), val) > U32_MAX)
return -EINVAL;
atomic64_set(&i915->perf.noa_programming_delay, val);
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
+ unsigned int flags;
int ret;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- ret = gt_drop_caches(&i915->gt, val);
+ ret = gt_drop_caches(to_gt(i915), val);
if (ret)
return ret;
fs_reclaim_acquire(GFP_KERNEL);
+ flags = memalloc_noreclaim_save();
if (val & DROP_BOUND)
i915_gem_shrink(NULL, i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
if (val & DROP_SHRINK_ALL)
i915_gem_shrink_all(i915);
+ memalloc_noreclaim_restore(flags);
fs_reclaim_release(GFP_KERNEL);
if (val & DROP_RCU)
static int i915_sseu_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
return intel_sseu_status(m, gt);
}
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_open(to_gt(i915));
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
+ return intel_gt_pm_debugfs_forcewake_user_release(to_gt(i915));
}
static const struct file_operations i915_forcewake_fops = {
--- /dev/null
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/oom.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/pnp.h>
+#include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
+#include <linux/vt.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_probe_helper.h>
+
+#include "display/intel_acpi.h"
+#include "display/intel_bw.h"
+#include "display/intel_cdclk.h"
+#include "display/intel_display_types.h"
+#include "display/intel_dmc.h"
+#include "display/intel_dp.h"
+#include "display/intel_dpt.h"
+#include "display/intel_fbdev.h"
+#include "display/intel_hotplug.h"
+#include "display/intel_overlay.h"
+#include "display/intel_pch_refclk.h"
+#include "display/intel_pipe_crc.h"
+#include "display/intel_pps.h"
+#include "display/intel_sprite.h"
+#include "display/intel_vga.h"
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_ioctls.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
+#include "gt/intel_rc6.h"
+
+#include "pxp/intel_pxp_pm.h"
+
+#include "i915_debugfs.h"
+#include "i915_driver.h"
+#include "i915_drv.h"
+#include "i915_ioc32.h"
+#include "i915_irq.h"
+#include "i915_memcpy.h"
+#include "i915_perf.h"
+#include "i915_query.h"
+#include "i915_suspend.h"
+#include "i915_switcheroo.h"
+#include "i915_sysfs.h"
+#include "i915_vgpu.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
+#include "intel_memory_region.h"
+#include "intel_pcode.h"
+#include "intel_pm.h"
+#include "intel_region_ttm.h"
+#include "vlv_suspend.h"
+
+static const struct drm_driver i915_drm_driver;
+
+static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
+{
+ int domain = pci_domain_nr(to_pci_dev(dev_priv->drm.dev)->bus);
+
+ dev_priv->bridge_dev =
+ pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
+ if (!dev_priv->bridge_dev) {
+ drm_err(&dev_priv->drm, "bridge device not found\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
+{
+ int reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret;
+
+ if (GRAPHICS_VER(dev_priv) >= 4)
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
+ return 0;
+#endif
+
+ /* Get some space for it */
+ dev_priv->mch_res.name = "i915 MCHBAR";
+ dev_priv->mch_res.flags = IORESOURCE_MEM;
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
+ &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ return ret;
+ }
+
+ if (GRAPHICS_VER(dev_priv) >= 4)
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+ return 0;
+}
+
+/* Set up MCHBAR if possible; record in mchbar_need_disable whether teardown must disable it again */
+static void
+intel_setup_mchbar(struct drm_i915_private *dev_priv)
+{
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, we don't need to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev_priv))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_i915_private *dev_priv)
+{
+ int mchbar_reg = GRAPHICS_VER(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
+ u32 deven_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
+ &deven_val);
+ deven_val &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
+ deven_val);
+ } else {
+ u32 mchbar_val;
+
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ &mchbar_val);
+ mchbar_val &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
+ mchbar_val);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+ /*
+ * The i915 workqueue is primarily used for batched retirement of
+ * requests (and thus managing bo) once the task has been completed
+ * by the GPU. i915_retire_requests() is called directly when we
+ * need high-priority retirement, such as waiting for an explicit
+ * bo.
+ *
+ * It is also used for periodic low-priority events, such as
+ * idle-timers and recording error state.
+ *
+ * All tasks on the workqueue are expected to acquire the dev mutex
+ * so there is no point in running more than one instance of the
+ * workqueue at any time. Use an ordered one.
+ */
+ dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+ if (dev_priv->wq == NULL)
+ goto out_err;
+
+ dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+ if (dev_priv->hotplug.dp_wq == NULL)
+ goto out_free_wq;
+
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(dev_priv->wq);
+out_err:
+ drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
+
+ return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+ destroy_workqueue(dev_priv->hotplug.dp_wq);
+ destroy_workqueue(dev_priv->wq);
+}
+
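Editor's note: the "no point in running more than one instance" argument rests on alloc_ordered_workqueue(): items on an ordered queue execute strictly one at a time, in submission order. A self-contained illustration (demo code, not part of the driver):

#include <linux/errno.h>
#include <linux/workqueue.h>

/* work_a is guaranteed to complete before work_b starts, on any number
 * of CPUs, because the queue is ordered. */
static int demo_ordered(struct work_struct *work_a, struct work_struct *work_b)
{
	struct workqueue_struct *wq = alloc_ordered_workqueue("demo", 0);

	if (!wq)
		return -ENOMEM;

	queue_work(wq, work_a);	/* runs first */
	queue_work(wq, work_b);	/* starts only after work_a returns */

	destroy_workqueue(wq);	/* flushes queued work before freeing */
	return 0;
}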
+/*
+ * We don't keep the workarounds for pre-production hardware, so we expect our
+ * driver to fail on these machines in one way or another. A little warning on
+ * dmesg may help both the user and the bug triagers.
+ *
+ * Our policy for removing pre-production workarounds is to keep the
+ * current gen workarounds as a guide to the bring-up of the next gen
+ * (workarounds have a habit of persisting!). Anything older than that
+ * should be removed along with the complications they introduce.
+ */
+static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
+{
+ bool pre = false;
+
+ pre |= IS_HSW_EARLY_SDV(dev_priv);
+ pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
+ pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
+ pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+ pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
+ pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
+
+ if (pre) {
+ drm_err(&dev_priv->drm, "This is a pre-production stepping. "
+ "It may not be fully functional.\n");
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ }
+}
+
+static void sanitize_gpu(struct drm_i915_private *i915)
+{
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
- __intel_gt_reset(&i915->gt, ALL_ENGINES);
++ __intel_gt_reset(to_gt(i915), ALL_ENGINES);
+}
+
+/**
+ * i915_driver_early_probe - setup state not requiring device access
+ * @dev_priv: device private
+ *
+ * Initialize everything that is a "SW-only" state, that is state not
+ * requiring accessing the device or exposing the driver via kernel internal
+ * or userspace interfaces. Example steps belonging here: lock initialization,
+ * system memory allocation, setting up device specific attributes and
+ * function hooks not requiring accessing the device.
+ */
+static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ intel_device_info_subplatform_init(dev_priv);
+ intel_step_init(dev_priv);
+
++ intel_gt_init_early(to_gt(dev_priv), dev_priv);
+ intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
- intel_uncore_init_early(&dev_priv->uncore, dev_priv);
++ intel_uncore_init_early(&dev_priv->uncore, to_gt(dev_priv));
+
+ spin_lock_init(&dev_priv->irq_lock);
+ spin_lock_init(&dev_priv->gpu_error.lock);
+ mutex_init(&dev_priv->backlight_lock);
+
+ mutex_init(&dev_priv->sb_lock);
+ cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
+
+ mutex_init(&dev_priv->audio.mutex);
+ mutex_init(&dev_priv->wm.wm_mutex);
+ mutex_init(&dev_priv->pps_mutex);
+ mutex_init(&dev_priv->hdcp_comp_mutex);
+
+ i915_memcpy_init_early(dev_priv);
+ intel_runtime_pm_init_early(&dev_priv->runtime_pm);
+
+ ret = i915_workqueues_init(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = vlv_suspend_init(dev_priv);
+ if (ret < 0)
+ goto err_workqueues;
+
+ ret = intel_region_ttm_device_init(dev_priv);
+ if (ret)
+ goto err_ttm;
+
+ intel_wopcm_init_early(&dev_priv->wopcm);
+
- intel_gt_init_early(&dev_priv->gt, dev_priv);
++ __intel_gt_init_early(to_gt(dev_priv), dev_priv);
+
+ i915_gem_init_early(dev_priv);
+
+ /* This must be called before any calls to HAS_PCH_* */
+ intel_detect_pch(dev_priv);
+
+ intel_pm_setup(dev_priv);
+ ret = intel_power_domains_init(dev_priv);
+ if (ret < 0)
+ goto err_gem;
+ intel_irq_init(dev_priv);
+ intel_init_display_hooks(dev_priv);
+ intel_init_clock_gating_hooks(dev_priv);
+
+ intel_detect_preproduction_hw(dev_priv);
+
+ return 0;
+
+err_gem:
+ i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
++ intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_region_ttm_device_fini(dev_priv);
+err_ttm:
+ vlv_suspend_cleanup(dev_priv);
+err_workqueues:
+ i915_workqueues_cleanup(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_late_release - cleanup the setup done in
+ * i915_driver_early_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_late_release(struct drm_i915_private *dev_priv)
+{
+ intel_irq_fini(dev_priv);
+ intel_power_domains_cleanup(dev_priv);
+ i915_gem_cleanup_early(dev_priv);
- intel_gt_driver_late_release(&dev_priv->gt);
++ intel_gt_driver_late_release(to_gt(dev_priv));
+ intel_region_ttm_device_fini(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
+ i915_workqueues_cleanup(dev_priv);
+
+ cpu_latency_qos_remove_request(&dev_priv->sb_qos);
+ mutex_destroy(&dev_priv->sb_lock);
+
+ i915_params_free(&dev_priv->params);
+}
+
+/**
+ * i915_driver_mmio_probe - setup device MMIO
+ * @dev_priv: device private
+ *
+ * Setup minimal device state necessary for MMIO accesses later in the
+ * initialization sequence. The setup here should avoid any other device-wide
+ * side effects or exposing the driver via kernel internal or user space
+ * interfaces.
+ */
+static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ ret = i915_get_bridge_dev(dev_priv);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_uncore_setup_mmio(&dev_priv->uncore);
+ if (ret < 0)
+ goto err_bridge;
+
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
+ if (ret)
+ goto err_mmio;
+
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev_priv);
+ intel_device_info_runtime_init(dev_priv);
+
- ret = intel_gt_init_mmio(&dev_priv->gt);
++ ret = intel_gt_init_mmio(to_gt(dev_priv));
+ if (ret)
+ goto err_uncore;
+
+ /* As early as possible, scrub existing GPU state before clobbering */
+ sanitize_gpu(dev_priv);
+
+ return 0;
+
+err_uncore:
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
+err_mmio:
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
+err_bridge:
+ pci_dev_put(dev_priv->bridge_dev);
+
+ return ret;
+}
+
+/**
+ * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
+{
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
+ intel_uncore_cleanup_mmio(&dev_priv->uncore);
+ pci_dev_put(dev_priv->bridge_dev);
+}
+
+static void intel_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ intel_gvt_sanitize_options(dev_priv);
+}
+
+/**
+ * i915_set_dma_info - set all relevant PCI dma info as configured for the
+ * platform
+ * @i915: valid i915 instance
+ *
+ * Set the dma max segment size, device and coherent masks. The dma mask set
+ * needs to occur before i915_ggtt_probe_hw.
+ *
+ * A couple of platforms have special needs. Address them as well.
+ *
+ */
+static int i915_set_dma_info(struct drm_i915_private *i915)
+{
+ unsigned int mask_size = INTEL_INFO(i915)->dma_mask_size;
+ int ret;
+
+ GEM_BUG_ON(!mask_size);
+
+ /*
+ * We don't have a max segment size, so set it to the max so sg's
+ * debugging layer doesn't complain
+ */
+ dma_set_max_seg_size(i915->drm.dev, UINT_MAX);
+
+ ret = dma_set_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ /* overlay on gen2 is broken and can't address above 1G */
+ if (GRAPHICS_VER(i915) == 2)
+ mask_size = 30;
+
+ /*
+ * 965GM sometimes incorrectly writes to hardware status page (HWS)
+ * using 32bit addressing, overwriting memory if HWS is located
+ * above 4GB.
+ *
+ * The documentation also mentions an issue with undefined
+ * behaviour if any general state is accessed within a page above 4GB,
+ * which also needs to be handled carefully.
+ */
+ if (IS_I965G(i915) || IS_I965GM(i915))
+ mask_size = 32;
+
+ ret = dma_set_coherent_mask(i915->drm.dev, DMA_BIT_MASK(mask_size));
+ if (ret)
+ goto mask_err;
+
+ return 0;
+
+mask_err:
+ drm_err(&i915->drm, "Can't set DMA mask/consistent mask (%d)\n", ret);
+ return ret;
+}
+
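Editor's note: for concreteness, DMA_BIT_MASK(n) expands to ((1ULL << n) - 1) for n < 64, so the two special cases above pin the coherent mask as follows (values shown for illustration):

#include <linux/dma-mapping.h>

static const u64 gen2_overlay_mask = DMA_BIT_MASK(30); /* 0x3fffffff, <1 GiB */
static const u64 i965_hws_mask     = DMA_BIT_MASK(32); /* 0xffffffff, <4 GiB */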
+/**
+ * i915_driver_hw_probe - setup state requiring device access
+ * @dev_priv: device private
+ *
+ * Setup state that requires accessing the device, but doesn't require
+ * exposing the driver via kernel internal or userspace interfaces.
+ */
+static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ int ret;
+
+ if (i915_inject_probe_failure(dev_priv))
+ return -ENODEV;
+
+ if (HAS_PPGTT(dev_priv)) {
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_full_ppgtt(dev_priv)) {
+ i915_report_error(dev_priv,
+ "incompatible vGPU found, support for isolated ppGTT required\n");
+ return -ENXIO;
+ }
+ }
+
+ if (HAS_EXECLISTS(dev_priv)) {
+ /*
+ * Older GVT emulation depends upon intercepting CSB mmio,
+ * which we no longer use, preferring to use the HWSP cache
+ * instead.
+ */
+ if (intel_vgpu_active(dev_priv) &&
+ !intel_vgpu_has_hwsp_emulation(dev_priv)) {
+ i915_report_error(dev_priv,
+ "old vGPU host found, support for HWSP emulation required\n");
+ return -ENXIO;
+ }
+ }
+
+ intel_sanitize_options(dev_priv);
+
+ /* needs to be done before ggtt probe */
+ intel_dram_edram_detect(dev_priv);
+
+ ret = i915_set_dma_info(dev_priv);
+ if (ret)
+ return ret;
+
+ i915_perf_init(dev_priv);
+
+ ret = i915_ggtt_probe_hw(dev_priv);
+ if (ret)
+ goto err_perf;
+
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, dev_priv->drm.driver);
+ if (ret)
+ goto err_ggtt;
+
+ ret = i915_ggtt_init_hw(dev_priv);
+ if (ret)
+ goto err_ggtt;
+
+ ret = intel_memory_regions_hw_probe(dev_priv);
+ if (ret)
+ goto err_ggtt;
+
- intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt);
++ intel_gt_init_hw_early(to_gt(dev_priv), &dev_priv->ggtt);
+
- ret = intel_gt_probe_lmem(&dev_priv->gt);
++ ret = intel_gt_probe_lmem(to_gt(dev_priv));
+ if (ret)
+ goto err_mem_regions;
+
+ ret = i915_ggtt_enable_hw(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm, "failed to enable GGTT\n");
+ goto err_mem_regions;
+ }
+
+ pci_set_master(pdev);
+
+ /* On the 945G/GM, the chipset reports the MSI capability on the
+ * integrated graphics even though the support isn't actually there
+ * according to the published specs. It doesn't appear to function
+ * correctly in testing on 945G.
+ * This may be a side effect of MSI having been made available for PEG
+ * and the registers being closely associated.
+ *
+ * According to chipset errata, on the 965GM, MSI interrupts may
+ * be lost or delayed, and was defeatured. MSI interrupts seem to
+ * get lost on g4x as well, and interrupt delivery seems to stay
+ * properly dead afterwards. So we'll just disable them for all
+ * pre-gen5 chipsets.
+ *
+ * dp aux and gmbus irq on gen4 seems to be able to generate legacy
+ * interrupts even when in MSI mode. This results in spurious
+ * interrupt warnings if the legacy irq no. is shared with another
+ * device. The kernel then disables that interrupt source and so
+ * prevents the other device from working properly.
+ */
+ if (GRAPHICS_VER(dev_priv) >= 5) {
+ if (pci_enable_msi(pdev) < 0)
+ drm_dbg(&dev_priv->drm, "can't enable MSI");
+ }
+
+ ret = intel_gvt_init(dev_priv);
+ if (ret)
+ goto err_msi;
+
+ intel_opregion_setup(dev_priv);
+
+ ret = intel_pcode_init(dev_priv);
+ if (ret)
+ goto err_msi;
+
+ /*
+ * Fill the dram structure to get the system dram info. This will be
+ * used for memory latency calculation.
+ */
+ intel_dram_detect(dev_priv);
+
+ intel_bw_init_hw(dev_priv);
+
+ return 0;
+
+err_msi:
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+err_mem_regions:
+ intel_memory_regions_driver_release(dev_priv);
+err_ggtt:
+ i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
+err_perf:
+ i915_perf_fini(dev_priv);
+ return ret;
+}
+
+/**
+ * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
+ * @dev_priv: device private
+ */
+static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
+{
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+
+ i915_perf_fini(dev_priv);
+
+ if (pdev->msi_enabled)
+ pci_disable_msi(pdev);
+}
+
+/**
+ * i915_driver_register - register the driver with the rest of the system
+ * @dev_priv: device private
+ *
+ * Perform any steps necessary to make the driver available via kernel
+ * internal or userspace interfaces.
+ */
+static void i915_driver_register(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+
+ i915_gem_driver_register(dev_priv);
+ i915_pmu_register(dev_priv);
+
+ intel_vgpu_register(dev_priv);
+
+ /* Reveal our presence to userspace */
+ if (drm_dev_register(dev, 0)) {
+ drm_err(&dev_priv->drm,
+ "Failed to register driver for userspace access!\n");
+ return;
+ }
+
+ i915_debugfs_register(dev_priv);
+ i915_setup_sysfs(dev_priv);
+
+ /* Depends on sysfs having been initialized */
+ i915_perf_register(dev_priv);
+
- intel_gt_driver_register(&dev_priv->gt);
++ intel_gt_driver_register(to_gt(dev_priv));
+
+ intel_display_driver_register(dev_priv);
+
+ intel_power_domains_enable(dev_priv);
+ intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+ intel_register_dsm_handler();
+
+ if (i915_switcheroo_register(dev_priv))
+ drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
+}
+
+/**
+ * i915_driver_unregister - cleanup the registration done in i915_driver_register()
+ * @dev_priv: device private
+ */
+static void i915_driver_unregister(struct drm_i915_private *dev_priv)
+{
+ i915_switcheroo_unregister(dev_priv);
+
+ intel_unregister_dsm_handler();
+
+ intel_runtime_pm_disable(&dev_priv->runtime_pm);
+ intel_power_domains_disable(dev_priv);
+
+ intel_display_driver_unregister(dev_priv);
+
- intel_gt_driver_unregister(&dev_priv->gt);
++ intel_gt_driver_unregister(to_gt(dev_priv));
+
+ i915_perf_unregister(dev_priv);
+ i915_pmu_unregister(dev_priv);
+
+ i915_teardown_sysfs(dev_priv);
+ drm_dev_unplug(&dev_priv->drm);
+
+ i915_gem_driver_unregister(dev_priv);
+}
+
+void
+i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p)
+{
+ drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915)));
+}
+
+static void i915_welcome_messages(struct drm_i915_private *dev_priv)
+{
+ if (drm_debug_enabled(DRM_UT_DRIVER)) {
+ struct drm_printer p = drm_debug_printer("i915 device info:");
+
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
+ INTEL_DEVID(dev_priv),
+ INTEL_REVID(dev_priv),
+ intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ intel_subplatform(RUNTIME_INFO(dev_priv),
+ INTEL_INFO(dev_priv)->platform),
+ GRAPHICS_VER(dev_priv));
+
+ intel_device_info_print_static(INTEL_INFO(dev_priv), &p);
+ intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p);
+ i915_print_iommu_status(dev_priv, &p);
- intel_gt_info_print(&dev_priv->gt.info, &p);
++ intel_gt_info_print(&to_gt(dev_priv)->info, &p);
+ }
+
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
+ drm_info(&dev_priv->drm,
+ "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+}
+
+static struct drm_i915_private *
+i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct intel_device_info *device_info;
+ struct drm_i915_private *i915;
+
+ i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915))
+ return i915;
+
+ pci_set_drvdata(pdev, i915);
+
+ /* Device parameters start as a copy of module parameters. */
+ i915_params_copy(&i915->params, &i915_modparams);
+
+ /* Setup the write-once "constant" device info */
+ device_info = mkwrite_device_info(i915);
+ memcpy(device_info, match_info, sizeof(*device_info));
+ RUNTIME_INFO(i915)->device_id = pdev->device;
+
+ return i915;
+}
+
+/**
+ * i915_driver_probe - setup chip and create an initial config
+ * @pdev: PCI device
+ * @ent: matching PCI ID entry
+ *
+ * The driver probe routine has to do several things:
+ * - drive output discovery via intel_modeset_init()
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
+ struct drm_i915_private *i915;
+ int ret;
+
+ i915 = i915_driver_create(pdev, ent);
+ if (IS_ERR(i915))
+ return PTR_ERR(i915);
+
+ /* Disable nuclear pageflip by default on pre-ILK */
+ if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5)
+ i915->drm.driver_features &= ~DRIVER_ATOMIC;
+
+ /*
+ * Check if we support fake LMEM -- for now we only unleash this for
+ * the live selftests (test-and-exit).
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
+ if (GRAPHICS_VER(i915) >= 9 && i915_selftest.live < 0 &&
+ i915->params.fake_lmem_start) {
+ mkwrite_device_info(i915)->memory_regions =
+ REGION_SMEM | REGION_LMEM | REGION_STOLEN_SMEM;
+ GEM_BUG_ON(!HAS_LMEM(i915));
+ }
+ }
+#endif
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto out_fini;
+
+ ret = i915_driver_early_probe(i915);
+ if (ret < 0)
+ goto out_pci_disable;
+
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_vgpu_detect(i915);
+
+ ret = i915_driver_mmio_probe(i915);
+ if (ret < 0)
+ goto out_runtime_pm_put;
+
+ ret = i915_driver_hw_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_mmio;
+
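+ /*
+ * Display bring-up is split into phases: _noirq (before the IRQ
+ * handler is installed), _nogem (needs working interrupts) and the
+ * final intel_modeset_init() (needs GEM). The error labels below
+ * unwind these phases in reverse.
+ */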
+ ret = intel_modeset_init_noirq(i915);
+ if (ret < 0)
+ goto out_cleanup_hw;
+
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
+
+ ret = intel_modeset_init_nogem(i915);
+ if (ret)
+ goto out_cleanup_irq;
+
+ ret = i915_gem_init(i915);
+ if (ret)
+ goto out_cleanup_modeset2;
+
+ ret = intel_modeset_init(i915);
+ if (ret)
+ goto out_cleanup_gem;
+
+ i915_driver_register(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ i915_welcome_messages(i915);
+
+ i915->do_release = true;
+
+ return 0;
+
+out_cleanup_gem:
+ i915_gem_suspend(i915);
+ i915_gem_driver_remove(i915);
+ i915_gem_driver_release(i915);
+out_cleanup_modeset2:
+ /* FIXME clean up the error path */
+ intel_modeset_driver_remove(i915);
+ intel_irq_uninstall(i915);
+ intel_modeset_driver_remove_noirq(i915);
+ goto out_cleanup_modeset;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ intel_modeset_driver_remove_nogem(i915);
+out_cleanup_hw:
+ i915_driver_hw_remove(i915);
+ intel_memory_regions_driver_release(i915);
+ i915_ggtt_driver_release(i915);
+ i915_gem_drain_freed_objects(i915);
+ i915_ggtt_driver_late_release(i915);
+out_cleanup_mmio:
+ i915_driver_mmio_release(i915);
+out_runtime_pm_put:
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
+out_pci_disable:
+ pci_disable_device(pdev);
+out_fini:
+ i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+ return ret;
+}
+
+void i915_driver_remove(struct drm_i915_private *i915)
+{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ i915_driver_unregister(i915);
+
+ /* Flush any external code that may still be under the RCU lock */
+ synchronize_rcu();
+
+ i915_gem_suspend(i915);
+
+ intel_gvt_driver_remove(i915);
+
+ intel_modeset_driver_remove(i915);
+
+ intel_irq_uninstall(i915);
+
+ intel_modeset_driver_remove_noirq(i915);
+
+ i915_reset_error_state(i915);
+ i915_gem_driver_remove(i915);
+
+ intel_modeset_driver_remove_nogem(i915);
+
+ i915_driver_hw_remove(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+}
+
+static void i915_driver_release(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+
+ if (!dev_priv->do_release)
+ return;
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ i915_gem_driver_release(dev_priv);
+
+ intel_memory_regions_driver_release(dev_priv);
+ i915_ggtt_driver_release(dev_priv);
+ i915_gem_drain_freed_objects(dev_priv);
+ i915_ggtt_driver_late_release(dev_priv);
+
+ i915_driver_mmio_release(dev_priv);
+
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_driver_release(rpm);
+
+ i915_driver_late_release(dev_priv);
+}
+
+static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ int ret;
+
+ ret = i915_gem_open(i915, file);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * i915_driver_lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ *
+ * Additionally, in the non-mode setting case, we'll tear down the GTT
+ * and DMA structures, since the kernel won't be using them, and clean
+ * up any GEM state.
+ */
+static void i915_driver_lastclose(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ intel_fbdev_restore_mode(dev);
+
+ if (HAS_DISPLAY(i915))
+ vga_switcheroo_process_delayed_switch();
+}
+
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
+ i915_gem_context_close(file);
+
+ kfree_rcu(file_priv, rcu);
+
+ /* Catch up with all the deferred frees from "this" client */
+ i915_gem_flush_free_objects(to_i915(dev));
+}
+
+static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->suspend)
+ encoder->suspend(encoder);
+ drm_modeset_unlock_all(dev);
+}
+
+static void intel_shutdown_encoders(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *encoder;
+
+ if (!HAS_DISPLAY(dev_priv))
+ return;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_encoder(dev, encoder)
+ if (encoder->shutdown)
+ encoder->shutdown(encoder);
+ drm_modeset_unlock_all(dev);
+}
+
+void i915_driver_shutdown(struct drm_i915_private *i915)
+{
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
+ intel_runtime_pm_disable(&i915->runtime_pm);
+ intel_power_domains_disable(i915);
+
+ i915_gem_suspend(i915);
+
+ if (HAS_DISPLAY(i915)) {
+ drm_kms_helper_poll_disable(&i915->drm);
+
+ drm_atomic_helper_shutdown(&i915->drm);
+ }
+
+ intel_dp_mst_suspend(i915);
+
+ intel_runtime_pm_disable_interrupts(i915);
+ intel_hpd_cancel_work(i915);
+
+ intel_suspend_encoders(i915);
+ intel_shutdown_encoders(i915);
+
+ intel_dmc_ucode_suspend(i915);
+
+ /*
+ * The only requirement is to reboot with display DC states disabled,
+ * for now leaving all display power wells in the INIT power domain
+ * enabled.
+ *
+ * TODO:
+ * - unify the pci_driver::shutdown sequence here with the
+ * pci_driver.driver.pm.poweroff,poweroff_late sequence.
+ * - unify the driver remove and system/runtime suspend sequences with
+ * the above unified shutdown/poweroff sequence.
+ */
+ intel_power_domains_driver_remove(i915);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+
+ intel_runtime_pm_driver_release(&i915->runtime_pm);
+}
+
+static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+{
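+ /*
+ * Suspend-to-idle ("s2idle") is indicated by an ACPI target state
+ * below S3; without CONFIG_ACPI_SLEEP we conservatively report false
+ * and treat the suspend as a full S3 entry.
+ */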
+#if IS_ENABLED(CONFIG_ACPI_SLEEP)
+ if (acpi_target_system_state() < ACPI_STATE_S3)
+ return true;
+#endif
+ return false;
+}
+
+static int i915_drm_prepare(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ /*
+ * NB intel_display_suspend() may issue new requests after we've
+ * ostensibly marked the GPU as ready-to-sleep here. We need to
+ * split out that work and pull it forward so that after that point,
+ * the GPU is not woken again.
+ */
+ return i915_gem_backup_suspend(i915);
+}
+
+static int i915_drm_suspend(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ pci_power_t opregion_target_state;
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ /*
+ * We do a lot of poking in a lot of registers; make sure they work
+ * properly.
+ */
+ intel_power_domains_disable(dev_priv);
+ if (HAS_DISPLAY(dev_priv))
+ drm_kms_helper_poll_disable(dev);
+
+ pci_save_state(pdev);
+
+ intel_display_suspend(dev);
+
+ intel_dp_mst_suspend(dev_priv);
+
+ intel_runtime_pm_disable_interrupts(dev_priv);
+ intel_hpd_cancel_work(dev_priv);
+
+ intel_suspend_encoders(dev_priv);
+
+ intel_suspend_hw(dev_priv);
+
+ /* Must be called before GGTT is suspended. */
+ intel_dpt_suspend(dev_priv);
+ i915_ggtt_suspend(&dev_priv->ggtt);
+
+ i915_save_display(dev_priv);
+
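+ /*
+ * Note: PCI_D1 for suspend-to-idle is presumably the repurposed
+ * opregion notification meaning "suspended", not a real D1 target;
+ * see the firmware note in intel_runtime_suspend().
+ */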
+ opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
+ intel_opregion_suspend(dev_priv, opregion_target_state);
+
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
+
+ dev_priv->suspend_count++;
+
+ intel_dmc_ucode_suspend(dev_priv);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ return 0;
+}
+
+static enum i915_drm_suspend_mode
+get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
+{
+ if (hibernate)
+ return I915_DRM_SUSPEND_HIBERNATE;
+
+ if (suspend_to_idle(dev_priv))
+ return I915_DRM_SUSPEND_IDLE;
+
+ return I915_DRM_SUSPEND_MEM;
+}
+
+static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ int ret;
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ i915_gem_suspend_late(dev_priv);
+
+ intel_uncore_suspend(&dev_priv->uncore);
+
+ intel_power_domains_suspend(dev_priv,
+ get_suspend_mode(dev_priv, hibernation));
+
+ intel_display_power_suspend_late(dev_priv);
+
+ ret = vlv_suspend_complete(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
+ intel_power_domains_resume(dev_priv);
+
+ goto out;
+ }
+
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ if (suspend_to_idle(dev_priv))
+ pci_d3cold_disable(pdev);
+
+ pci_disable_device(pdev);
+ /*
+ * During hibernation on some platforms the BIOS may try to access
+ * the device even though it's already in D3 and hang the machine. So
+ * leave the device in D0 on those platforms and hope the BIOS will
+ * power down the device properly. The issue was seen on multiple old
+ * GENs with different BIOS vendors, so having an explicit blacklist
+ * is impractical; apply the workaround on everything pre GEN6. The
+ * platforms where the issue was seen:
+ * Lenovo Thinkpad X301, X61s, X60, T60, X41
+ * Fujitsu FSC S7110
+ * Acer Aspire 1830T
+ */
+ if (!(hibernation && GRAPHICS_VER(dev_priv) < 6))
+ pci_set_power_state(pdev, PCI_D3hot);
+
+out:
+ enable_rpm_wakeref_asserts(rpm);
+ if (!dev_priv->uncore.user_forcewake_count)
+ intel_runtime_pm_driver_release(rpm);
+
+ return ret;
+}
+
+int i915_driver_suspend_switcheroo(struct drm_i915_private *i915,
+ pm_message_t state)
+{
+ int error;
+
+ if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
+ return -EINVAL;
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ error = i915_drm_suspend(&i915->drm);
+ if (error)
+ return error;
+
+ return i915_drm_suspend_late(&i915->drm, false);
+}
+
+static int i915_drm_resume(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ int ret;
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ ret = intel_pcode_init(dev_priv);
+ if (ret)
+ return ret;
+
+ sanitize_gpu(dev_priv);
+
+ ret = i915_ggtt_enable_hw(dev_priv);
+ if (ret)
+ drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
+
+ i915_ggtt_resume(&dev_priv->ggtt);
+ /* Must be called after GGTT is resumed. */
+ intel_dpt_resume(dev_priv);
+
+ intel_dmc_ucode_resume(dev_priv);
+
+ i915_restore_display(dev_priv);
+ intel_pps_unlock_regs_wa(dev_priv);
+
+ intel_init_pch_refclk(dev_priv);
+
+ /*
+ * Interrupts have to be enabled before any batches are run. If not, the
+ * GPU will hang. i915_gem_init_hw() will initiate batches to
+ * update/restore the context.
+ *
+ * drm_mode_config_reset() needs AUX interrupts.
+ *
+ * Modeset enabling in intel_modeset_init_hw() also needs working
+ * interrupts.
+ */
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ if (HAS_DISPLAY(dev_priv))
+ drm_mode_config_reset(dev);
+
+ i915_gem_resume(dev_priv);
+
+ intel_modeset_init_hw(dev_priv);
+ intel_init_clock_gating(dev_priv);
+ intel_hpd_init(dev_priv);
+
+ /* MST sideband requires HPD interrupts enabled */
+ intel_dp_mst_resume(dev_priv);
+ intel_display_resume(dev);
+
+ intel_hpd_poll_disable(dev_priv);
+ if (HAS_DISPLAY(dev_priv))
+ drm_kms_helper_poll_enable(dev);
+
+ intel_opregion_resume(dev_priv);
+
+ intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
+
+ intel_power_domains_enable(dev_priv);
+
+ intel_gvt_resume(dev_priv);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ return 0;
+}
+
+static int i915_drm_resume_early(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ int ret;
+
+ /*
+ * We have a resume ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with an early
+ * resume hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+
+ /*
+ * Note that we need to set the power state explicitly, since we
+ * powered off the device during freeze and the PCI core won't power
+ * it back up for us during thaw. Powering off the device during
+ * freeze is not a hard requirement though, and during the
+ * suspend/resume phases the PCI core makes sure we get here with the
+ * device powered on. So in case we change our freeze logic and keep
+ * the device powered we can also remove the following set power state
+ * call.
+ */
+ ret = pci_set_power_state(pdev, PCI_D0);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "failed to set PCI D0 power state (%d)\n", ret);
+ return ret;
+ }
+
+ /*
+ * Note that pci_enable_device() first enables any parent bridge
+ * device and only then sets the power state for this device. The
+ * bridge enabling is a nop though, since bridge devices are resumed
+ * first. The order of enabling power and enabling the device is
+ * imposed by the PCI core as described above, so here we preserve the
+ * same order for the freeze/thaw phases.
+ *
+ * TODO: eventually we should remove pci_disable_device() /
+ * pci_enable_device() from suspend/resume. Due to how they
+ * depend on the device enable refcount we can't anyway depend on them
+ * disabling/enabling the device.
+ */
+ if (pci_enable_device(pdev))
+ return -EIO;
+
+ pci_set_master(pdev);
+
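+ /*
+ * Re-enable d3cold, undoing the pci_d3cold_disable() hammer applied
+ * in i915_drm_suspend_late().
+ */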
+ pci_d3cold_enable(pdev);
+
+ disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ ret = vlv_resume_prepare(dev_priv, false);
+ if (ret)
+ drm_err(&dev_priv->drm,
+ "Resume prepare failed: %d, continuing anyway\n", ret);
+
+ intel_uncore_resume_early(&dev_priv->uncore);
+
- intel_gt_check_and_clear_faults(&dev_priv->gt);
++ intel_gt_check_and_clear_faults(to_gt(dev_priv));
+
+ intel_display_power_resume_early(dev_priv);
+
+ intel_power_domains_resume(dev_priv);
+
+ enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+
+ return ret;
+}
+
+int i915_driver_resume_switcheroo(struct drm_i915_private *i915)
+{
+ int ret;
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ ret = i915_drm_resume_early(&i915->drm);
+ if (ret)
+ return ret;
+
+ return i915_drm_resume(&i915->drm);
+}
+
+static int i915_pm_prepare(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (!i915) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_prepare(&i915->drm);
+}
+
+static int i915_pm_suspend(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (!i915) {
+ dev_err(kdev, "DRM not initialized, aborting suspend.\n");
+ return -ENODEV;
+ }
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend(&i915->drm);
+}
+
+static int i915_pm_suspend_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ /*
+ * We have a suspend ordering issue with the snd-hda driver also
+ * requiring our device to be powered up. Due to the lack of a
+ * parent/child relationship we currently solve this with a late
+ * suspend hook.
+ *
+ * FIXME: This should be solved with a special hdmi sink device or
+ * similar so that power domains can be employed.
+ */
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_late(&i915->drm, false);
+}
+
+static int i915_pm_poweroff_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_suspend_late(&i915->drm, true);
+}
+
+static int i915_pm_resume_early(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_resume_early(&i915->drm);
+}
+
+static int i915_pm_resume(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+
+ if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
+ return 0;
+
+ return i915_drm_resume(&i915->drm);
+}
+
+/* freeze: before creating the hibernation_image */
+static int i915_pm_freeze(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ int ret;
+
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend(&i915->drm);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_freeze(i915);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int i915_pm_freeze_late(struct device *kdev)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ int ret;
+
+ if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
+ ret = i915_drm_suspend_late(&i915->drm, true);
+ if (ret)
+ return ret;
+ }
+
+ ret = i915_gem_freeze_late(i915);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* thaw: called after creating the hibernation image, but before turning off. */
+static int i915_pm_thaw_early(struct device *kdev)
+{
+ return i915_pm_resume_early(kdev);
+}
+
+static int i915_pm_thaw(struct device *kdev)
+{
+ return i915_pm_resume(kdev);
+}
+
+/* restore: called after loading the hibernation image. */
+static int i915_pm_restore_early(struct device *kdev)
+{
+ return i915_pm_resume_early(kdev);
+}
+
+static int i915_pm_restore(struct device *kdev)
+{
+ return i915_pm_resume(kdev);
+}
+
+static int intel_runtime_suspend(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ int ret;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
+
+ disable_rpm_wakeref_asserts(rpm);
+
+ /*
+ * We are safe here against re-faults, since the fault handler takes
+ * an RPM reference.
+ */
+ i915_gem_runtime_suspend(dev_priv);
+
- intel_gt_runtime_suspend(&dev_priv->gt);
++ intel_gt_runtime_suspend(to_gt(dev_priv));
+
+ intel_runtime_pm_disable_interrupts(dev_priv);
+
+ intel_uncore_suspend(&dev_priv->uncore);
+
+ intel_display_power_suspend(dev_priv);
+
+ ret = vlv_suspend_complete(dev_priv);
+ if (ret) {
+ drm_err(&dev_priv->drm,
+ "Runtime suspend failed, disabling it (%d)\n", ret);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
+
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
- intel_gt_runtime_resume(&dev_priv->gt);
++ intel_gt_runtime_resume(to_gt(dev_priv));
+
+ enable_rpm_wakeref_asserts(rpm);
+
+ return ret;
+ }
+
+ enable_rpm_wakeref_asserts(rpm);
+ intel_runtime_pm_driver_release(rpm);
+
+ if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
+ drm_err(&dev_priv->drm,
+ "Unclaimed access detected prior to suspending\n");
+
+ /*
+ * FIXME: Temporary hammer to avoid freezing the machine on our DGFX.
+ * This should be totally removed when we handle the pci states properly
+ * on runtime PM and on s2idle cases.
+ */
+ pci_d3cold_disable(pdev);
+ rpm->suspended = true;
+
+ /*
+ * FIXME: We really should find a document that references the arguments
+ * used below!
+ */
+ if (IS_BROADWELL(dev_priv)) {
+ /*
+ * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
+ * being detected, and the call we do at intel_runtime_resume()
+ * won't be able to restore them. Since PCI_D3hot matches the
+ * actual specification and appears to be working, use it.
+ */
+ intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
+ } else {
+ /*
+ * Current versions of firmware which depend on this opregion
+ * notification have repurposed the D1 definition to mean
+ * "runtime suspended" vs. what you would normally expect (D3)
+ * to distinguish it from notifications that might be sent via
+ * the suspend path.
+ */
+ intel_opregion_notify_adapter(dev_priv, PCI_D1);
+ }
+
+ assert_forcewakes_inactive(&dev_priv->uncore);
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ intel_hpd_poll_enable(dev_priv);
+
+ drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
+ return 0;
+}
+
+static int intel_runtime_resume(struct device *kdev)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
+ int ret;
+
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
+
+ drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
+ disable_rpm_wakeref_asserts(rpm);
+
+ intel_opregion_notify_adapter(dev_priv, PCI_D0);
+ rpm->suspended = false;
+ pci_d3cold_enable(pdev);
+ if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
+ drm_dbg(&dev_priv->drm,
+ "Unclaimed access during suspend, bios?\n");
+
+ intel_display_power_resume(dev_priv);
+
+ ret = vlv_resume_prepare(dev_priv, true);
+
+ intel_uncore_runtime_resume(&dev_priv->uncore);
+
+ intel_runtime_pm_enable_interrupts(dev_priv);
+
+ /*
+ * No point in rolling back things in case of an error, as the best
+ * we can do is to hope that things will still work (and disable RPM).
+ */
- intel_gt_runtime_resume(&dev_priv->gt);
++ intel_gt_runtime_resume(to_gt(dev_priv));
+
+ /*
+ * On VLV/CHV display interrupts are part of the display
+ * power well, so hpd is reinitialized from there. For
+ * everyone else do it here.
+ */
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
+ intel_hpd_init(dev_priv);
+ intel_hpd_poll_disable(dev_priv);
+ }
+
+ intel_enable_ipc(dev_priv);
+
+ enable_rpm_wakeref_asserts(rpm);
+
+ if (ret)
+ drm_err(&dev_priv->drm,
+ "Runtime resume failed, disabling it (%d)\n", ret);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
+
+ return ret;
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ /*
+ * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
+ * PMSG_RESUME]
+ */
+ .prepare = i915_pm_prepare,
+ .suspend = i915_pm_suspend,
+ .suspend_late = i915_pm_suspend_late,
+ .resume_early = i915_pm_resume_early,
+ .resume = i915_pm_resume,
+
+ /*
+ * S4 event handlers
+ * @freeze, @freeze_late : called (1) before creating the
+ * hibernation image [PMSG_FREEZE] and
+ * (2) after rebooting, before restoring
+ * the image [PMSG_QUIESCE]
+ * @thaw, @thaw_early : called (1) after creating the hibernation
+ * image, before writing it [PMSG_THAW]
+ * and (2) after failing to create or
+ * restore the image [PMSG_RECOVER]
+ * @poweroff, @poweroff_late: called after writing the hibernation
+ * image, before rebooting [PMSG_HIBERNATE]
+ * @restore, @restore_early : called after rebooting and restoring the
+ * hibernation image [PMSG_RESTORE]
+ */
+ .freeze = i915_pm_freeze,
+ .freeze_late = i915_pm_freeze_late,
+ .thaw_early = i915_pm_thaw_early,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_suspend,
+ .poweroff_late = i915_pm_poweroff_late,
+ .restore_early = i915_pm_restore_early,
+ .restore = i915_pm_restore,
+
+ /* S0ix (via runtime suspend) event handlers */
+ .runtime_suspend = intel_runtime_suspend,
+ .runtime_resume = intel_runtime_resume,
+};
+
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release_noglobal,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = i915_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
+ .llseek = noop_llseek,
+};
+
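+/*
+ * The legacy GEM pin/unpin ioctls are no longer supported; the
+ * I915_GEM_PIN/UNPIN entries in i915_ioctls[] below route here so
+ * userspace gets a clean -ENODEV.
+ */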
+static int
+i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return -ENODEV;
+}
+
+static const struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, drm_invalid_op, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE_EXT, i915_gem_create_ext_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_OFFSET, i915_gem_mmap_offset_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
+};
+
+static const struct drm_driver i915_drm_driver = {
+ /* Don't use MTRRs here; the Xserver or userspace app should
+ * deal with them for Intel hardware.
+ */
+ .driver_features =
+ DRIVER_GEM |
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ |
+ DRIVER_SYNCOBJ_TIMELINE,
+ .release = i915_driver_release,
+ .open = i915_driver_open,
+ .lastclose = i915_driver_lastclose,
+ .postclose = i915_driver_postclose,
+
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = i915_gem_prime_import,
+
+ .dumb_create = i915_gem_dumb_create,
+ .dumb_map_offset = i915_gem_dumb_mmap_offset,
+
+ .ioctls = i915_ioctls,
+ .num_ioctls = ARRAY_SIZE(i915_ioctls),
+ .fops = &i915_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
#include <linux/stackdepot.h>
#include <linux/xarray.h>
-#include <drm/intel-gtt.h>
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
+#include "intel_pm_types.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
struct drm_i915_gem_object;
-enum hpd_pin {
- HPD_NONE = 0,
- HPD_TV = HPD_NONE, /* TV is known to be unreliable */
- HPD_CRT,
- HPD_SDVO_B,
- HPD_SDVO_C,
- HPD_PORT_A,
- HPD_PORT_B,
- HPD_PORT_C,
- HPD_PORT_D,
- HPD_PORT_E,
- HPD_PORT_TC1,
- HPD_PORT_TC2,
- HPD_PORT_TC3,
- HPD_PORT_TC4,
- HPD_PORT_TC5,
- HPD_PORT_TC6,
-
- HPD_NUM_PINS
-};
-
-#define for_each_hpd_pin(__pin) \
- for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
-
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
-struct intel_audio_funcs {
- void (*audio_codec_enable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- void (*audio_codec_disable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-};
-
struct intel_cdclk_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config);
void (*commit_modeset_enables)(struct intel_atomic_state *state);
};
-
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-struct intel_fbc {
- /* This is always the inner lock when overlapping with struct_mutex and
- * it's the outer lock when overlapping with stolen_lock. */
- struct mutex lock;
- unsigned int possible_framebuffer_bits;
- unsigned int busy_bits;
- struct intel_crtc *crtc;
-
- struct drm_mm_node compressed_fb;
- struct drm_mm_node compressed_llb;
-
- u8 limit;
-
- bool false_color;
-
- bool active;
- bool activated;
- bool flip_pending;
-
- bool underrun_detected;
- struct work_struct underrun_work;
-
- /*
- * Due to the atomic rules we can't access some structures without the
- * appropriate locking, so we cache information here in order to avoid
- * these problems.
- */
- struct intel_fbc_state_cache {
- struct {
- unsigned int mode_flags;
- u32 hsw_bdw_pixel_rate;
- } crtc;
-
- struct {
- unsigned int rotation;
- int src_w;
- int src_h;
- bool visible;
- /*
- * Display surface base address adjustement for
- * pageflips. Note that on gen4+ this only adjusts up
- * to a tile, offsets within a tile are handled in
- * the hw itself (with the TILEOFF register).
- */
- int adjusted_x;
- int adjusted_y;
-
- u16 pixel_blend_mode;
- } plane;
-
- struct {
- const struct drm_format_info *format;
- unsigned int stride;
- u64 modifier;
- } fb;
-
- unsigned int fence_y_offset;
- u16 interval;
- s8 fence_id;
- bool psr2_active;
- } state_cache;
-
- /*
- * This structure contains everything that's relevant to program the
- * hardware registers. When we want to figure out if we need to disable
- * and re-enable FBC for a new configuration we just check if there's
- * something different in the struct. The genx_fbc_activate functions
- * are supposed to read from it in order to program the registers.
- */
- struct intel_fbc_reg_params {
- struct {
- enum pipe pipe;
- enum i9xx_plane_id i9xx_plane;
- } crtc;
-
- struct {
- const struct drm_format_info *format;
- unsigned int stride;
- u64 modifier;
- } fb;
-
- unsigned int cfb_stride;
- unsigned int cfb_size;
- unsigned int fence_y_offset;
- u16 override_cfb_stride;
- u16 interval;
- s8 fence_id;
- bool plane_visible;
- } params;
-
- const char *no_fbc_reason;
-};
-
/*
* HIGH_RR is the highest eDP panel refresh rate read from EDID
* LOW_RR is the lowest eDP panel refresh rate found from EDID
#define QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK (1<<8)
struct intel_fbdev;
-struct intel_fbc_work;
struct intel_gmbus {
struct i2c_adapter adapter;
* List of objects which are pending destruction.
*/
struct llist_head free_list;
- struct work_struct free_work;
+ struct delayed_work free_work;
/**
* Count of objects pending destructions. Used to skip needlessly
* waiting on an RCU barrier if no objects are waiting to be freed.
struct sdvo_device_mapping sdvo_mappings[2];
};
-enum intel_ddb_partitioning {
- INTEL_DDB_PART_1_2,
- INTEL_DDB_PART_5_6, /* IVB+ */
-};
-
-struct ilk_wm_values {
- u32 wm_pipe[3];
- u32 wm_lp[3];
- u32 wm_lp_spr[3];
- bool enable_fbc_wm;
- enum intel_ddb_partitioning partitioning;
-};
-
-struct g4x_pipe_wm {
- u16 plane[I915_MAX_PLANES];
- u16 fbc;
-};
-
-struct g4x_sr_wm {
- u16 plane;
- u16 cursor;
- u16 fbc;
-};
-
-struct vlv_wm_ddl_values {
- u8 plane[I915_MAX_PLANES];
-};
-
-struct vlv_wm_values {
- struct g4x_pipe_wm pipe[3];
- struct g4x_sr_wm sr;
- struct vlv_wm_ddl_values ddl[3];
- u8 level;
- bool cxsr;
-};
-
-struct g4x_wm_values {
- struct g4x_pipe_wm pipe[2];
- struct g4x_sr_wm sr;
- struct g4x_sr_wm hpll;
- bool cxsr;
- bool hpll_en;
- bool fbc_en;
-};
-
-struct skl_ddb_entry {
- u16 start, end; /* in number of blocks, 'end' is exclusive */
-};
-
-static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
-{
- return entry->end - entry->start;
-}
-
-static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
- const struct skl_ddb_entry *e2)
-{
- if (e1->start == e2->start && e1->end == e2->end)
- return true;
-
- return false;
-}
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
struct ida mock_region_instances;
};
+/* intel_audio.c private */
+struct intel_audio_funcs;
+struct intel_audio_private {
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *funcs;
+
+ /* hda/i915 audio component */
+ struct i915_audio_component *component;
+ bool component_registered;
+ /* mutex for audio/video sync */
+ struct mutex mutex;
+ int power_refcount;
+ u32 freq_cntrl;
+
+ /* Used to save the pipe-to-encoder mapping for audio */
+ struct intel_encoder *encoder_map[I915_MAX_PIPES];
+
+ /* necessary resource sharing with HDMI LPE audio driver. */
+ struct {
+ struct platform_device *platdev;
+ int irq;
+ } lpe;
+};
+
struct drm_i915_private {
struct drm_device drm;
u32 pipestat_irq_mask[I915_MAX_PIPES];
struct i915_hotplug hotplug;
- struct intel_fbc fbc;
+ struct intel_fbc *fbc;
struct i915_drrs drrs;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
/* Display internal color functions */
const struct intel_color_funcs *color_funcs;
- /* Display internal audio functions */
- const struct intel_audio_funcs *audio_funcs;
-
/* Display CDCLK functions */
const struct intel_cdclk_funcs *cdclk_funcs;
/* Kernel Modesetting */
- struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
- struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-
/**
* dpll and cdclk state is protected by connection_mutex
* dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
- /* hda/i915 audio component */
- struct i915_audio_component *audio_component;
- bool audio_component_registered;
- /**
- * av_mutex - mutex for audio/video sync
- *
- */
- struct mutex av_mutex;
- int audio_power_refcount;
- u32 audio_freq_cntrl;
-
u32 fdi_rx_config;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
struct i915_perf perf;
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
- struct intel_gt gt;
+ struct intel_gt gt0;
struct {
struct i915_gem_contexts {
bool ipc_enabled;
- /* Used to save the pipe-to-encoder mapping for audio */
- struct intel_encoder *av_enc_map[I915_MAX_PIPES];
-
- /* necessary resource sharing with HDMI LPE audio driver. */
- struct {
- struct platform_device *platdev;
- int irq;
- } lpe_audio;
+ struct intel_audio_private audio;
struct i915_pmu pmu;
return pci_get_drvdata(pdev);
}
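+ /*
+ * to_gt() returns the device's primary GT. This series renames the
+ * embedded struct intel_gt to gt0, presumably to make room for
+ * additional GTs; callers should use to_gt(i915) rather than touch
+ * i915->gt0 directly.
+ */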
+ static inline struct intel_gt *to_gt(struct drm_i915_private *i915)
+ {
+ return &i915->gt0;
+ }
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
#define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
-#define IS_CANNONLAKE(dev_priv) 0
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
#define IS_DG2_G11(dev_priv) \
IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
+#define IS_ADLS_RPLS(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
(IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
IS_GRAPHICS_STEP(__i915, since, until))
-#define IS_DG2_DISP_STEP(__i915, since, until) \
+#define IS_DG2_DISPLAY_STEP(__i915, since, until) \
(IS_DG2(__i915) && \
IS_DISPLAY_STEP(__i915, since, until))
#define HAS_PSR_HW_TRACKING(dev_priv) \
(INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12)
-#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
+#define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_MSLICES(dev_priv) \
(INTEL_INFO(dev_priv)->has_mslices)
+ /*
+ * Set this flag when the platform requires 64K GTT page sizes or larger for
+ * device local memory access. This flag also implies that we require, or
+ * at least support, the compact PT layout for the ppGTT when using the 64K
+ * GTT pages.
+ */
+ #define HAS_64K_PAGES(dev_priv) (INTEL_INFO(dev_priv)->has_64k_pages)
+
#define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc)
#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
INTEL_INFO(dev_priv)->has_pxp) && \
- VDBOX_MASK(&dev_priv->gt))
+ VDBOX_MASK(to_gt(dev_priv)))
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
-#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
+#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->display.pipe_mask))
-#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
+#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0)
-#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
+#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11)
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915);
}
-/* i915_drv.c */
-extern const struct dev_pm_ops i915_pm_ops;
-
-int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-void i915_driver_remove(struct drm_i915_private *i915);
-void i915_driver_shutdown(struct drm_i915_private *i915);
-
-int i915_resume_switcheroo(struct drm_i915_private *i915);
-int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-
+/* i915_getparam.c */
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
* armed the work again.
*/
while (atomic_read(&i915->mm.free_count)) {
- flush_work(&i915->mm.free_work);
+ flush_delayed_work(&i915->mm.free_work);
flush_delayed_work(&i915->bdev.wq);
rcu_barrier();
}
const struct i915_ggtt_view *view,
u64 size, u64 alignment, u64 flags);
- static inline struct i915_vma * __must_check
+ struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view,
- u64 size, u64 alignment, u64 flags)
- {
- return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
- }
+ u64 size, u64 alignment, u64 flags);
int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
unsigned long flags);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
-/* i915_mm.c */
-int remap_io_mapping(struct vm_area_struct *vma,
- unsigned long addr, unsigned long pfn, unsigned long size,
- struct io_mapping *iomap);
-int remap_io_sg(struct vm_area_struct *vma,
- unsigned long addr, unsigned long size,
- struct scatterlist *sgl, resource_size_t iobase);
-
static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
if (GRAPHICS_VER(i915) >= 11)
#include <drm/drm_drv.h>
#include "display/intel_de.h"
+#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "i915_drv.h"
#include "i915_irq.h"
-#include "i915_trace.h"
#include "intel_pm.h"
/**
static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
drm_crtc_handle_vblank(&crtc->base);
}
{
struct drm_i915_private *dev_priv =
container_of(work, typeof(*dev_priv), l3_parity.error_work);
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
u32 error_status, row, bank, subbank;
char *parity_event[6];
u32 misccpctl;
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
+ struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
struct drm_crtc_state *crtc_state = crtc->base.state;
struct drm_pending_vblank_event *e = crtc_state->event;
struct drm_device *dev = &i915->drm;
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
if (gt_iir)
- gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
if (pm_iir)
- gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
}
if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
- gen5_rps_irq_handler(&dev_priv->gt.rps);
+ gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
}
static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
if (gt_iir) {
raw_reg_write(regs, GTIIR, gt_iir);
if (GRAPHICS_VER(i915) >= 6)
- gen6_gt_irq_handler(&i915->gt, gt_iir);
+ gen6_gt_irq_handler(to_gt(i915), gt_iir);
else
- gen5_gt_irq_handler(&i915->gt, gt_iir);
+ gen5_gt_irq_handler(to_gt(i915), gt_iir);
ret = IRQ_HANDLED;
}
u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
if (pm_iir) {
raw_reg_write(regs, GEN6_PMIIR, pm_iir);
- gen6_rps_irq_handler(&i915->gt.rps, pm_iir);
+ gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
ret = IRQ_HANDLED;
}
}
}
/* Find, queue (onto bottom-halves), then clear each source */
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
+ gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
{
struct drm_i915_private *i915 = arg;
void __iomem * const regs = i915->uncore.regs;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
u32 master_ctl;
u32 gu_misc_iir;
static irqreturn_t dg1_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = arg;
- struct intel_gt *gt = &i915->gt;
+ struct intel_gt *gt = to_gt(i915);
void __iomem * const regs = gt->uncore->regs;
u32 master_tile_ctl, master_ctl;
u32 gu_misc_iir;
if (IS_CHERRYVIEW(dev_priv))
intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
- intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK);
+ intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
}
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
ibx_irq_reset(dev_priv);
}
intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
- gen5_gt_irq_reset(&dev_priv->gt);
+ gen5_gt_irq_reset(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
gen8_master_intr_disable(dev_priv->uncore.regs);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
gen8_display_irq_reset(dev_priv);
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
static void gen11_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
gen11_master_intr_disable(dev_priv->uncore.regs);
static void dg1_irq_reset(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
dg1_master_intr_disable(dev_priv->uncore.regs);
intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
- gen8_gt_irq_reset(&dev_priv->gt);
+ gen8_gt_irq_reset(to_gt(dev_priv));
GEN3_IRQ_RESET(uncore, GEN8_PCU_);
ibx_irq_postinstall(dev_priv);
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
display_mask | extra_mask);
static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen5_gt_irq_postinstall(&dev_priv->gt);
+ gen5_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
gen8_de_irq_postinstall(dev_priv);
gen8_master_intr_enable(dev_priv->uncore.regs);
static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
- struct intel_gt *gt = &dev_priv->gt;
+ struct intel_gt *gt = to_gt(dev_priv);
struct intel_uncore *uncore = gt->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
- gen8_gt_irq_postinstall(&dev_priv->gt);
+ gen8_gt_irq_postinstall(to_gt(dev_priv));
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0], iir);
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[RCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
iir);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_cs_irq(dev_priv->gt.engine[VCS0],
+ intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
iir >> 25);
if (iir & I915_MASTER_ERROR_INTERRUPT)
/* pre-gen11 the guc irqs bits are in the upper 16 bits of the pm reg */
if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
- dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
+ to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;
if (!HAS_DISPLAY(dev_priv))
return;
*
*/
-#include <linux/vga_switcheroo.h>
-
#include <drm/drm_drv.h>
#include <drm/i915_pciids.h>
+#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_pci.h"
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
+#define ICL_COLORS \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
/* Keep in gen based order, and chronological order within a gen */
#define I830_FEATURES \
GEN(2), \
.is_mobile = 1, \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_overlay = 1, \
.display.cursor_needs_physical = 1, \
.display.overlay_needs_physical = 1, \
#define I845_FEATURES \
GEN(2), \
- .pipe_mask = BIT(PIPE_A), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A), \
+ .display.pipe_mask = BIT(PIPE_A), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A), \
.display.has_overlay = 1, \
.display.overlay_needs_physical = 1, \
.display.has_gmch = 1, \
#define GEN3_FEATURES \
GEN(3), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
.platform_engine_mask = BIT(RCS0), \
#define GEN4_FEATURES \
GEN(4), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
#define GEN5_FEATURES \
GEN(5), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
#define GEN6_FEATURES \
GEN(6), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
#define GEN7_FEATURES \
GEN(7), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
GEN7_FEATURES,
PLATFORM(INTEL_IVYBRIDGE),
.gt = 2,
- .pipe_mask = 0, /* legal, last one wins */
- .cpu_transcoder_mask = 0,
+ .display.pipe_mask = 0, /* legal, last one wins */
+ .display.cpu_transcoder_mask = 0,
.has_l3_dpf = 1,
};
PLATFORM(INTEL_VALLEYVIEW),
GEN(7),
.is_lp = 1,
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B),
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_reset_engine = true,
#define G75_FEATURES \
GEN7_FEATURES, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
static const struct intel_device_info chv_info = {
PLATFORM(INTEL_CHERRYVIEW),
GEN(8),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C),
.display.has_hotplug = 1,
.is_lp = 1,
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.dbuf.slice_mask = BIT(DBUF_S1), \
.display.has_hotplug = 1, \
.platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_A) | BIT(TRANSCODER_DSI_C), \
.has_64bit_reloc = 1, \
#define GEN11_FEATURES \
GEN9_FEATURES, \
GEN11_DEFAULT_PAGE_SIZES, \
- .abox_mask = BIT(0), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.abox_mask = BIT(0), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
[TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
GEN(11), \
- .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
+ ICL_COLORS, \
.dbuf.size = 2048, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
.display.has_dsc = 1, \
#define GEN12_FEATURES \
GEN11_FEATURES, \
GEN(12), \
- .abox_mask = GENMASK(2, 1), \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
+ .display.abox_mask = GENMASK(2, 1), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | \
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), \
.pipe_offsets = { \
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
.has_pxp = 1, \
- .display.has_dsb = 1
+ .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */
static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
static const struct intel_device_info rkl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ROCKETLAKE),
- .abox_mask = BIT(0),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ .display.abox_mask = BIT(0),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
BIT(TRANSCODER_C),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
DGFX_FEATURES,
.graphics.rel = 10,
PLATFORM(INTEL_DG1),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.require_force_probe = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) |
static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
}
#define XE_LPD_FEATURES \
- .abox_mask = GENMASK(1, 0), \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \
- .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
- BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \
+ .display.abox_mask = GENMASK(1, 0), \
+ .color = { .degamma_lut_size = 128, .gamma_lut_size = 1024, \
+ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
+ DRM_COLOR_LUT_EQUAL_CHANNELS, \
+ }, \
.dbuf.size = 4096, \
.dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \
BIT(DBUF_S4), \
.display.has_ipc = 1, \
.display.has_psr = 1, \
.display.ver = 13, \
- .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
+ .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
[TRANSCODER_C] = PIPE_C_OFFSET, \
[TRANSCODER_D] = PIPE_D_OFFSET, \
+ [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \
}, \
.trans_offsets = { \
[TRANSCODER_A] = TRANSCODER_A_OFFSET, \
[TRANSCODER_B] = TRANSCODER_B_OFFSET, \
[TRANSCODER_C] = TRANSCODER_C_OFFSET, \
[TRANSCODER_D] = TRANSCODER_D_OFFSET, \
+ [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \
+ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
}, \
XE_LPD_CURSOR_OFFSETS
GEN12_FEATURES,
XE_LPD_FEATURES,
PLATFORM(INTEL_ALDERLAKE_P),
- .require_force_probe = 1,
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D) |
+ BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1),
.display.has_cdclk_crawl = 1,
.display.has_modular_fia = 1,
.display.has_psr_hw_tracking = 0,
DGFX_FEATURES,
PLATFORM(INTEL_XEHPSDV),
.display = { },
- .pipe_mask = 0,
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
.graphics.rel = 55,
.media.rel = 55,
PLATFORM(INTEL_DG2),
+ .has_64k_pages = 1,
.platform_engine_mask =
BIT(RCS0) | BIT(BCS0) |
BIT(VECS0) | BIT(VECS1) |
BIT(VCS0) | BIT(VCS2),
.require_force_probe = 1,
+ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
+ BIT(TRANSCODER_C) | BIT(TRANSCODER_D),
};
#undef PLATFORM
INTEL_ADLS_IDS(&adl_s_info),
INTEL_ADLP_IDS(&adl_p_info),
INTEL_DG1_IDS(&dg1_info),
+ INTEL_RPLS_IDS(&adl_s_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
if (PCI_FUNC(pdev->devfn))
return -ENODEV;
- /*
- * apple-gmux is needed on dual GPU MacBook Pro
- * to probe the panel if we're the inactive GPU.
- */
- if (vga_switcheroo_client_probe_defer(pdev))
+ /* Detect if we need to wait for other drivers early on */
+ if (intel_modeset_probe_defer(pdev))
return -EPROBE_DEFER;
err = i915_driver_probe(pdev, ent);
#define VLV_G3DCTL _MMIO(0x9024)
#define VLV_GSCKGCTL _MMIO(0x9028)
+#define FBC_LLC_READ_CTRL _MMIO(0x9044)
+#define FBC_LLC_FULLY_OPEN REG_BIT(30)
+
#define GEN6_MBCTL _MMIO(0x0907c)
#define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4)
#define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3)
#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9)
#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8)
#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5)
+#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0)
#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008)
#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31)
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
#define FPGA_DBG _MMIO(0x42300)
-#define FPGA_DBG_RM_NOCLAIM (1 << 31)
+#define FPGA_DBG_RM_NOCLAIM REG_BIT(31)
#define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 0x2028)
-#define CLAIM_ER_CLR (1 << 31)
-#define CLAIM_ER_OVERFLOW (1 << 16)
-#define CLAIM_ER_CTR_MASK 0xffff
+#define CLAIM_ER_CLR REG_BIT(31)
+#define CLAIM_ER_OVERFLOW REG_BIT(16)
+#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0)
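/*
 * The mask/bit conversions throughout this series use the type-safe
 * register helpers from i915_reg.h; roughly (simplified sketch, the
 * real definitions add compile-time range checks):
 *
 *	REG_BIT(n)		u32 single-bit mask, i.e. BIT(n)
 *	REG_GENMASK(h, l)	u32 mask covering bits h..l
 *	REG_FIELD_PREP(m, v)	shift value v into field mask m
 *
 * e.g. CLAIM_ER_CTR_MASK above is now REG_GENMASK(15, 0) instead of
 * the open-coded 0xffff.
 */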
#define DERRMR _MMIO(0x44050)
/* Note that HBLANK events are reserved on bdw+ */
#define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */
#define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */
#define FBC_CONTROL _MMIO(0x3208)
-#define FBC_CTL_EN REG_BIT(31)
-#define FBC_CTL_PERIODIC REG_BIT(30)
-#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
-#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
-#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
-#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
-#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */
-#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
-#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
-#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
-#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
+#define FBC_CTL_EN REG_BIT(31)
+#define FBC_CTL_PERIODIC REG_BIT(30)
+#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16)
+#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x))
+#define FBC_CTL_STOP_ON_MOD REG_BIT(15)
+#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */
+#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */
+#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5)
+#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x))
+#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x))
#define FBC_COMMAND _MMIO(0x320c)
-#define FBC_CMD_COMPRESS (1 << 0)
+#define FBC_CMD_COMPRESS REG_BIT(0)
#define FBC_STATUS _MMIO(0x3210)
-#define FBC_STAT_COMPRESSING (1 << 31)
-#define FBC_STAT_COMPRESSED (1 << 30)
-#define FBC_STAT_MODIFIED (1 << 29)
-#define FBC_STAT_CURRENT_LINE_SHIFT (0)
-#define FBC_CONTROL2 _MMIO(0x3214)
-#define FBC_CTL_FENCE_DBL (0 << 4)
-#define FBC_CTL_IDLE_IMM (0 << 2)
-#define FBC_CTL_IDLE_FULL (1 << 2)
-#define FBC_CTL_IDLE_LINE (2 << 2)
-#define FBC_CTL_IDLE_DEBUG (3 << 2)
-#define FBC_CTL_CPU_FENCE (1 << 1)
-#define FBC_CTL_PLANE(plane) ((plane) << 0)
-#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */
-#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4)
+#define FBC_STAT_COMPRESSING REG_BIT(31)
+#define FBC_STAT_COMPRESSED REG_BIT(30)
+#define FBC_STAT_MODIFIED REG_BIT(29)
+#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0)
+#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */
+#define FBC_CTL_FENCE_DBL REG_BIT(4)
+#define FBC_CTL_IDLE_MASK REG_GENMASK(3, 2)
+#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0)
+#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1)
+#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2)
+#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3)
+#define FBC_CTL_CPU_FENCE_EN REG_BIT(1)
+#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0)
+#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane))
+#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */
+#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */
+#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1)
+#define FBC_MOD_NUM_VALID REG_BIT(0)
+#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 registers */
+#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */
+#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0)
+#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1)
+#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2)
+#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3)
#define FBC_LL_SIZE (1536)
-#define FBC_LLC_READ_CTRL _MMIO(0x9044)
-#define FBC_LLC_FULLY_OPEN (1 << 30)
-
/* Framebuffer compression for GM45+ */
#define DPFC_CB_BASE _MMIO(0x3200)
+#define ILK_DPFC_CB_BASE _MMIO(0x43200)
#define DPFC_CONTROL _MMIO(0x3208)
-#define DPFC_CTL_EN (1 << 31)
-#define DPFC_CTL_PLANE(plane) ((plane) << 30)
-#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29)
-#define DPFC_CTL_FENCE_EN (1 << 29)
-#define IVB_DPFC_CTL_FENCE_EN (1 << 28)
-#define DPFC_CTL_PERSISTENT_MODE (1 << 25)
-#define DPFC_SR_EN (1 << 10)
-#define DPFC_CTL_LIMIT_1X (0 << 6)
-#define DPFC_CTL_LIMIT_2X (1 << 6)
-#define DPFC_CTL_LIMIT_4X (2 << 6)
+#define ILK_DPFC_CONTROL _MMIO(0x43208)
+#define DPFC_CTL_EN REG_BIT(31)
+#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */
+#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */
+#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */
+#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane))
+#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */
+#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */
+#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */
+#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */
+#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */
+#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6)
+#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0)
+#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)
+#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2)
+#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0)
+#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence))
#define DPFC_RECOMP_CTL _MMIO(0x320c)
-#define DPFC_RECOMP_STALL_EN (1 << 27)
-#define DPFC_RECOMP_STALL_WM_SHIFT (16)
-#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000)
-#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0)
-#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f)
+#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
+#define DPFC_RECOMP_STALL_EN REG_BIT(27)
+#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16)
+#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0)
#define DPFC_STATUS _MMIO(0x3210)
-#define DPFC_INVAL_SEG_SHIFT (16)
-#define DPFC_INVAL_SEG_MASK (0x07ff0000)
-#define DPFC_COMP_SEG_SHIFT (0)
-#define DPFC_COMP_SEG_MASK (0x000007ff)
+#define ILK_DPFC_STATUS _MMIO(0x43210)
+#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16)
+#define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0)
#define DPFC_STATUS2 _MMIO(0x3214)
+#define ILK_DPFC_STATUS2 _MMIO(0x43214)
+#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0)
#define DPFC_FENCE_YOFF _MMIO(0x3218)
-#define DPFC_CHICKEN _MMIO(0x3224)
-#define DPFC_HT_MODIFY (1 << 31)
-
-/* Framebuffer compression for Ironlake */
-#define ILK_DPFC_CB_BASE _MMIO(0x43200)
-#define ILK_DPFC_CONTROL _MMIO(0x43208)
-#define FBC_CTL_FALSE_COLOR (1 << 10)
-/* The bit 28-8 is reserved */
-#define DPFC_RESERVED (0x1FFFFF00)
-#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c)
-#define ILK_DPFC_STATUS _MMIO(0x43210)
-#define ILK_DPFC_COMP_SEG_MASK 0x7ff
-#define IVB_FBC_STATUS2 _MMIO(0x43214)
-#define IVB_FBC_COMP_SEG_MASK 0x7ff
-#define BDW_FBC_COMP_SEG_MASK 0xfff
#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218)
+#define DPFC_CHICKEN _MMIO(0x3224)
#define ILK_DPFC_CHICKEN _MMIO(0x43224)
-#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
-#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
-#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
+#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */
+#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */
+#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */
+#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */
+
#define GLK_FBC_STRIDE _MMIO(0x43228)
#define FBC_STRIDE_OVERRIDE REG_BIT(15)
#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
+
#define ILK_FBC_RT_BASE _MMIO(0x2128)
-#define ILK_FBC_RT_VALID (1 << 0)
-#define SNB_FBC_FRONT_BUFFER (1 << 1)
+#define ILK_FBC_RT_VALID REG_BIT(0)
+#define SNB_FBC_FRONT_BUFFER REG_BIT(1)
#define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000)
#define ILK_FBCQ_DIS (1 << 22)
* The following two registers are of type GTTMMADR
*/
#define SNB_DPFC_CTL_SA _MMIO(0x100100)
-#define SNB_CPU_FENCE_ENABLE (1 << 29)
-#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
+#define SNB_DPFC_FENCE_EN REG_BIT(29)
+#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0)
+#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence))
+#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
#define IPS_ENABLE (1 << 31)
#define MSG_FBC_REND_STATE _MMIO(0x50380)
-#define FBC_REND_NUKE (1 << 2)
-#define FBC_REND_CACHE_CLEAN (1 << 1)
+#define FBC_REND_NUKE REG_BIT(2)
+#define FBC_REND_CACHE_CLEAN REG_BIT(1)
/*
* GPIO regs
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
-#define PIPE_CRC_ENABLE (1 << 31)
+#define PIPE_CRC_ENABLE REG_BIT(31)
/* skl+ source selection */
-#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
-#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
-#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
-#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
-#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
-#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
-#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
-#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
+#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0)
+#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2)
+#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4)
+#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6)
+#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7)
+#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5)
+#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3)
+#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1)
/* ivb+ source selection */
-#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
-#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
-#define PIPE_CRC_SOURCE_PF_IVB (2 << 29)
+#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29)
+#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0)
+#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1)
+#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2)
/* ilk+ source selection */
-#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28)
-#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28)
-#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28)
-/* embedded DP port on the north display block, reserved on ivb */
-#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28)
-#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */
+#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0)
+#define PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1)
+#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2)
+/* embedded DP port on the north display block */
+#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4)
+#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5)
/* vlv source selection */
-#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27)
-#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27)
-#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27)
+#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27)
+#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0)
+#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1)
+#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2)
/* with DP port the pipe source is invalid */
-#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27)
-#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27)
-#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27)
+#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3)
+#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6)
+#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7)
/* gen3+ source selection */
-#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28)
-#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28)
-#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28)
+#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28)
+#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0)
+#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1)
+#define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2)
/* with DP/TV port the pipe source is invalid */
-#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28)
-#define PIPE_CRC_SOURCE_TV_PRE (4 << 28)
-#define PIPE_CRC_SOURCE_TV_POST (5 << 28)
-#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28)
-#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28)
+#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3)
+#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4)
+#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5)
+#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6)
+#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7)
/* gen2 doesn't have source selection bits */
-#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30)
+#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30)
#define _PIPE_CRC_RES_1_A_IVB 0x60064
#define _PIPE_CRC_RES_2_A_IVB 0x60068
#define PSR_EVENT_LPSP_MODE_EXIT (1 << 1)
#define PSR_EVENT_PSR_DISABLE (1 << 0)
-#define _PSR2_STATUS_A 0x60940
-#define _PSR2_STATUS_EDP 0x6f940
-#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
-#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28)
-#define EDP_PSR2_STATUS_STATE_SHIFT 28
+#define _PSR2_STATUS_A 0x60940
+#define _PSR2_STATUS_EDP 0x6f940
+#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A)
+#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28)
+#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8)
#define _PSR2_SU_STATUS_A 0x60914
#define _PSR2_SU_STATUS_EDP 0x6f914
#define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154)
#define DC_BALANCE_RESET_VLV (1 << 31)
#define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0))
-#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */
-#define PIPE_B_SCRAMBLE_RESET (1 << 1)
-#define PIPE_A_SCRAMBLE_RESET (1 << 0)
+#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */
+#define PIPE_B_SCRAMBLE_RESET REG_BIT(1)
+#define PIPE_A_SCRAMBLE_RESET REG_BIT(0)
/* Gen 3 SDVO bits: */
#define SDVO_ENABLE (1 << 31)
#define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26)
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
-#define PIPEB_LINE_COMPARE_INT_EN (1 << 29)
-#define PIPEB_HLINE_INT_EN (1 << 28)
-#define PIPEB_VBLANK_INT_EN (1 << 27)
-#define SPRITED_FLIP_DONE_INT_EN (1 << 26)
-#define SPRITEC_FLIP_DONE_INT_EN (1 << 25)
-#define PLANEB_FLIP_DONE_INT_EN (1 << 24)
-#define PIPE_PSR_INT_EN (1 << 22)
-#define PIPEA_LINE_COMPARE_INT_EN (1 << 21)
-#define PIPEA_HLINE_INT_EN (1 << 20)
-#define PIPEA_VBLANK_INT_EN (1 << 19)
-#define SPRITEB_FLIP_DONE_INT_EN (1 << 18)
-#define SPRITEA_FLIP_DONE_INT_EN (1 << 17)
-#define PLANEA_FLIPDONE_INT_EN (1 << 16)
-#define PIPEC_LINE_COMPARE_INT_EN (1 << 13)
-#define PIPEC_HLINE_INT_EN (1 << 12)
-#define PIPEC_VBLANK_INT_EN (1 << 11)
-#define SPRITEF_FLIPDONE_INT_EN (1 << 10)
-#define SPRITEE_FLIPDONE_INT_EN (1 << 9)
-#define PLANEC_FLIPDONE_INT_EN (1 << 8)
+#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
+#define PIPEB_HLINE_INT_EN REG_BIT(28)
+#define PIPEB_VBLANK_INT_EN REG_BIT(27)
+#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26)
+#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25)
+#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24)
+#define PIPE_PSR_INT_EN REG_BIT(22)
+#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21)
+#define PIPEA_HLINE_INT_EN REG_BIT(20)
+#define PIPEA_VBLANK_INT_EN REG_BIT(19)
+#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18)
+#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17)
+#define PLANEA_FLIPDONE_INT_EN REG_BIT(16)
+#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13)
+#define PIPEC_HLINE_INT_EN REG_BIT(12)
+#define PIPEC_VBLANK_INT_EN REG_BIT(11)
+#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10)
+#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9)
+#define PLANEC_FLIPDONE_INT_EN REG_BIT(8)
#define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */
-#define SPRITEF_INVALID_GTT_INT_EN (1 << 27)
-#define SPRITEE_INVALID_GTT_INT_EN (1 << 26)
-#define PLANEC_INVALID_GTT_INT_EN (1 << 25)
-#define CURSORC_INVALID_GTT_INT_EN (1 << 24)
-#define CURSORB_INVALID_GTT_INT_EN (1 << 23)
-#define CURSORA_INVALID_GTT_INT_EN (1 << 22)
-#define SPRITED_INVALID_GTT_INT_EN (1 << 21)
-#define SPRITEC_INVALID_GTT_INT_EN (1 << 20)
-#define PLANEB_INVALID_GTT_INT_EN (1 << 19)
-#define SPRITEB_INVALID_GTT_INT_EN (1 << 18)
-#define SPRITEA_INVALID_GTT_INT_EN (1 << 17)
-#define PLANEA_INVALID_GTT_INT_EN (1 << 16)
-#define DPINVGTT_EN_MASK 0xff0000
-#define DPINVGTT_EN_MASK_CHV 0xfff0000
-#define SPRITEF_INVALID_GTT_STATUS (1 << 11)
-#define SPRITEE_INVALID_GTT_STATUS (1 << 10)
-#define PLANEC_INVALID_GTT_STATUS (1 << 9)
-#define CURSORC_INVALID_GTT_STATUS (1 << 8)
-#define CURSORB_INVALID_GTT_STATUS (1 << 7)
-#define CURSORA_INVALID_GTT_STATUS (1 << 6)
-#define SPRITED_INVALID_GTT_STATUS (1 << 5)
-#define SPRITEC_INVALID_GTT_STATUS (1 << 4)
-#define PLANEB_INVALID_GTT_STATUS (1 << 3)
-#define SPRITEB_INVALID_GTT_STATUS (1 << 2)
-#define SPRITEA_INVALID_GTT_STATUS (1 << 1)
-#define PLANEA_INVALID_GTT_STATUS (1 << 0)
-#define DPINVGTT_STATUS_MASK 0xff
-#define DPINVGTT_STATUS_MASK_CHV 0xfff
+#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16)
+#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16)
+#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27)
+#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26)
+#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25)
+#define CURSORC_INVALID_GTT_INT_EN REG_BIT(24)
+#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23)
+#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22)
+#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21)
+#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20)
+#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19)
+#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18)
+#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17)
+#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16)
+#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0)
+#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0)
+#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11)
+#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10)
+#define PLANEC_INVALID_GTT_STATUS REG_BIT(9)
+#define CURSORC_INVALID_GTT_STATUS REG_BIT(8)
+#define CURSORB_INVALID_GTT_STATUS REG_BIT(7)
+#define CURSORA_INVALID_GTT_STATUS REG_BIT(6)
+#define SPRITED_INVALID_GTT_STATUS REG_BIT(5)
+#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4)
+#define PLANEB_INVALID_GTT_STATUS REG_BIT(3)
+#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2)
+#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1)
+#define PLANEA_INVALID_GTT_STATUS REG_BIT(0)
#define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030)
#define DSPARB_CSTART_MASK (0x7f << 7)
#define DVS_SOURCE_KEY (1 << 22)
#define DVS_RGB_ORDER_XBGR (1 << 20)
#define DVS_YUV_FORMAT_BT709 (1 << 18)
-#define DVS_YUV_BYTE_ORDER_MASK (3 << 16)
+#define DVS_YUV_ORDER_MASK (3 << 16)
#define DVS_YUV_ORDER_YUYV (0 << 16)
#define DVS_YUV_ORDER_UYVY (1 << 16)
#define DVS_YUV_ORDER_YVYU (2 << 16)
#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19)
#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */
-#define SPRITE_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SPRITE_YUV_ORDER_MASK (3 << 16)
#define SPRITE_YUV_ORDER_YUYV (0 << 16)
#define SPRITE_YUV_ORDER_UYVY (1 << 16)
#define SPRITE_YUV_ORDER_YVYU (2 << 16)
#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */
#define SP_SOURCE_KEY (1 << 22)
#define SP_YUV_FORMAT_BT709 (1 << 18)
-#define SP_YUV_BYTE_ORDER_MASK (3 << 16)
+#define SP_YUV_ORDER_MASK (3 << 16)
#define SP_YUV_ORDER_YUYV (0 << 16)
#define SP_YUV_ORDER_UYVY (1 << 16)
#define SP_YUV_ORDER_YVYU (2 << 16)
#define PLANE_CTL_YUV420_Y_PLANE (1 << 19)
#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18)
#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
-#define PLANE_CTL_YUV422_YUYV (0 << 16)
-#define PLANE_CTL_YUV422_UYVY (1 << 16)
-#define PLANE_CTL_YUV422_YVYU (2 << 16)
-#define PLANE_CTL_YUV422_VYUY (3 << 16)
+#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16)
+#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16)
+#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16)
+#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16)
#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */
#define _PLANE_CUS_CTL_1_A 0x701c8
#define _PLANE_CUS_CTL_2_A 0x702c8
#define PLANE_CUS_ENABLE (1 << 31)
-#define PLANE_CUS_PLANE_4_RKL (0 << 30)
-#define PLANE_CUS_PLANE_5_RKL (1 << 30)
-#define PLANE_CUS_PLANE_6 (0 << 30)
-#define PLANE_CUS_PLANE_7 (1 << 30)
+#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30)
+#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30)
+#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30)
+#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30)
#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19)
#define PLANE_CUS_HPHASE_0 (0 << 16)
#define PLANE_CUS_HPHASE_0_25 (1 << 16)
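/*
 * On the renames above: PLANE_CUS_CTL binds a Y (luma) plane to the
 * current UV plane for chroma upsampling, and which planes may act as
 * the Y plane varies by platform; the *_Y_PLANE_* names make that
 * pairing explicit (planes 6/7 on icl+, 4/5 on rkl).
 */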
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
-#define _PLANE_CC_VAL_1_B 0x711b4
-#define _PLANE_CC_VAL_2_B 0x712b4
-#define _PLANE_CC_VAL_1(pipe) _PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B)
-#define _PLANE_CC_VAL_2(pipe) _PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B)
-#define PLANE_CC_VAL(pipe, plane) \
- _MMIO_PLANE(plane, _PLANE_CC_VAL_1(pipe), _PLANE_CC_VAL_2(pipe))
+#define _PLANE_CC_VAL_1_B 0x711b4
+#define _PLANE_CC_VAL_2_B 0x712b4
+#define _PLANE_CC_VAL_1(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_1_A, _PLANE_CC_VAL_1_B) + (dw) * 4)
+#define _PLANE_CC_VAL_2(pipe, dw) (_PIPE(pipe, _PLANE_CC_VAL_2_A, _PLANE_CC_VAL_2_B) + (dw) * 4)
+#define PLANE_CC_VAL(pipe, plane, dw) \
+ _MMIO_PLANE((plane), _PLANE_CC_VAL_1((pipe), (dw)), _PLANE_CC_VAL_2((pipe), (dw)))
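/*
 * Usage sketch for the new dw argument (the clear color is a 64-bit
 * value split across two consecutive dword registers):
 *
 *	intel_de_write_fw(i915, PLANE_CC_VAL(pipe, plane_id, 0),
 *			  lower_32_bits(ccval));
 *	intel_de_write_fw(i915, PLANE_CC_VAL(pipe, plane_id, 1),
 *			  upper_32_bits(ccval));
 */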
/* Input CSC Register Definitions */
#define _PLANE_INPUT_CSC_RY_GY_1_A 0x701E0
/*
* The below are numbered starting from "S1" on gen11/gen12, but starting
- * with gen13 display, the bspec switches to a 0-based numbering scheme
+ * with display 13, the bspec switches to a 0-based numbering scheme
* (although the addresses stay the same so new S0 = old S1, new S1 = old S2).
* We'll just use the 0-based numbering here for all platforms since it's the
* way things will be named by the hardware team going forward, plus it's more
#define RESET_PCH_HANDSHAKE_ENABLE (1 << 4)
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
-#define ICL_DELAY_PMRSP (1 << 22)
-#define MASK_WAKEMEM (1 << 13)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
#define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434)
#define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27)
_PIPEB_CHICKEN)
#define UNDERRUN_RECOVERY_DISABLE_ADLP REG_BIT(30)
#define UNDERRUN_RECOVERY_ENABLE_DG2 REG_BIT(30)
-#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU REG_BIT(15)
+#define DG2_RENDER_CCSTAG_4_3_EN REG_BIT(12)
+#define PER_PIXEL_ALPHA_BYPASS_EN REG_BIT(7)
#define VFLSKPD _MMIO(0x62a8)
#define DIS_OVER_FETCH_CACHE REG_BIT(1)
#define GEN6_OFFSET(x) ((x) << 19)
#define GEN6_AGGRESSIVE_TURBO (0 << 15)
#define GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
+ #define GEN9_IGNORE_SLICE_RATIO (0 << 0)
#define GEN6_RC_VIDEO_FREQ _MMIO(0xA00C)
#define GEN6_RC_CONTROL _MMIO(0xA090)
#define GEN6_RP_UP_BUSY_CONT (0x4 << 3)
#define GEN6_RP_DOWN_IDLE_AVG (0x2 << 0)
#define GEN6_RP_DOWN_IDLE_CONT (0x1 << 0)
+ #define GEN6_RPSWCTL_SHIFT 9
+ #define GEN9_RPSWCTL_ENABLE (0x2 << GEN6_RPSWCTL_SHIFT)
+ #define GEN9_RPSWCTL_DISABLE (0x0 << GEN6_RPSWCTL_SHIFT)
#define GEN6_RP_UP_THRESHOLD _MMIO(0xA02C)
#define GEN6_RP_DOWN_THRESHOLD _MMIO(0xA030)
#define GEN6_RP_CUR_UP_EI _MMIO(0xA050)
#define AUD_PIN_BUF_CTL _MMIO(0x48414)
#define AUD_PIN_BUF_ENABLE REG_BIT(31)
+#define AUD_TS_CDCLK_M _MMIO(0x65ea0)
+#define AUD_TS_CDCLK_M_EN REG_BIT(31)
+#define AUD_TS_CDCLK_N _MMIO(0x65ea4)
+
/* Display Audio Config Reg */
#define AUD_CONFIG_BE _MMIO(0x65ef0)
#define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe)))
#define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT)
#define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT)
#define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT)
-#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT)
-#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1)
#define TRANS_DDI_MODE_SELECT_MASK (7 << 24)
#define TRANS_DDI_MODE_SELECT_HDMI (0 << 24)
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16)
#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
+/* CDCLK_SQUASH_CTL */
+#define CDCLK_SQUASH_CTL _MMIO(0x46008)
+#define CDCLK_SQUASH_ENABLE REG_BIT(31)
+#define CDCLK_SQUASH_WINDOW_SIZE_MASK REG_GENMASK(27, 24)
+#define CDCLK_SQUASH_WINDOW_SIZE(x) REG_FIELD_PREP(CDCLK_SQUASH_WINDOW_SIZE_MASK, (x))
+#define CDCLK_SQUASH_WAVEFORM_MASK REG_GENMASK(15, 0)
+#define CDCLK_SQUASH_WAVEFORM(x) REG_FIELD_PREP(CDCLK_SQUASH_WAVEFORM_MASK, (x))
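/*
 * Squashing sketch: the PLL keeps running at a fixed rate while the
 * 16-bit waveform gates individual clock pulses inside the window, so
 * with a window size of 16 a waveform with N bits set yields roughly
 * N/16 of the full rate (0xffff = full rate, 0xaaaa = half).
 */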
+
/* LCPLL_CTL */
#define LCPLL1_CTL _MMIO(0x46010)
#define LCPLL2_CTL _MMIO(0x46014)
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
+#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"
#include "gt/intel_rps.h"
#include "i915_active.h"
+#include "i915_deps.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_pm.h"
struct i915_capture_list *next = capture->next;
i915_vma_snapshot_put(capture->vma_snapshot);
+ kfree(capture);
capture = next;
}
}
return 0;
}
+/**
+ * i915_request_await_deps - set this request to (async) wait upon a struct
+ * i915_deps dma_fence collection
+ * @rq: request we are wishing to use
+ * @deps: The struct i915_deps containing the dependencies.
+ *
+ * Returns 0 if successful, negative error code on error.
+ */
+int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
+{
+ int i, err;
+
+ for (i = 0; i < deps->num_deps; ++i) {
+ err = i915_request_await_dma_fence(rq, deps->fences[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
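/*
 * Hypothetical caller sketch (error handling trimmed): gather the
 * fences an operation depends on into a struct i915_deps, then make
 * the new request wait on the whole collection before adding it:
 *
 *	rq = i915_request_create(ce);
 *	if (!IS_ERR(rq)) {
 *		err = i915_request_await_deps(rq, deps);
 *		i915_request_add(rq);
 *	}
 */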
/**
* i915_request_await_object - set this request to (async) wait upon a bo
* @to: request we are wishing to use
struct intel_remapped_plane_info {
/* in gtt pages */
- u32 offset;
- u16 width;
- u16 height;
- u16 src_stride;
- u16 dst_stride;
+ u32 offset:31;
+ u32 linear:1;
+ union {
+ /* in gtt pages for !linear */
+ struct {
+ u16 width;
+ u16 height;
+ u16 src_stride;
+ u16 dst_stride;
+ };
+
+ /* in gtt pages for linear */
+ u32 size;
+ };
} __packed;
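/*
 * Interpretation sketch (illustrative only): consumers branch on the
 * new linear bit to compute the view length in GTT pages:
 *
 *	if (plane_info->linear)
 *		pages = plane_info->size;
 *	else
 *		pages = (u32)plane_info->dst_stride * plane_info->height;
 */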
struct intel_remapped_info {
#define I915_VMA_PAGES_BIAS 24
#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
atomic_t pages_count; /* number of active binds to the pages */
- struct mutex pages_mutex; /* protect acquire/release of backing pages */
/**
* Support different GGTT views into the same object.
#define INTEL_SUBPLATFORM_G10 0
#define INTEL_SUBPLATFORM_G11 1
+/* ADL-S */
+#define INTEL_SUBPLATFORM_RPL_S 0
+
enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
func(is_dgfx); \
/* Keep has_* in alphabetical order */ \
func(has_64bit_reloc); \
+ func(has_64k_pages); \
func(gpu_reset_clobbers_display); \
func(has_reset_engine); \
func(has_global_mocs); \
u8 gt; /* GT number, 0 if undefined */
- u8 pipe_mask;
- u8 cpu_transcoder_mask;
-
- u8 abox_mask;
-
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
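/*
 * Expansion note: DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG) stamps out one
 * single-bit field per entry, so the func(has_64k_pages) added to the
 * flag list above materializes here as "u8 has_64k_pages:1;".
 */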
u8 ver;
u8 rel;
+ u8 pipe_mask;
+ u8 cpu_transcoder_mask;
+ u8 abox_mask;
+
#define DEFINE_FLAG(name) u8 name:1
DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
*/
#include <linux/pm_runtime.h>
-#include <asm/iosf_mbi.h>
#include "gt/intel_lrc_reg.h" /* for shadow reg list */
#include "i915_drv.h"
+#include "i915_iosf_mbi.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_pm.h"
}
void intel_uncore_init_early(struct intel_uncore *uncore,
- struct drm_i915_private *i915)
+ struct intel_gt *gt)
{
spin_lock_init(&uncore->lock);
- uncore->i915 = i915;
- uncore->rpm = &i915->runtime_pm;
- uncore->debug = &i915->mmio_debug;
+ uncore->i915 = gt->i915;
+ uncore->gt = gt;
+ uncore->rpm = &gt->i915->runtime_pm;
+ uncore->debug = &gt->i915->mmio_debug;
}
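/*
 * Call-site sketch for the new signature (illustrative): the GT is
 * passed instead of the bare i915 so the uncore can be tied back to
 * it via the new uncore->gt pointer:
 *
 *	intel_uncore_init_early(&i915->uncore, to_gt(i915));
 */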
static void uncore_raw_init(struct intel_uncore *uncore)
*/
#include <linux/component.h>
-#include "drm/i915_pxp_tee_interface.h"
-#include "drm/i915_component.h"
+
+#include <drm/i915_pxp_tee_interface.h>
+#include <drm/i915_component.h>
+
#include "i915_drv.h"
#include "intel_pxp.h"
#include "intel_pxp_session.h"
static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
{
- return &kdev_to_i915(i915_kdev)->gt.pxp;
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+
+ return &to_gt(i915)->pxp;
}
static int intel_pxp_tee_io_message(struct intel_pxp *pxp,