Merge tag 'drm-msm-next-2020-09-27' of https://gitlab.freedesktop.org/drm/msm into...
author Dave Airlie <[email protected]>
Tue, 29 Sep 2020 00:18:49 +0000 (10:18 +1000)
committer Dave Airlie <[email protected]>
Tue, 29 Sep 2020 00:18:49 +0000 (10:18 +1000)
* DSI support for sm8150/sm8250
* Support for per-process GPU pagetables (finally!) for a6xx.
  There are still some iommu/arm-smmu changes required to
  enable it, without which it will fall back to the current
  single-pgtable state.  The first part (i.e. what doesn't
  depend on drm side patches) is queued up for v5.10[1].
* DisplayPort support.  Userspace DP compliance tool support
  is already merged in IGT[2]
* The usual assortment of smaller fixes/cleanups

Signed-off-by: Dave Airlie <[email protected]>
From: Rob Clark <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvqjuzH=Po_9EzzFsp2Xq3tqJUTKfsA2g09XY7_+6Ypfw@mail.gmail.com
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gpummu.c
drivers/gpu/drm/msm/msm_iommu.c
include/drm/drm_dp_helper.h

diff --combined drivers/gpu/drm/i915/display/intel_display.c
index 5a9d933e425a41eefb138f892a26bf93fdb30b15,10b8310f290b2b8750d0dbdfdad78000e156f43f..bbe0f95426c0871ef59b204782b3b225c5c2da66
@@@ -47,7 -47,6 +47,7 @@@
  #include "display/intel_ddi.h"
  #include "display/intel_dp.h"
  #include "display/intel_dp_mst.h"
 +#include "display/intel_dpll_mgr.h"
  #include "display/intel_dsi.h"
  #include "display/intel_dvo.h"
  #include "display/intel_gmbus.h"
@@@ -67,7 -66,6 +67,7 @@@
  #include "intel_bw.h"
  #include "intel_cdclk.h"
  #include "intel_color.h"
 +#include "intel_csr.h"
  #include "intel_display_types.h"
  #include "intel_dp_link_training.h"
  #include "intel_fbc.h"
@@@ -2031,12 -2029,12 +2031,12 @@@ intel_tile_width_bytes(const struct drm
        case I915_FORMAT_MOD_Y_TILED_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 128;
 -              /* fall through */
 +              fallthrough;
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
        case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 64;
 -              /* fall through */
 +              fallthrough;
        case I915_FORMAT_MOD_Y_TILED:
                if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
                        return 128;
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                if (is_ccs_plane(fb, color_plane))
                        return 128;
 -              /* fall through */
 +              fallthrough;
        case I915_FORMAT_MOD_Yf_TILED:
                switch (cpp) {
                case 1:
@@@ -2187,7 -2185,7 +2187,7 @@@ static unsigned int intel_surf_alignmen
        case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
                if (is_semiplanar_uv_plane(fb, color_plane))
                        return intel_tile_row_size(fb, color_plane);
 -              /* Fall-through */
 +              fallthrough;
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
                return 16 * 1024;
        case I915_FORMAT_MOD_Y_TILED_CCS:
                if (INTEL_GEN(dev_priv) >= 12 &&
                    is_semiplanar_uv_plane(fb, color_plane))
                        return intel_tile_row_size(fb, color_plane);
 -              /* Fall-through */
 +              fallthrough;
        case I915_FORMAT_MOD_Yf_TILED:
                return 1 * 1024 * 1024;
        default:
@@@ -2312,7 -2310,7 +2312,7 @@@ err
  
  void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
  {
 -      i915_gem_object_lock(vma->obj);
 +      i915_gem_object_lock(vma->obj, NULL);
        if (flags & PLANE_HAS_FENCE)
                i915_vma_unpin_fence(vma);
        i915_gem_object_unpin_from_display_plane(vma);
@@@ -3452,7 -3450,7 +3452,7 @@@ initial_plane_vma(struct drm_i915_priva
        if (IS_ERR(vma))
                goto err_obj;
  
 -      if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
 +      if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
                goto err_obj;
  
        if (i915_gem_object_is_tiled(obj) &&
@@@ -3763,44 -3761,6 +3763,44 @@@ static int glk_max_plane_width(const st
        }
  }
  
 +static int icl_min_plane_width(const struct drm_framebuffer *fb)
 +{
 +      /* Wa_14011264657, Wa_14011050563: gen11+ */
 +      switch (fb->format->format) {
 +      case DRM_FORMAT_C8:
 +              return 18;
 +      case DRM_FORMAT_RGB565:
 +              return 10;
 +      case DRM_FORMAT_XRGB8888:
 +      case DRM_FORMAT_XBGR8888:
 +      case DRM_FORMAT_ARGB8888:
 +      case DRM_FORMAT_ABGR8888:
 +      case DRM_FORMAT_XRGB2101010:
 +      case DRM_FORMAT_XBGR2101010:
 +      case DRM_FORMAT_ARGB2101010:
 +      case DRM_FORMAT_ABGR2101010:
 +      case DRM_FORMAT_XVYU2101010:
 +      case DRM_FORMAT_Y212:
 +      case DRM_FORMAT_Y216:
 +              return 6;
 +      case DRM_FORMAT_NV12:
 +              return 20;
 +      case DRM_FORMAT_P010:
 +      case DRM_FORMAT_P012:
 +      case DRM_FORMAT_P016:
 +              return 12;
 +      case DRM_FORMAT_XRGB16161616F:
 +      case DRM_FORMAT_XBGR16161616F:
 +      case DRM_FORMAT_ARGB16161616F:
 +      case DRM_FORMAT_ABGR16161616F:
 +      case DRM_FORMAT_XVYU12_16161616:
 +      case DRM_FORMAT_XVYU16161616:
 +              return 4;
 +      default:
 +              return 1;
 +      }
 +}
 +
  static int icl_max_plane_width(const struct drm_framebuffer *fb,
                               int color_plane,
                               unsigned int rotation)
@@@ -3883,31 -3843,29 +3883,31 @@@ static int skl_check_main_surface(struc
        int y = plane_state->uapi.src.y1 >> 16;
        int w = drm_rect_width(&plane_state->uapi.src) >> 16;
        int h = drm_rect_height(&plane_state->uapi.src) >> 16;
 -      int max_width;
 -      int max_height;
 -      u32 alignment;
 -      u32 offset;
 +      int max_width, min_width, max_height;
 +      u32 alignment, offset;
        int aux_plane = intel_main_to_aux_plane(fb, 0);
        u32 aux_offset = plane_state->color_plane[aux_plane].offset;
  
 -      if (INTEL_GEN(dev_priv) >= 11)
 +      if (INTEL_GEN(dev_priv) >= 11) {
                max_width = icl_max_plane_width(fb, 0, rotation);
 -      else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 +              min_width = icl_min_plane_width(fb);
 +      } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
                max_width = glk_max_plane_width(fb, 0, rotation);
 -      else
 +              min_width = 1;
 +      } else {
                max_width = skl_max_plane_width(fb, 0, rotation);
 +              min_width = 1;
 +      }
  
        if (INTEL_GEN(dev_priv) >= 11)
                max_height = icl_max_plane_height();
        else
                max_height = skl_max_plane_height();
  
 -      if (w > max_width || h > max_height) {
 +      if (w > max_width || w < min_width || h > max_height) {
                drm_dbg_kms(&dev_priv->drm,
 -                          "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
 -                          w, h, max_width, max_height);
 +                          "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
 +                          w, h, min_width, max_width, max_height);
                return -EINVAL;
        }
  
@@@ -6253,7 -6211,7 +6253,7 @@@ static int skl_update_scaler_plane(stru
        case DRM_FORMAT_ARGB16161616F:
                if (INTEL_GEN(dev_priv) >= 11)
                        break;
 -              /* fall through */
 +              fallthrough;
        default:
                drm_dbg_kms(&dev_priv->drm,
                            "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@@ -7332,10 -7290,6 +7332,10 @@@ enum intel_display_power_domain intel_p
                return POWER_DOMAIN_PORT_DDI_F_LANES;
        case PORT_G:
                return POWER_DOMAIN_PORT_DDI_G_LANES;
 +      case PORT_H:
 +              return POWER_DOMAIN_PORT_DDI_H_LANES;
 +      case PORT_I:
 +              return POWER_DOMAIN_PORT_DDI_I_LANES;
        default:
                MISSING_CASE(port);
                return POWER_DOMAIN_PORT_OTHER;
@@@ -7361,10 -7315,6 +7361,10 @@@ intel_aux_power_domain(struct intel_dig
                        return POWER_DOMAIN_AUX_F_TBT;
                case AUX_CH_G:
                        return POWER_DOMAIN_AUX_G_TBT;
 +              case AUX_CH_H:
 +                      return POWER_DOMAIN_AUX_H_TBT;
 +              case AUX_CH_I:
 +                      return POWER_DOMAIN_AUX_I_TBT;
                default:
                        MISSING_CASE(dig_port->aux_ch);
                        return POWER_DOMAIN_AUX_C_TBT;
@@@ -7396,10 -7346,6 +7396,10 @@@ intel_legacy_aux_to_power_domain(enum a
                return POWER_DOMAIN_AUX_F;
        case AUX_CH_G:
                return POWER_DOMAIN_AUX_G;
 +      case AUX_CH_H:
 +              return POWER_DOMAIN_AUX_H;
 +      case AUX_CH_I:
 +              return POWER_DOMAIN_AUX_I;
        default:
                MISSING_CASE(aux_ch);
                return POWER_DOMAIN_AUX_A;
@@@ -8168,7 -8114,7 +8168,7 @@@ static void compute_m_n(unsigned int m
         * which the devices expect also in synchronous clock mode.
         */
        if (constant_n)
-               *ret_n = 0x8000;
+               *ret_n = DP_LINK_CONSTANT_N_VALUE;
        else
                *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
  
@@@ -10856,18 -10802,9 +10856,18 @@@ static void icl_get_ddi_pll(struct drm_
        u32 temp;
  
        if (intel_phy_is_combo(dev_priv, phy)) {
 -              temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
 -                      ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 -              id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
 +              u32 mask, shift;
 +
 +              if (IS_ROCKETLAKE(dev_priv)) {
 +                      mask = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 +                      shift = RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
 +              } else {
 +                      mask = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 +                      shift = ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
 +              }
 +
 +              temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) & mask;
 +              id = temp >> shift;
                port_dpll_id = ICL_PORT_DPLL_DEFAULT;
        } else if (intel_phy_is_tc(dev_priv, phy)) {
                u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
@@@ -10959,7 -10896,7 +10959,7 @@@ static void hsw_get_ddi_pll(struct drm_
                break;
        default:
                MISSING_CASE(ddi_pll_sel);
 -              /* fall through */
 +              fallthrough;
        case PORT_CLK_SEL_NONE:
                return;
        }
@@@ -11019,10 -10956,10 +11019,10 @@@ static bool hsw_get_transcoder_state(st
                        drm_WARN(dev, 1,
                                 "unknown pipe linked to transcoder %s\n",
                                 transcoder_name(panel_transcoder));
 -                      /* fall through */
 +                      fallthrough;
                case TRANS_DDI_EDP_INPUT_A_ONOFF:
                        force_thru = true;
 -                      /* fall through */
 +                      fallthrough;
                case TRANS_DDI_EDP_INPUT_A_ON:
                        trans_pipe = PIPE_A;
                        break;
@@@ -12823,9 -12760,6 +12823,9 @@@ static int intel_crtc_atomic_check(stru
  
        }
  
 +      if (!mode_changed)
 +              intel_psr2_sel_fetch_update(state, crtc);
 +
        return 0;
  }
  
@@@ -13249,7 -13183,7 +13249,7 @@@ static bool check_digital_port_conflict
                case INTEL_OUTPUT_DDI:
                        if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
                                break;
 -                      /* else, fall through */
 +                      fallthrough;
                case INTEL_OUTPUT_DP:
                case INTEL_OUTPUT_HDMI:
                case INTEL_OUTPUT_EDP:
@@@ -13484,6 -13418,12 +13484,6 @@@ encoder_retry
                    "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
                    base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
  
 -      /*
 -       * Make drm_calc_timestamping_constants in
 -       * drm_atomic_helper_update_legacy_modeset_state() happy
 -       */
 -      pipe_config->uapi.adjusted_mode = pipe_config->hw.adjusted_mode;
 -
        return 0;
  }
  
@@@ -14643,8 -14583,16 +14643,8 @@@ u8 intel_calc_active_pipes(struct intel
  static int intel_modeset_checks(struct intel_atomic_state *state)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      int ret;
  
        state->modeset = true;
 -      state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
 -
 -      if (state->active_pipes != dev_priv->active_pipes) {
 -              ret = _intel_atomic_lock_global_state(state);
 -              if (ret)
 -                      return ret;
 -      }
  
        if (IS_HASWELL(dev_priv))
                return hsw_mode_set_planes_workaround(state);
@@@ -14788,8 -14736,7 +14788,8 @@@ static int intel_atomic_check_cdclk(str
                                    bool *need_cdclk_calc)
  {
        struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 -      struct intel_cdclk_state *new_cdclk_state;
 +      const struct intel_cdclk_state *old_cdclk_state;
 +      const struct intel_cdclk_state *new_cdclk_state;
        struct intel_plane_state *plane_state;
        struct intel_bw_state *new_bw_state;
        struct intel_plane *plane;
                        return ret;
        }
  
 +      old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
        new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
  
 -      if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
 +      if (new_cdclk_state &&
 +          old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
                *need_cdclk_calc = true;
  
        ret = dev_priv->display.bw_calc_min_cdclk(state);
@@@ -14985,7 -14930,7 +14985,7 @@@ static int intel_atomic_check(struct dr
        if (any_ms && !check_digital_port_conflicts(state)) {
                drm_dbg_kms(&dev_priv->drm,
                            "rejecting conflicting digital port configuration\n");
 -              ret = EINVAL;
 +              ret = -EINVAL;
                goto fail;
        }
  
        if (dev_priv->wm.distrust_bios_wm)
                any_ms = true;
  
 -      if (any_ms) {
 -              ret = intel_modeset_checks(state);
 -              if (ret)
 -                      goto fail;
 -      }
 -
        intel_fbc_choose_crtc(dev_priv, state);
        ret = calc_watermark_data(state);
        if (ret)
                goto fail;
  
        if (any_ms) {
 +              ret = intel_modeset_checks(state);
 +              if (ret)
 +                      goto fail;
 +
                ret = intel_modeset_calc_cdclk(state);
                if (ret)
                        return ret;
@@@ -15189,8 -15136,6 +15189,8 @@@ static void commit_pipe_config(struct i
  
                if (new_crtc_state->update_pipe)
                        intel_pipe_fastset(old_crtc_state, new_crtc_state);
 +
 +              intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
        }
  
        if (dev_priv->display.atomic_update_watermarks)
@@@ -15759,6 -15704,14 +15759,6 @@@ static void intel_atomic_track_fbs(stru
                                        plane->frontbuffer_bit);
  }
  
 -static void assert_global_state_locked(struct drm_i915_private *dev_priv)
 -{
 -      struct intel_crtc *crtc;
 -
 -      for_each_intel_crtc(&dev_priv->drm, crtc)
 -              drm_modeset_lock_assert_held(&crtc->base.mutex);
 -}
 -
  static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *_state,
                               bool nonblock)
        intel_shared_dpll_swap_state(state);
        intel_atomic_track_fbs(state);
  
 -      if (state->global_state_changed) {
 -              assert_global_state_locked(dev_priv);
 -
 -              dev_priv->active_pipes = state->active_pipes;
 -      }
 -
        drm_atomic_state_get(&state->base);
        INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
  
@@@ -16880,7 -16839,7 +16880,7 @@@ static void intel_setup_outputs(struct 
  
        intel_pps_init(dev_priv);
  
 -      if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
 +      if (!HAS_DISPLAY(dev_priv))
                return;
  
        if (IS_ROCKETLAKE(dev_priv)) {
@@@ -17180,7 -17139,7 +17180,7 @@@ static int intel_framebuffer_init(struc
        if (!intel_fb->frontbuffer)
                return -ENOMEM;
  
 -      i915_gem_object_lock(obj);
 +      i915_gem_object_lock(obj, NULL);
        tiling = i915_gem_object_get_tiling(obj);
        stride = i915_gem_object_get_stride(obj);
        i915_gem_object_unlock(obj);
@@@ -17866,27 -17825,6 +17866,27 @@@ int intel_modeset_init_noirq(struct drm
  {
        int ret;
  
 +      if (i915_inject_probe_failure(i915))
 +              return -ENODEV;
 +
 +      if (HAS_DISPLAY(i915)) {
 +              ret = drm_vblank_init(&i915->drm,
 +                                    INTEL_NUM_PIPES(i915));
 +              if (ret)
 +                      return ret;
 +      }
 +
 +      intel_bios_init(i915);
 +
 +      ret = intel_vga_register(i915);
 +      if (ret)
 +              goto cleanup_bios;
 +
 +      /* FIXME: completely on the wrong abstraction layer */
 +      intel_power_domains_init_hw(i915, false);
 +
 +      intel_csr_ucode_init(i915);
 +
        i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
        i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI |
                                        WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE);
  
        ret = intel_cdclk_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        ret = intel_dbuf_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        ret = intel_bw_init(i915);
        if (ret)
 -              return ret;
 +              goto cleanup_vga_client_pw_domain_csr;
  
        init_llist_head(&i915->atomic_helper.free_list);
        INIT_WORK(&i915->atomic_helper.free_work,
        intel_fbc_init(i915);
  
        return 0;
 +
 +cleanup_vga_client_pw_domain_csr:
 +      intel_csr_ucode_fini(i915);
 +      intel_power_domains_driver_remove(i915);
 +      intel_vga_unregister(i915);
 +cleanup_bios:
 +      intel_bios_driver_remove(i915);
 +
 +      return ret;
  }
  
 -/* part #2: call after irq install */
 -int intel_modeset_init(struct drm_i915_private *i915)
 +/* part #2: call after irq install, but before gem init */
 +int intel_modeset_init_nogem(struct drm_i915_private *i915)
  {
        struct drm_device *dev = &i915->drm;
        enum pipe pipe;
                    INTEL_NUM_PIPES(i915),
                    INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
  
 -      if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
 +      if (HAS_DISPLAY(i915)) {
                for_each_pipe(i915, pipe) {
                        ret = intel_crtc_init(i915, pipe);
                        if (ret) {
        if (i915->max_cdclk_freq == 0)
                intel_update_max_cdclk(i915);
  
 +      /*
 +       * If the platform has HTI, we need to find out whether it has reserved
 +       * any display resources before we create our display outputs.
 +       */
 +      if (INTEL_INFO(i915)->display.has_hti)
 +              i915->hti_state = intel_de_read(i915, HDPORT_STATE);
 +
        /* Just disable it once at startup */
        intel_vga_disable(i915);
        intel_setup_outputs(i915);
        return 0;
  }
  
 +/* part #3: call after gem init */
 +int intel_modeset_init(struct drm_i915_private *i915)
 +{
 +      int ret;
 +
 +      intel_overlay_setup(i915);
 +
 +      if (!HAS_DISPLAY(i915))
 +              return 0;
 +
 +      ret = intel_fbdev_init(&i915->drm);
 +      if (ret)
 +              return ret;
 +
 +      /* Only enable hotplug handling once the fbdev is fully set up. */
 +      intel_hpd_init(i915);
 +
 +      intel_init_ipc(i915);
 +
 +      intel_psr_set_force_mode_changed(i915->psr.dp);
 +
 +      return 0;
 +}
 +
  void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
  {
        struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@@ -18933,18 -18831,6 +18933,18 @@@ void intel_modeset_driver_remove_noirq(
        intel_fbc_cleanup_cfb(i915);
  }
  
 +/* part #3: call after gem init */
 +void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915)
 +{
 +      intel_csr_ucode_fini(i915);
 +
 +      intel_power_domains_driver_remove(i915);
 +
 +      intel_vga_unregister(i915);
 +
 +      intel_bios_driver_remove(i915);
 +}
 +
  #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
  
  struct intel_display_error_state {
@@@ -19005,7 -18891,7 +19005,7 @@@ intel_display_capture_error_state(struc
  
        BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
  
 -      if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))
 +      if (!HAS_DISPLAY(dev_priv))
                return NULL;
  
        error = kzalloc(sizeof(*error), GFP_ATOMIC);
diff --combined drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 91726da82ed67a28634c2044975b0852fc5212b3,c941c8138f257eed721cbae850276479b9ade247..d6804a8023555ac8eed22053445692b1437993db
@@@ -18,13 -18,24 +18,24 @@@ static void a5xx_dump(struct msm_gpu *g
  
  #define GPU_PAS_ID 13
  
- static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+               bool sync)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
  
+       /*
+        * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+        * the rptr shadow
+        */
+       if (a5xx_gpu->has_whereami && sync) {
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+       }
        spin_lock_irqsave(&ring->lock, flags);
  
        /* Copy the shadow to the actual register */
@@@ -43,8 -54,7 +54,7 @@@
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
  }
  
- static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
  {
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
@@@ -57,9 -67,9 +67,9 @@@
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
                }
        }
  
-       a5xx_flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        a5xx_preempt_trigger(gpu);
  
        /* we might not necessarily have a cmd from userspace to
        msm_gpu_retire(gpu);
  }
  
- static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
  
        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
                priv->lastctx = NULL;
-               a5xx_submit_in_rb(gpu, submit, ctx);
+               a5xx_submit_in_rb(gpu, submit);
                return;
        }
  
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);
  
-       a5xx_flush(gpu, ring);
+       /* A WHERE_AM_I packet is not needed after a YIELD */
+       a5xx_flush(gpu, ring, false);
  
        /* Check to see if we need to start preemption */
        a5xx_preempt_trigger(gpu);
@@@ -365,7 -375,7 +375,7 @@@ static int a5xx_me_init(struct msm_gpu 
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
  
-       gpu->funcs->flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
  }
  
@@@ -407,11 -417,31 +417,31 @@@ static int a5xx_preempt_start(struct ms
        OUT_RING(ring, 0x01);
        OUT_RING(ring, 0x01);
  
-       gpu->funcs->flush(gpu, ring);
+       /* The WHERE_AMI_I packet is not needed after a YIELD is issued */
+       a5xx_flush(gpu, ring, false);
  
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
  }
  
+ static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+               struct drm_gem_object *obj)
+ {
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+       if (IS_ERR(buf))
+               return;
+       /*
+        * If the lowest nibble is 0xa that is an indication that this microcode
+        * has been patched. The actual version is in dword [3] but we only care
+        * about the patchlevel which is the lowest nibble of dword [3]
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a5xx_gpu->has_whereami = true;
+       msm_gem_put_vaddr(obj);
+ }
  static int a5xx_ucode_init(struct msm_gpu *gpu)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
                }
  
                msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+               a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
  
        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@@ -506,6 -537,7 +537,7 @@@ static int a5xx_zap_shader_init(struct 
  static int a5xx_hw_init(struct msm_gpu *gpu)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;
  
        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
        gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
  
+       /*
+        * If the microcode supports the WHERE_AM_I opcode then we can use that
+        * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+        * can't safely use the RPTR shadow or preemption. In either case, the
+        * RPTR shadow should be disabled in hardware.
+        */
        gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
                MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
  
+       /* Disable preemption if WHERE_AM_I isn't available */
+       if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
+               a5xx_preempt_fini(gpu);
+               gpu->nr_rings = 1;
+       } else {
+               /* Create a privileged buffer for the RPTR shadow */
+               if (!a5xx_gpu->shadow_bo) {
+                       a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a5xx_gpu->shadow_bo,
+                               &a5xx_gpu->shadow_iova);
+                       if (IS_ERR(a5xx_gpu->shadow))
+                               return PTR_ERR(a5xx_gpu->shadow);
+               }
+               gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+                       REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+       }
        a5xx_preempt_hw_init(gpu);
  
        /* Disable the interrupts through the initial bringup stage */
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
  
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        }
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);
  
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else if (ret == -ENODEV) {
@@@ -825,6 -884,11 +884,11 @@@ static void a5xx_destroy(struct msm_gp
                drm_gem_object_put(a5xx_gpu->gpmu_bo);
        }
  
+       if (a5xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a5xx_gpu->shadow_bo);
+       }
        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
  }
@@@ -1057,17 -1121,6 +1121,6 @@@ static irqreturn_t a5xx_irq(struct msm_
        return IRQ_HANDLED;
  }
  
- static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A5XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
- };
  static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
@@@ -1432,6 -1485,17 +1485,17 @@@ static unsigned long a5xx_gpu_busy(stru
        return (unsigned long)busy_time;
  }
  
+ static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+       if (a5xx_gpu->has_whereami)
+               return a5xx_gpu->shadow[ring->id];
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+ }
  static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
                .submit = a5xx_submit,
-               .flush = a5xx_flush,
                .active_ring = a5xx_active_ring,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
                .gpu_state_get = a5xx_gpu_state_get,
                .gpu_state_put = a5xx_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a5xx_get_rptr,
        },
        .get_timestamp = a5xx_get_timestamp,
  };
@@@ -1512,14 -1576,12 +1576,12 @@@ struct msm_gpu *a5xx_gpu_init(struct dr
        gpu = &adreno_gpu->base;
  
        adreno_gpu->registers = a5xx_registers;
-       adreno_gpu->reg_offsets = a5xx_register_offsets;
  
        a5xx_gpu->lm_leakage = 0x4E001A;
  
        check_speed_bin(&pdev->dev);
  
-       /* Restricting nr_rings to 1 to temporarily disable preemption */
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
diff --combined drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index e1c7bcd1b1eb7bcdaec33785a5eb0b33f57e4142,ab1e9eb619e0465feda836b51d54f75726738a6b..491fee410dafe0cfd9f6a74ac0f63551fee6f724
@@@ -11,6 -11,7 +11,7 @@@
  #include "a6xx_gpu.h"
  #include "a6xx_gmu.xml.h"
  #include "msm_gem.h"
+ #include "msm_gpu_trace.h"
  #include "msm_mmu.h"
  
  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
@@@ -124,6 -125,8 +125,8 @@@ void a6xx_gmu_set_freq(struct msm_gpu *
        gmu->current_perf_index = perf_index;
        gmu->freq = gmu->gpu_freqs[perf_index];
  
+       trace_msm_gmu_freq_change(gmu->freq, perf_index);
        /*
         * This can get called from devfreq while the hardware is idle. Don't
         * bring up the power if it isn't already active
@@@ -608,7 -611,7 +611,7 @@@ static void a6xx_gmu_power_config(struc
                gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
                        A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
 -              /* Fall through */
 +              fallthrough;
        case GMU_IDLE_STATE_SPTP:
                gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
                        GMU_PWR_COL_HYST);
diff --combined drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 66a95e22b7b3d523b5dca63bfc12595707b98c4d,8915882e444493d187654f531aaa15a1827abb58..948f3656c20cadbb33597971771eb2f7fd8097cd
@@@ -51,9 -51,20 +51,20 @@@ bool a6xx_idle(struct msm_gpu *gpu, str
  
  static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
  {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
  
+       /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+       if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+               struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+       }
        spin_lock_irqsave(&ring->lock, flags);
  
        /* Copy the shadow to the actual register */
@@@ -81,8 -92,50 +92,50 @@@ static void get_stats_counter(struct ms
        OUT_RING(ring, upper_32_bits(iova));
  }
  
- static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+               struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+ {
+       phys_addr_t ttbr;
+       u32 asid;
+       u64 memptr = rbmemptr(ring, ttbr0);
+       if (ctx == a6xx_gpu->cur_ctx)
+               return;
+       if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+               return;
+       /* Execute the table update */
+       OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+       OUT_RING(ring,
+               CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+               CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+       /*
+        * Write the new TTBR0 to the memstore. This is good for debugging.
+        */
+       OUT_PKT7(ring, CP_MEM_WRITE, 4);
+       OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+       OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+       OUT_RING(ring, lower_32_bits(ttbr));
+       OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+       /*
+        * And finally, trigger a uche flush to be sure there isn't anything
+        * lingering in that part of the GPU
+        */
+       OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, 0x31);
+       a6xx_gpu->cur_ctx = ctx;
+ }
+ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
  {
        unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;
  
+       a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_start));
  
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
@@@ -464,6 -519,30 +519,30 @@@ static int a6xx_cp_init(struct msm_gpu 
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
  }
  
+ static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+               struct drm_gem_object *obj)
+ {
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+       if (IS_ERR(buf))
+               return;
+       /*
+        * If the lowest nibble is 0xa that is an indication that this microcode
+        * has been patched. The actual version is in dword [3] but we only care
+        * about the patchlevel which is the lowest nibble of dword [3]
+        *
+        * Otherwise check that the firmware is greater than or equal to 1.90
+        * which was the first version that had this fix built in
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a6xx_gpu->has_whereami = true;
+       else if ((buf[0] & 0xfff) > 0x190)
+               a6xx_gpu->has_whereami = true;
+       msm_gem_put_vaddr(obj);
+ }
  static int a6xx_ucode_init(struct msm_gpu *gpu)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
                }
  
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
        }
  
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@@ -699,12 -779,43 +779,43 @@@ static int a6xx_hw_init(struct msm_gpu 
        gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
  
-       gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
-               MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+       /* Targets that support extended APRIV can use the RPTR shadow from
+        * hardware but all the other ones need to disable the feature. Targets
+        * that support the WHERE_AM_I opcode can use that instead
+        */
+       if (adreno_gpu->base.hw_apriv)
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+       else
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+                       MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+       /*
+        * Expanded APRIV and targets that support WHERE_AM_I both need a
+        * privileged buffer to store the RPTR shadow
+        */
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+               if (!a6xx_gpu->shadow_bo) {
+                       a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a6xx_gpu->shadow_bo,
+                               &a6xx_gpu->shadow_iova);
+                       if (IS_ERR(a6xx_gpu->shadow))
+                               return PTR_ERR(a6xx_gpu->shadow);
+               }
+               gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+                       REG_A6XX_CP_RB_RPTR_ADDR_HI,
+                       shadowptr(a6xx_gpu, gpu->rb[0]));
+       }
  
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
  
+       a6xx_gpu->cur_ctx = NULL;
        /* Enable the SQE_to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
  
@@@ -911,18 -1022,6 +1022,6 @@@ static irqreturn_t a6xx_irq(struct msm_
        return IRQ_HANDLED;
  }
  
- static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
-               REG_A6XX_CP_RB_RPTR_ADDR_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A6XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
- };
  static int a6xx_pm_resume(struct msm_gpu *gpu)
  {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
  
        gpu->needs_hw_init = true;
  
+       trace_msm_gpu_resume(0);
        ret = a6xx_gmu_resume(a6xx_gpu);
        if (ret)
                return ret;
@@@ -945,6 -1046,8 +1046,8 @@@ static int a6xx_pm_suspend(struct msm_g
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
  
+       trace_msm_gpu_suspend(0);
        devfreq_suspend_device(gpu->devfreq.devfreq);
  
        return a6xx_gmu_stop(a6xx_gpu);
@@@ -983,6 -1086,11 +1086,11 @@@ static void a6xx_destroy(struct msm_gp
                drm_gem_object_put(a6xx_gpu->sqe_bo);
        }
  
+       if (a6xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a6xx_gpu->shadow_bo);
+       }
        a6xx_gmu_remove(a6xx_gpu);
  
        adreno_gpu_cleanup(adreno_gpu);
@@@ -1017,6 -1125,31 +1125,31 @@@ static unsigned long a6xx_gpu_busy(stru
        return (unsigned long)busy_time;
  }
  
+ static struct msm_gem_address_space *
+ a6xx_create_private_address_space(struct msm_gpu *gpu)
+ {
+       struct msm_mmu *mmu;
+       mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+       if (IS_ERR(mmu))
+               return ERR_CAST(mmu);
+       return msm_gem_address_space_create(mmu,
+               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+ }
+ static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+ {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+               return a6xx_gpu->shadow[ring->id];
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+ }
  static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
                .pm_resume = a6xx_pm_resume,
                .recover = a6xx_recover,
                .submit = a6xx_submit,
-               .flush = a6xx_flush,
                .active_ring = a6xx_active_ring,
                .irq = a6xx_irq,
                .destroy = a6xx_destroy,
                .gpu_state_put = a6xx_gpu_state_put,
  #endif
                .create_address_space = adreno_iommu_create_address_space,
+               .create_private_address_space = a6xx_create_private_address_space,
+               .get_rptr = a6xx_get_rptr,
        },
        .get_timestamp = a6xx_get_timestamp,
  };
@@@ -1048,6 -1182,8 +1182,8 @@@ struct msm_gpu *a6xx_gpu_init(struct dr
  {
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
+       struct adreno_platform_config *config = pdev->dev.platform_data;
+       const struct adreno_info *info;
        struct device_node *node;
        struct a6xx_gpu *a6xx_gpu;
        struct adreno_gpu *adreno_gpu;
        gpu = &adreno_gpu->base;
  
        adreno_gpu->registers = NULL;
-       adreno_gpu->reg_offsets = a6xx_register_offsets;
  
-       if (adreno_is_a650(adreno_gpu))
+       /*
+        * We need to know the platform type before calling into adreno_gpu_init
+        * so that the hw_apriv flag can be correctly set. Snoop into the info
+        * and grab the revision number
+        */
+       info = adreno_info(config->rev);
+       if (info && info->revn == 650)
                adreno_gpu->base.hw_apriv = true;
  
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
diff --combined drivers/gpu/drm/msm/msm_gem.c
index e47958c3704abf823752e84ab7843c3053cf9991,14e14caf90f9849a257e63d43cbc97182551882c..ec602113be78cd5647b02d93f1f95368c78db476
@@@ -52,23 -52,16 +52,14 @@@ static void sync_for_device(struct msm_
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_device(dev, msm_obj->sgt,
-                                           DMA_BIDIRECTIONAL);
-       } else {
-               dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_map_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void sync_for_cpu(struct msm_gem_object *msm_obj)
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
-       } else {
-               dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
@@@ -123,7 -116,7 +114,7 @@@ static struct page **get_pages(struct d
  
                msm_obj->pages = p;
  
 -              msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
 +              msm_obj->sgt = drm_prime_pages_to_sg(obj->dev, p, npages);
                if (IS_ERR(msm_obj->sgt)) {
                        void *ptr = ERR_CAST(msm_obj->sgt);
  
@@@ -750,31 -743,31 +741,31 @@@ int msm_gem_sync_object(struct drm_gem_
        return 0;
  }
  
- void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+ void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
  {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
-       msm_obj->gpu = gpu;
-       if (exclusive)
-               dma_resv_add_excl_fence(obj->resv, fence);
-       else
-               dma_resv_add_shared_fence(obj->resv, fence);
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+       if (!atomic_fetch_inc(&msm_obj->active_count)) {
+               msm_obj->gpu = gpu;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+       }
  }
  
- void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+ void msm_gem_active_put(struct drm_gem_object *obj)
  {
-       struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_drm_private *priv = obj->dev->dev_private;
  
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
  
-       msm_obj->gpu = NULL;
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       if (!atomic_dec_return(&msm_obj->active_count)) {
+               msm_obj->gpu = NULL;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       }
  }
  
  int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@@ -849,11 -842,28 +840,28 @@@ void msm_gem_describe(struct drm_gem_ob
  
                seq_puts(m, "      vmas:");
  
-               list_for_each_entry(vma, &msm_obj->vmas, list)
-                       seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
-                               vma->aspace != NULL ? vma->aspace->name : NULL,
-                               vma->iova, vma->mapped ? "mapped" : "unmapped",
+               list_for_each_entry(vma, &msm_obj->vmas, list) {
+                       const char *name, *comm;
+                       if (vma->aspace) {
+                               struct msm_gem_address_space *aspace = vma->aspace;
+                               struct task_struct *task =
+                                       get_pid_task(aspace->pid, PIDTYPE_PID);
+                               if (task) {
+                                       comm = kstrdup(task->comm, GFP_KERNEL);
+                               } else {
+                                       comm = NULL;
+                               }
+                               name = aspace->name;
+                       } else {
+                               name = comm = NULL;
+                       }
+                       seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+                               name, comm ? ":" : "", comm ? comm : "",
+                               vma->aspace, vma->iova,
+                               vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
+                       kfree(comm);
+               }
  
                seq_puts(m, "\n");
        }
diff --combined drivers/gpu/drm/msm/msm_gpummu.c
index 53a7348476a16ee35155852e77d3bbadbb8aaea8,aab121f4beb76672e35982cc5d8da82a62ce9f97..379496186c7ff4e68cffc3a736a4f340c6188815
@@@ -30,20 -30,21 +30,20 @@@ static int msm_gpummu_map(struct msm_mm
  {
        struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
        unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
 -      struct scatterlist *sg;
 +      struct sg_dma_page_iter dma_iter;
        unsigned prot_bits = 0;
 -      unsigned i, j;
  
        if (prot & IOMMU_WRITE)
                prot_bits |= 1;
        if (prot & IOMMU_READ)
                prot_bits |= 2;
  
 -      for_each_sg(sgt->sgl, sg, sgt->nents, i) {
 -              dma_addr_t addr = sg->dma_address;
 -              for (j = 0; j < sg->length / GPUMMU_PAGE_SIZE; j++, idx++) {
 -                      gpummu->table[idx] = addr | prot_bits;
 -                      addr += GPUMMU_PAGE_SIZE;
 -              }
 +      for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
 +              dma_addr_t addr = sg_page_iter_dma_address(&dma_iter);
 +              int i;
 +
 +              for (i = 0; i < PAGE_SIZE; i += GPUMMU_PAGE_SIZE)
 +                      gpummu->table[idx++] = (addr + i) | prot_bits;
        }
  
        /* we can improve by deferring flush for multiple map() */
@@@ -101,7 -102,7 +101,7 @@@ struct msm_mmu *msm_gpummu_new(struct d
        }
  
        gpummu->gpu = gpu;
-       msm_mmu_init(&gpummu->base, dev, &funcs);
+       msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
  
        return &gpummu->base;
  }
diff --combined drivers/gpu/drm/msm/msm_iommu.c
index 6c31e65834c62e1c0309ea377f348c58ca5e3ffa,697cc0a059d63d3c4042f231454bc16a39433486..3a83ffdb3b90d2985220df7c3b2dcd7622c168e8
   * Author: Rob Clark <[email protected]>
   */
  
+ #include <linux/adreno-smmu-priv.h>
+ #include <linux/io-pgtable.h>
  #include "msm_drv.h"
  #include "msm_mmu.h"
  
  struct msm_iommu {
        struct msm_mmu base;
        struct iommu_domain *domain;
+       atomic_t pagetables;
  };
  #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
  
+ struct msm_iommu_pagetable {
+       struct msm_mmu base;
+       struct msm_mmu *parent;
+       struct io_pgtable_ops *pgtbl_ops;
+       phys_addr_t ttbr;
+       u32 asid;
+ };
+ static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+ {
+       return container_of(mmu, struct msm_iommu_pagetable, base);
+ }
+ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+               size_t size)
+ {
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       size_t unmapped = 0;
+       /* Unmap the block one page at a time */
+       while (size) {
+               unmapped += ops->unmap(ops, iova, 4096, NULL);
+               iova += 4096;
+               size -= 4096;
+       }
+       iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain);
+       return (unmapped == size) ? 0 : -EINVAL;
+ }
+ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+               struct sg_table *sgt, size_t len, int prot)
+ {
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       struct scatterlist *sg;
+       size_t mapped = 0;
+       u64 addr = iova;
+       unsigned int i;
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t size = sg->length;
+               phys_addr_t phys = sg_phys(sg);
+               /* Map the block one page at a time */
+               while (size) {
+                       if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+                               msm_iommu_pagetable_unmap(mmu, iova, mapped);
+                               return -EINVAL;
+                       }
+                       phys += 4096;
+                       addr += 4096;
+                       size -= 4096;
+                       mapped += 4096;
+               }
+       }
+       return 0;
+ }
+ static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+ {
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+       struct adreno_smmu_priv *adreno_smmu =
+               dev_get_drvdata(pagetable->parent->dev);
+       /*
+        * If this is the last attached pagetable for the parent,
+        * disable TTBR0 in the arm-smmu driver
+        */
+       if (atomic_dec_return(&iommu->pagetables) == 0)
+               adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+       free_io_pgtable_ops(pagetable->pgtbl_ops);
+       kfree(pagetable);
+ }
+ int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+               phys_addr_t *ttbr, int *asid)
+ {
+       struct msm_iommu_pagetable *pagetable;
+       if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+               return -EINVAL;
+       pagetable = to_pagetable(mmu);
+       if (ttbr)
+               *ttbr = pagetable->ttbr;
+       if (asid)
+               *asid = pagetable->asid;
+       return 0;
+ }
+ static const struct msm_mmu_funcs pagetable_funcs = {
+               .map = msm_iommu_pagetable_map,
+               .unmap = msm_iommu_pagetable_unmap,
+               .destroy = msm_iommu_pagetable_destroy,
+ };
+ static void msm_iommu_tlb_flush_all(void *cookie)
+ {
+ }
+ static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+               size_t granule, void *cookie)
+ {
+ }
+ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+               unsigned long iova, size_t granule, void *cookie)
+ {
+ }
+ static const struct iommu_flush_ops null_tlb_ops = {
+       .tlb_flush_all = msm_iommu_tlb_flush_all,
+       .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+       .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
+       .tlb_add_page = msm_iommu_tlb_add_page,
+ };
+ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+ {
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+       struct msm_iommu *iommu = to_msm_iommu(parent);
+       struct msm_iommu_pagetable *pagetable;
+       const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+       struct io_pgtable_cfg ttbr0_cfg;
+       int ret;
+       /* Get the pagetable configuration from the domain */
+       if (adreno_smmu->cookie)
+               ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+       if (!ttbr1_cfg)
+               return ERR_PTR(-ENODEV);
+       pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+       if (!pagetable)
+               return ERR_PTR(-ENOMEM);
+       msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+               MSM_MMU_IOMMU_PAGETABLE);
+       /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
+       ttbr0_cfg = *ttbr1_cfg;
+       /* The incoming cfg will have the TTBR1 quirk enabled */
+       ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+       ttbr0_cfg.tlb = &null_tlb_ops;
+       pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+               &ttbr0_cfg, iommu->domain);
+       if (!pagetable->pgtbl_ops) {
+               kfree(pagetable);
+               return ERR_PTR(-ENOMEM);
+       }
+       /*
+        * If this is the first pagetable that we've allocated, send it back to
+        * the arm-smmu driver as a trigger to set up TTBR0
+        */
+       if (atomic_inc_return(&iommu->pagetables) == 1) {
+               ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+               if (ret) {
+                       free_io_pgtable_ops(pagetable->pgtbl_ops);
+                       kfree(pagetable);
+                       return ERR_PTR(ret);
+               }
+       }
+       /* Needed later for TLB flush */
+       pagetable->parent = parent;
+       pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+       /*
+        * TODO we would like each set of page tables to have a unique ASID
+        * to optimize TLB invalidation.  But iommu_flush_tlb_all() will
+        * end up flushing the ASID used for TTBR1 pagetables, which is not
+        * what we want.  So for now just use the same ASID as TTBR1.
+        */
+       pagetable->asid = 0;
+       return &pagetable->base;
+ }
+
  static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg)
  {
@@@ -36,7 -231,11 +231,11 @@@ static int msm_iommu_map(struct msm_mm
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;
  
 -      ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
 +      ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);
  
        return (ret == len) ? 0 : -EINVAL;
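
The sign extension applied to the IOVA in the map/unmap paths above matches what the arm-smmu driver expects once the GPU address space is split across TTBR0/TTBR1: when bit 48 of the IOVA is set, the remaining upper bits must repeat it. A standalone illustration of the same arithmetic; the helper name is made up for this sketch and is not part of the patch:

#include <linux/bits.h>
#include <linux/types.h>

/* Illustrative only: mirrors the sign extension done in the map/unmap
 * paths above. With the split at bit 48, an IOVA such as
 * 0x0001000000001000 becomes 0xffff000000001000 before being handed
 * to the IOMMU API. */
static inline u64 example_sign_extend_iova(u64 iova)
{
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);
	return iova;
}
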
@@@ -46,6 -245,9 +245,9 @@@ static int msm_iommu_unmap(struct msm_m
  {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
  
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
        iommu_unmap(iommu->domain, iova, len);
  
        return 0;
@@@ -78,9 -280,11 +280,11 @@@ struct msm_mmu *msm_iommu_new(struct de
                return ERR_PTR(-ENOMEM);
  
        iommu->domain = domain;
-       msm_mmu_init(&iommu->base, dev, &funcs);
+       msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
        iommu_set_fault_handler(domain, msm_fault_handler, iommu);
  
+       atomic_set(&iommu->pagetables, 0);
        ret = iommu_attach_device(iommu->domain, dev);
        if (ret) {
                kfree(iommu);
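
Taken together, the new MSM_MMU_IOMMU_PAGETABLE support above lets the GPU driver clone a private (TTBR0) pagetable from the shared IOMMU domain and query the TTBR/ASID it must program when switching to that address space. A rough usage sketch with error handling trimmed; apart from the msm_iommu_pagetable_*() calls, the names here (the wrapper function, the assumption that "msm_mmu.h" declares this API) are illustrative:

#include <linux/err.h>
#include <linux/types.h>

#include "msm_mmu.h"	/* assumed to declare the msm_iommu_pagetable_*() API */

/* Hypothetical helper: clone a per-process (TTBR0) pagetable from the
 * shared GPU IOMMU domain and fetch what the GPU needs to switch to it. */
static struct msm_mmu *example_create_private_mmu(struct msm_mmu *parent)
{
	struct msm_mmu *mmu;
	phys_addr_t ttbr;
	int asid;

	mmu = msm_iommu_pagetable_create(parent);
	if (IS_ERR(mmu))
		return mmu;

	if (msm_iommu_pagetable_params(mmu, &ttbr, &asid)) {
		mmu->funcs->destroy(mmu);
		return ERR_PTR(-EINVAL);
	}

	/* ... stash ttbr/asid in per-process state so the ring can emit
	 * the pagetable switch before that process's commands ... */
	return mmu;
}
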
index c9f2851904d0ad5bfa2dcdb22b119f11f5a0efc4,a31d7aebb8b856a8752fb34f0eb4b22e019249cb..da53aebb7230b980ba23e9c87d361d9b6d6906df
@@@ -26,9 -26,6 +26,9 @@@
  #include <linux/delay.h>
  #include <linux/i2c.h>
  #include <linux/types.h>
 +#include <drm/drm_connector.h>
 +
 +struct drm_device;
  
  /*
   * Unless otherwise noted, all values are from the DP 1.1a spec.  Note that
  # define DP_DS_PORT_TYPE_DP_DUALMODE        5
  # define DP_DS_PORT_TYPE_WIRELESS           6
  # define DP_DS_PORT_HPD                           (1 << 3)
 +# define DP_DS_NON_EDID_MASK              (0xf << 4)
 +# define DP_DS_NON_EDID_720x480i_60       (1 << 4)
 +# define DP_DS_NON_EDID_720x480i_50       (2 << 4)
 +# define DP_DS_NON_EDID_1920x1080i_60     (3 << 4)
 +# define DP_DS_NON_EDID_1920x1080i_50     (4 << 4)
 +# define DP_DS_NON_EDID_1280x720_60       (5 << 4)
 +# define DP_DS_NON_EDID_1280x720_50       (7 << 4)
  /* offset 1 for VGA is maximum megapixels per second / 8 */
 -/* offset 2 */
 +/* offset 1 for DVI/HDMI is maximum TMDS clock in Mbps / 2.5 */
 +/* offset 2 for VGA/DVI/HDMI */
  # define DP_DS_MAX_BPC_MASK               (3 << 0)
  # define DP_DS_8BPC                       0
  # define DP_DS_10BPC                      1
  # define DP_DS_12BPC                      2
  # define DP_DS_16BPC                      3
 +/* offset 3 for DVI */
 +# define DP_DS_DVI_DUAL_LINK              (1 << 1)
 +# define DP_DS_DVI_HIGH_COLOR_DEPTH       (1 << 2)
 +/* offset 3 for HDMI */
 +# define DP_DS_HDMI_FRAME_SEQ_TO_FRAME_PACK (1 << 0)
 +# define DP_DS_HDMI_YCBCR422_PASS_THROUGH   (1 << 1)
 +# define DP_DS_HDMI_YCBCR420_PASS_THROUGH   (1 << 2)
 +# define DP_DS_HDMI_YCBCR444_TO_422_CONV    (1 << 3)
 +# define DP_DS_HDMI_YCBCR444_TO_420_CONV    (1 << 4)
  
  #define DP_MAX_DOWNSTREAM_PORTS                   0x10
  
  #define DP_CEC_TX_MESSAGE_BUFFER               0x3020
  #define DP_CEC_MESSAGE_BUFFER_LENGTH             0x10
  
 +#define DP_PROTOCOL_CONVERTER_CONTROL_0               0x3050 /* DP 1.3 */
 +# define DP_HDMI_DVI_OUTPUT_CONFIG            (1 << 0) /* DP 1.3 */
 +#define DP_PROTOCOL_CONVERTER_CONTROL_1               0x3051 /* DP 1.3 */
 +# define DP_CONVERSION_TO_YCBCR420_ENABLE     (1 << 0) /* DP 1.3 */
 +# define DP_HDMI_EDID_PROCESSING_DISABLE      (1 << 1) /* DP 1.4 */
 +# define DP_HDMI_AUTONOMOUS_SCRAMBLING_DISABLE        (1 << 2) /* DP 1.4 */
 +# define DP_HDMI_FORCE_SCRAMBLING             (1 << 3) /* DP 1.4 */
 +#define DP_PROTOCOL_CONVERTER_CONTROL_2               0x3052 /* DP 1.3 */
 +# define DP_CONVERSION_TO_YCBCR422_ENABLE     (1 << 0) /* DP 1.3 */
 +
  #define DP_AUX_HDCP_BKSV              0x68000
  #define DP_AUX_HDCP_RI_PRIME          0x68005
  #define DP_AUX_HDCP_AKSV              0x68007
  #define DP_POWER_DOWN_PHY             0x25
  #define DP_SINK_EVENT_NOTIFY          0x30
  #define DP_QUERY_STREAM_ENC_STATUS    0x38
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_NO_EXIST    0
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_INACTIVE    1
 +#define  DP_QUERY_STREAM_ENC_STATUS_STATE_ACTIVE      2
  
  /* DP 1.2 MST sideband reply types */
  #define DP_SIDEBAND_REPLY_ACK         0x00
  #define DP_MST_PHYSICAL_PORT_0 0
  #define DP_MST_LOGICAL_PORT_0 8
  
+ #define DP_LINK_CONSTANT_N_VALUE 0x8000
  #define DP_LINK_STATUS_SIZE      6
  bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
@@@ -1639,60 -1607,19 +1640,60 @@@ static inline ssize_t drm_dp_dpcd_write
        return drm_dp_dpcd_write(aux, offset, &value, 1);
  }
  
 +int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
 +                        u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 +
  int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
                                 u8 status[DP_LINK_STATUS_SIZE]);
  
  bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
                                    u8 real_edid_checksum);
  
 -int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                              const u8 port_cap[4]);
 +int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
 +                              const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                              u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]);
 +bool drm_dp_downstream_is_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                             const u8 port_cap[4], u8 type);
 +bool drm_dp_downstream_is_tmds(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                             const u8 port_cap[4],
 +                             const struct edid *edid);
 +int drm_dp_downstream_max_dotclock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                 const u8 port_cap[4]);
 +int drm_dp_downstream_max_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   const u8 port_cap[4],
 +                                   const struct edid *edid);
 +int drm_dp_downstream_min_tmds_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                   const u8 port_cap[4],
 +                                   const struct edid *edid);
  int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                            const u8 port_cap[4]);
 +                            const u8 port_cap[4],
 +                            const struct edid *edid);
 +bool drm_dp_downstream_420_passthrough(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                     const u8 port_cap[4]);
 +bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                           const u8 port_cap[4]);
 +struct drm_display_mode *drm_dp_downstream_mode(struct drm_device *dev,
 +                                              const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                                              const u8 port_cap[4]);
  int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
 -void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 -                           const u8 port_cap[4], struct drm_dp_aux *aux);
 +void drm_dp_downstream_debug(struct seq_file *m,
 +                           const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                           const u8 port_cap[4],
 +                           const struct edid *edid,
 +                           struct drm_dp_aux *aux);
 +enum drm_mode_subconnector
 +drm_dp_subconnector_type(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                       const u8 port_cap[4]);
 +void drm_dp_set_subconnector_property(struct drm_connector *connector,
 +                                    enum drm_connector_status status,
 +                                    const u8 *dpcd,
 +                                    const u8 port_cap[4]);
 +
 +struct drm_dp_desc;
 +bool drm_dp_read_sink_count_cap(struct drm_connector *connector,
 +                              const u8 dpcd[DP_RECEIVER_CAP_SIZE],
 +                              const struct drm_dp_desc *desc);
 +int drm_dp_read_sink_count(struct drm_dp_aux *aux);
  
  void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
  void drm_dp_aux_init(struct drm_dp_aux *aux);
@@@ -1752,8 -1679,7 +1753,8 @@@ enum drm_dp_quirk 
         * @DP_DPCD_QUIRK_NO_SINK_COUNT:
         *
         * The device does not set SINK_COUNT to a non-zero value.
 -       * The driver should ignore SINK_COUNT during detection.
 +       * The driver should ignore SINK_COUNT during detection. Note that
 +       * drm_dp_read_sink_count_cap() automatically checks for this quirk.
         */
        DP_DPCD_QUIRK_NO_SINK_COUNT,
        /**
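
For reference, the reworked downstream-facing-port helpers declared above are fed the raw DPCD receiver caps, the downstream port caps, and (for the TMDS limits) the sink's EDID. A hedged sketch of how a connector driver might gather the limits at detect time; only the drm_dp_*() calls and DP_* constants come from this header, the wrapper struct and function are illustrative:

#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>

/* Illustrative container for the limits a driver might cache. */
struct example_dfp_limits {
	int max_dotclock;	/* 0 = no known limit */
	int max_tmds_clock;	/* 0 = no known limit */
	int max_bpc;		/* 0 = unknown */
};

static int example_probe_downstream(struct drm_dp_aux *aux,
				    const u8 dpcd[DP_RECEIVER_CAP_SIZE],
				    const struct edid *edid,
				    struct example_dfp_limits *limits)
{
	u8 port_cap[DP_MAX_DOWNSTREAM_PORTS] = {};
	int ret;

	/* Read DP_DOWNSTREAM_PORT_0.. into port_cap */
	ret = drm_dp_read_downstream_info(aux, dpcd, port_cap);
	if (ret)
		return ret;

	/* Derive the limits later used for mode validation */
	limits->max_dotclock = drm_dp_downstream_max_dotclock(dpcd, port_cap);
	limits->max_tmds_clock = drm_dp_downstream_max_tmds_clock(dpcd, port_cap, edid);
	limits->max_bpc = drm_dp_downstream_max_bpc(dpcd, port_cap, edid);

	return 0;
}
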