A couple of fixes from the previous pull request as well as GL3 support.
There is one drm core change: an export of a previously private function.
Take 2 on implementing screen targets, this time with the fbdev code adjusted
accordingly.
There is also an implementation of register-driven command buffers, which
overrides the FIFO ring for command processing; it is needed for our upcoming
hardware revision.
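For orientation, the command buffer manager lifecycle, condensed from the diff
below (a sketch only, with error handling elided; see vmw_request_device() and
vmw_request_device_late() for the real sequence):

	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	/* On failure, cman is left NULL and the driver keeps using the FIFO. */
	vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096, 2*4096);
	...
	vmw_cmdbuf_remove_pool(dev_priv->cman);
	vmw_cmdbuf_man_destroy(dev_priv->cman);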
* 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux: (35 commits)
drm/vmwgfx: Fix copyright headers
drm/vmwgfx: Add DX query support. Various fixes.
drm/vmwgfx: Add command parser support for a couple of DX commands
drm/vmwgfx: Command parser fixes for DX
drm/vmwgfx: Initial DX support
drm/vmwgfx: Update device includes for DX device functionality
drm: export the DRM permission check code
drm/vmwgfx: Fix crash when unloading vmwgfx v2
drm/vmwgfx: Fix framebuffer creation on older hardware
drm/vmwgfx: Fixed topology boundary checking for Screen Targets
drm/vmwgfx: Fix an uninitialized value
drm/vmwgfx: Fix compiler warning with 32-bit dma_addr_t
drm/vmwgfx: Kill a bunch of sparse warnings
drm/vmwgfx: Fix kms preferred mode sorting
drm/vmwgfx: Reinstate the legacy display system dirty callback
drm/vmwgfx: Implement fbdev on kms v2
drm/vmwgfx: Add a kernel interface to create a framebuffer v2
drm/vmwgfx: Avoid cmdbuf alloc sleeping if !TASK_RUNNING
drm/vmwgfx: Convert screen targets to new helpers v3
drm/vmwgfx: Convert screen objects to the new helpers
...
/**************************************************************************
*
- * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
+ #include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"
struct vmw_user_context {
struct ttm_base_object base;
struct vmw_resource res;
- struct vmw_ctx_binding_state cbs;
+ struct vmw_ctx_binding_state *cbs;
struct vmw_cmdbuf_res_manager *man;
+ struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
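+ /* Protects assignment and clearing of the entries in @cotables. */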
+ spinlock_t cotable_lock;
+ struct vmw_dma_buffer *dx_query_mob;
};
-
-
- typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
-
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
- static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
- static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind);
- static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
- static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
- static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
+ static int vmw_dx_context_create(struct vmw_resource *res);
+ static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf);
+ static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf);
+ static int vmw_dx_context_destroy(struct vmw_resource *res);
+
static uint64_t vmw_user_context_size;
static const struct vmw_user_resource_conv user_context_conv = {
.unbind = vmw_gb_context_unbind
};
- static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
- [vmw_ctx_binding_shader] = vmw_context_scrub_shader,
- [vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
- [vmw_ctx_binding_tex] = vmw_context_scrub_texture };
+ static const struct vmw_res_func vmw_dx_context_func = {
+ .res_type = vmw_res_dx_context,
+ .needs_backup = true,
+ .may_evict = true,
+ .type_name = "dx contexts",
+ .backup_placement = &vmw_mob_placement,
+ .create = vmw_dx_context_create,
+ .destroy = vmw_dx_context_destroy,
+ .bind = vmw_dx_context_bind,
+ .unbind = vmw_dx_context_unbind
+ };
/**
* Context management:
*/
+ static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
+ {
+ struct vmw_resource *res;
+ int i;
+
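+ /* Detach each cotable under the lock; drop the reference outside it. */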
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[i];
+ uctx->cotables[i] = NULL;
+ spin_unlock(&uctx->cotable_lock);
+
+ if (res)
+ vmw_resource_unreference(&res);
+ }
+ }
+
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
struct vmw_user_context *uctx =
} *cmd;
- if (res->func->destroy == vmw_gb_context_destroy) {
+ if (res->func->destroy == vmw_gb_context_destroy ||
+ res->func->destroy == vmw_dx_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
vmw_cmdbuf_res_man_destroy(uctx->man);
mutex_lock(&dev_priv->binding_mutex);
- (void) vmw_context_binding_state_kill(&uctx->cbs);
- (void) vmw_gb_context_destroy(res);
+ vmw_binding_state_kill(uctx->cbs);
+ (void) res->func->destroy(res);
mutex_unlock(&dev_priv->binding_mutex);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
+ vmw_context_cotables_unref(uctx);
return;
}
return;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
}
static int vmw_gb_context_init(struct vmw_private *dev_priv,
+ bool dx,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res))
{
- int ret;
+ int ret, i;
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
+ res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ SVGA3D_CONTEXT_DATA_SIZE);
ret = vmw_resource_init(dev_priv, res, true,
- res_free, &vmw_gb_context_func);
- res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
+ res_free,
+ dx ? &vmw_dx_context_func :
+ &vmw_gb_context_func);
if (unlikely(ret != 0))
goto out_err;
if (dev_priv->has_mob) {
uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
- if (unlikely(IS_ERR(uctx->man))) {
+ if (IS_ERR(uctx->man)) {
ret = PTR_ERR(uctx->man);
uctx->man = NULL;
goto out_err;
}
}
- memset(&uctx->cbs, 0, sizeof(uctx->cbs));
- INIT_LIST_HEAD(&uctx->cbs.list);
+ uctx->cbs = vmw_binding_state_alloc(dev_priv);
+ if (IS_ERR(uctx->cbs)) {
+ ret = PTR_ERR(uctx->cbs);
+ goto out_err;
+ }
+
+ spin_lock_init(&uctx->cotable_lock);
+
+ if (dx) {
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
+ &uctx->res, i);
+ if (unlikely(uctx->cotables[i] == NULL)) {
+ ret = -ENOMEM;
+ goto out_cotables;
+ }
+ }
+ }
+
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
+ out_cotables:
+ vmw_context_cotables_unref(uctx);
out_err:
if (res_free)
res_free(res);
static int vmw_context_init(struct vmw_private *dev_priv,
struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
+ void (*res_free)(struct vmw_resource *res),
+ bool dx)
{
int ret;
} *cmd;
if (dev_priv->has_mob)
- return vmw_gb_context_init(dev_priv, res, res_free);
+ return vmw_gb_context_init(dev_priv, dx, res, res_free);
ret = vmw_resource_init(dev_priv, res, false,
res_free, &vmw_legacy_context_func);
return -ENOMEM;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
+ cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
vmw_resource_activate(res, vmw_hw_context_destroy);
return 0;
return ret;
}
- struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
- {
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
-
- return (ret == 0) ? res : NULL;
- }
+ /*
+ * GB context.
+ */
static int vmw_gb_context_create(struct vmw_resource *res)
{
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
return 0;
"binding.\n");
return -ENOMEM;
}
-
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_scrub(&uctx->cbs);
+ vmw_binding_state_scrub(uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
if (dev_priv->query_cid == res->id)
dev_priv->query_cid_valid = false;
vmw_resource_release_id(res);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+
+ return 0;
+ }
+
+ /*
+ * DX context.
+ */
+
+ static int vmw_dx_context_create(struct vmw_resource *res)
+ {
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDefineContext body;
+ } *cmd;
+
+ if (likely(res->id != -1))
+ return 0;
+
+ ret = vmw_resource_alloc_id(res);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed to allocate a context id.\n");
+ goto out_no_id;
+ }
+
+ if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
+ ret = -EBUSY;
+ goto out_no_fifo;
+ }
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "creation.\n");
+ ret = -ENOMEM;
+ goto out_no_fifo;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ vmw_fifo_resource_inc(dev_priv);
+
+ return 0;
+
+ out_no_fifo:
+ vmw_resource_release_id(res);
+ out_no_id:
+ return ret;
+ }
+
+ static int vmw_dx_context_bind(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+ {
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd;
+ struct ttm_buffer_object *bo = val_buf->bo;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "binding.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ cmd->body.mobid = bo->mem.start;
+ cmd->body.validContents = res->backup_dirty;
+ res->backup_dirty = false;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+ return 0;
+ }
+
+ /**
+ * vmw_dx_context_scrub_cotables - Scrub all bindings and
+ * cotables from a context
+ *
+ * @ctx: Pointer to the context resource
+ * @readback: Whether to save the cotable contents on scrubbing.
+ *
+ * Cotables must be unbound before their context, but unbinding requires
+ * the backup buffer to be reserved, whereas scrubbing does not.
+ * This function scrubs all cotables of a context, potentially reading back
+ * the contents into their backup buffers. However, scrubbing cotables
+ * also makes the device context invalid, so scrub all bindings first so
+ * that doesn't have to be done later with an invalid context.
+ */
+ void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
+ bool readback)
+ {
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ int i;
+
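+ /* Scrub all bindings first; scrubbing the cotables below invalidates the device context (see above). */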
+ vmw_binding_state_scrub(uctx->cbs);
+ for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
+ struct vmw_resource *res;
+
+ /* Avoid racing with ongoing cotable destruction. */
+ spin_lock(&uctx->cotable_lock);
+ res = uctx->cotables[vmw_cotable_scrub_order[i]];
+ if (res)
+ res = vmw_resource_reference_unless_doomed(res);
+ spin_unlock(&uctx->cotable_lock);
+ if (!res)
+ continue;
+
+ WARN_ON(vmw_cotable_scrub(res, readback));
+ vmw_resource_unreference(&res);
+ }
+ }
+
+ static int vmw_dx_context_unbind(struct vmw_resource *res,
+ bool readback,
+ struct ttm_validate_buffer *val_buf)
+ {
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct ttm_buffer_object *bo = val_buf->bo;
+ struct vmw_fence_obj *fence;
+ struct vmw_user_context *uctx =
+ container_of(res, struct vmw_user_context, res);
+
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXReadbackContext body;
+ } *cmd1;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXBindContext body;
+ } *cmd2;
+ uint32_t submit_size;
+ uint8_t *cmd;
+
+ BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
+
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_dx_context_scrub_cotables(res, readback);
+
+ if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
+ readback) {
+ WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
+ if (vmw_query_readback_all(uctx->dx_query_mob))
+ DRM_ERROR("Failed to read back query states\n");
+ }
+
+ submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
+
+ cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "unbinding.\n");
+ mutex_unlock(&dev_priv->binding_mutex);
+ return -ENOMEM;
+ }
+
+ cmd2 = (void *) cmd;
+ if (readback) {
+ cmd1 = (void *) cmd;
+ cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
+ cmd1->header.size = sizeof(cmd1->body);
+ cmd1->body.cid = res->id;
+ cmd2 = (void *) (&cmd1[1]);
+ }
+ cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
+ cmd2->header.size = sizeof(cmd2->body);
+ cmd2->body.cid = res->id;
+ cmd2->body.mobid = SVGA3D_INVALID_ID;
+
+ vmw_fifo_commit(dev_priv, submit_size);
+ mutex_unlock(&dev_priv->binding_mutex);
+
+ /*
+ * Create a fence object and fence the backup buffer.
+ */
+
+ (void) vmw_execbuf_fence_commands(NULL, dev_priv,
+ &fence, NULL);
+
+ vmw_fence_single_bo(bo, fence);
+
+ if (likely(fence != NULL))
+ vmw_fence_obj_unreference(&fence);
+
+ return 0;
+ }
+
+ static int vmw_dx_context_destroy(struct vmw_resource *res)
+ {
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDXDestroyContext body;
+ } *cmd;
+
+ if (likely(res->id == -1))
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for context "
+ "destruction.\n");
+ return -ENOMEM;
+ }
+
+ cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.cid = res->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (dev_priv->query_cid == res->id)
+ dev_priv->query_cid_valid = false;
+ vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
container_of(res, struct vmw_user_context, res);
struct vmw_private *dev_priv = res->dev_priv;
+ if (ctx->cbs)
+ vmw_binding_state_free(ctx->cbs);
+
+ (void) vmw_context_bind_dx_query(res, NULL);
+
ttm_base_object_kfree(ctx, base);
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
- int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ static int vmw_context_define(struct drm_device *dev, void *data,
+ struct drm_file *file_priv, bool dx)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_context *ctx;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
+ if (!dev_priv->has_dx && dx) {
+ DRM_ERROR("DX contexts not supported by device.\n");
+ return -EINVAL;
+ }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* From here on, the destructor takes over resource freeing.
*/
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
+ ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
goto out_unlock;
out_unlock:
ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
-
- }
-
- /**
- * vmw_context_scrub_shader - scrub a shader binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
- static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
- {
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SET_SHADER;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
- }
-
- /**
- * vmw_context_scrub_render_target - scrub a render target binding
- * from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- */
- static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
- bool rebind)
- {
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
- return -ENOMEM;
- }
-
- cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.cid = bi->ctx->id;
- cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- cmd->body.target.face = 0;
- cmd->body.target.mipmap = 0;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
}
- /**
- * vmw_context_scrub_texture - scrub a texture binding from a context.
- *
- * @bi: single binding information.
- * @rebind: Whether to issue a bind instead of scrub command.
- *
- * TODO: Possibly complement this function with a function that takes
- * a list of texture bindings and combines them to a single command.
- */
- static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
- bool rebind)
- {
- struct vmw_private *dev_priv = bi->ctx->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- struct {
- SVGA3dCmdSetTextureState c;
- SVGA3dTextureState s1;
- } body;
- } *cmd;
-
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
- return -ENOMEM;
- }
-
-
- cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.c.cid = bi->ctx->id;
- cmd->body.s1.stage = bi->i1.texture_stage;
- cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
-
- return 0;
- }
-
- /**
- * vmw_context_binding_drop: Stop tracking a context binding
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Stops tracking a context binding, and re-initializes its storage.
- * Typically used when the context binding is replaced with a binding to
- * another (or the same, for that matter) resource.
- */
- static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
+ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- list_del(&cb->ctx_list);
- if (!list_empty(&cb->res_list))
- list_del(&cb->res_list);
- cb->bi.ctx = NULL;
+ return vmw_context_define(dev, data, file_priv, false);
}
- /**
- * vmw_context_binding_add: Start tracking a context binding
- *
- * @cbs: Pointer to the context binding state tracker.
- * @bi: Information about the binding to track.
- *
- * Performs basic checks on the binding to make sure arguments are within
- * bounds and then starts tracking the binding in the context binding
- * state structure @cbs.
- */
- int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
+ int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) bi->i1.rt_type);
- return -EINVAL;
- }
- loc = &cbs->render_targets[bi->i1.rt_type];
- break;
- case vmw_ctx_binding_tex:
- if (unlikely((unsigned)bi->i1.texture_stage >=
- SVGA3D_NUM_TEXTURE_UNITS)) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) bi->i1.texture_stage);
- return -EINVAL;
- }
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- if (unlikely((unsigned)bi->i1.shader_type >=
- SVGA3D_SHADERTYPE_MAX)) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) bi->i1.shader_type);
- return -EINVAL;
- }
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
+ union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
+ struct drm_vmw_context_arg *rep = &arg->rep;
+
+ switch (arg->req) {
+ case drm_vmw_context_legacy:
+ return vmw_context_define(dev, rep, file_priv, false);
+ case drm_vmw_context_dx:
+ return vmw_context_define(dev, rep, file_priv, true);
default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- loc->bi = *bi;
- loc->bi.scrubbed = false;
- list_add_tail(&loc->ctx_list, &cbs->list);
- INIT_LIST_HEAD(&loc->res_list);
-
- return 0;
- }
-
- /**
- * vmw_context_binding_transfer: Transfer a context binding tracking entry.
- *
- * @cbs: Pointer to the persistent context binding state tracker.
- * @bi: Information about the binding to track.
- *
- */
- static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
- const struct vmw_ctx_bindinfo *bi)
- {
- struct vmw_ctx_binding *loc;
-
- switch (bi->bt) {
- case vmw_ctx_binding_rt:
- loc = &cbs->render_targets[bi->i1.rt_type];
break;
- case vmw_ctx_binding_tex:
- loc = &cbs->texture_units[bi->i1.texture_stage];
- break;
- case vmw_ctx_binding_shader:
- loc = &cbs->shaders[bi->i1.shader_type];
- break;
- default:
- BUG();
- }
-
- if (loc->bi.ctx != NULL)
- vmw_context_binding_drop(loc);
-
- if (bi->res != NULL) {
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- list_add_tail(&loc->res_list, &bi->res->binding_head);
}
+ return -EINVAL;
}
/**
- * vmw_context_binding_kill - Kill a binding on the device
- * and stop tracking it.
- *
- * @cb: Pointer to binding tracker storage.
- *
- * Emits FIFO commands to scrub a binding represented by @cb.
- * Then stops tracking the binding and re-initializes its storage.
- */
- static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
- {
- if (!cb->bi.scrubbed) {
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
- cb->bi.scrubbed = true;
- }
- vmw_context_binding_drop(cb);
- }
-
- /**
- * vmw_context_binding_state_kill - Kill all bindings associated with a
- * struct vmw_ctx_binding state structure, and re-initialize the structure.
+ * vmw_context_binding_list - Return a list of context bindings
*
- * @cbs: Pointer to the context binding state tracker.
+ * @ctx: The context resource
*
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker. Then re-initializes the whole structure.
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
- static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
+ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry, *next;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
- list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
- vmw_context_binding_kill(entry);
+ return vmw_binding_state_list(uctx->cbs);
}
- /**
- * vmw_context_binding_state_scrub - Scrub all bindings associated with a
- * struct vmw_ctx_binding state structure.
- *
- * @cbs: Pointer to the context binding state tracker.
- *
- * Emits commands to scrub all bindings associated with the
- * context binding state tracker.
- */
- static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+ struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->man;
}
- /**
- * vmw_context_binding_res_list_kill - Kill all bindings on a
- * resource binding list
- *
- * @head: list head of resource binding list
- *
- * Kills all bindings associated with a specific resource. Typically
- * called before the resource is destroyed.
- */
- void vmw_context_binding_res_list_kill(struct list_head *head)
+ struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
+ SVGACOTableType cotable_type)
{
- struct vmw_ctx_binding *entry, *next;
+ if (cotable_type >= SVGA_COTABLE_DX10_MAX)
+ return ERR_PTR(-EINVAL);
- list_for_each_entry_safe(entry, next, head, res_list)
- vmw_context_binding_kill(entry);
+ return vmw_resource_reference
+ (container_of(ctx, struct vmw_user_context, res)->
+ cotables[cotable_type]);
}
/**
- * vmw_context_binding_res_list_scrub - Scrub all bindings on a
- * resource binding list
+ * vmw_context_binding_state -
+ * Return a pointer to a context binding state structure
*
- * @head: list head of resource binding list
+ * @ctx: The context resource
*
- * Scrub all bindings associated with a specific resource. Typically
- * called before the resource is evicted.
+ * Returns the current state of bindings of the given context. Note that
+ * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
*/
- void vmw_context_binding_res_list_scrub(struct list_head *head)
+ struct vmw_ctx_binding_state *
+ vmw_context_binding_state(struct vmw_resource *ctx)
{
- struct vmw_ctx_binding *entry;
-
- list_for_each_entry(entry, head, res_list) {
- if (!entry->bi.scrubbed) {
- (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
- entry->bi.scrubbed = true;
- }
- }
+ return container_of(ctx, struct vmw_user_context, res)->cbs;
}
/**
- * vmw_context_binding_state_transfer - Commit staged binding info
+ * vmw_context_bind_dx_query -
+ * Sets query MOB for the context. If @mob is NULL, then this function will
+ * remove the association between the MOB and the context. This function
+ * assumes the binding_mutex is held.
*
- * @ctx: Pointer to context to commit the staged binding info to.
- * @from: Staged binding info built during execbuf.
+ * @ctx_res: The context resource
+ * @mob: a reference to the query MOB
*
- * Transfers binding info from a temporary structure to the persistent
- * structure in the context. This can be done once commands
+ * Returns -EINVAL if a MOB has already been set and does not match the one
+ * specified in the parameter. 0 otherwise.
*/
- void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
- struct vmw_ctx_binding_state *from)
+ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
+ struct vmw_dma_buffer *mob)
{
struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding *entry, *next;
-
- list_for_each_entry_safe(entry, next, &from->list, ctx_list)
- vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
- }
+ container_of(ctx_res, struct vmw_user_context, res);
- /**
- * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
- *
- * @ctx: The context resource
- *
- * Walks through the context binding list and rebinds all scrubbed
- * resources.
- */
- int vmw_context_rebind_all(struct vmw_resource *ctx)
- {
- struct vmw_ctx_binding *entry;
- struct vmw_user_context *uctx =
- container_of(ctx, struct vmw_user_context, res);
- struct vmw_ctx_binding_state *cbs = &uctx->cbs;
- int ret;
+ if (mob == NULL) {
+ if (uctx->dx_query_mob) {
+ uctx->dx_query_mob->dx_query_ctx = NULL;
+ vmw_dmabuf_unreference(&uctx->dx_query_mob);
+ uctx->dx_query_mob = NULL;
+ }
- list_for_each_entry(entry, &cbs->list, ctx_list) {
- if (likely(!entry->bi.scrubbed))
- continue;
+ return 0;
+ }
- if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
- SVGA3D_INVALID_ID))
- continue;
+ /* Can only have one MOB per context for queries */
+ if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
+ return -EINVAL;
- ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
- if (unlikely(ret != 0))
- return ret;
+ mob->dx_query_ctx = ctx_res;
- entry->bi.scrubbed = false;
- }
+ if (!uctx->dx_query_mob)
+ uctx->dx_query_mob = vmw_dmabuf_reference(mob);
return 0;
}
/**
- * vmw_context_binding_list - Return a list of context bindings
- *
- * @ctx: The context resource
+ * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
*
- * Returns the current list of bindings of the given context. Note that
- * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ * @ctx_res: The context resource
*/
- struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+ struct vmw_dma_buffer *
+ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
- return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
- }
+ struct vmw_user_context *uctx =
+ container_of(ctx_res, struct vmw_user_context, res);
- struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
- {
- return container_of(ctx, struct vmw_user_context, res)->man;
+ return uctx->dx_query_mob;
}
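For completeness, a minimal userspace sketch of the new extended context ioctl
(assuming the vmwgfx_drm.h uapi additions that accompany this series; the
helper name is hypothetical):

	#include <sys/ioctl.h>
	#include "vmwgfx_drm.h"

	/* Hypothetical helper: request a DX context. The kernel returns
	 * -EINVAL when the device lacks DX support (see vmw_context_define()).
	 */
	static int example_create_dx_context(int drm_fd)
	{
		union drm_vmw_extended_context_arg arg = {
			.req = drm_vmw_context_dx /* or drm_vmw_context_legacy */
		};

		if (ioctl(drm_fd, DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT, &arg))
			return -1;

		return arg.rep.cid; /* context id for later command submission */
	}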
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
#include <drm/drmP.h>
#include "vmwgfx_drv.h"
+ #include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#define DRM_IOCTL_VMW_SYNCCPU \
DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
struct drm_vmw_synccpu_arg)
+ #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
+ struct drm_vmw_context_arg)
/**
* The core DRM version of this macro doesn't account for
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
- VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
- DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED |
+ DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
VMW_IOCTL_DEF(VMW_SYNCCPU,
vmw_user_dmabuf_synccpu_ioctl,
DRM_UNLOCKED | DRM_RENDER_ALLOW),
+ VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
+ vmw_extended_context_define_ioctl,
+ DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
DRM_INFO(" Command Buffers 2.\n");
if (capabilities & SVGA_CAP_GBOBJECTS)
DRM_INFO(" Guest Backed Resources.\n");
+ if (capabilities & SVGA_CAP_DX)
+ DRM_INFO(" DX Features.\n");
}
/**
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct ttm_buffer_object *bo;
+ struct vmw_dma_buffer *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
/*
- * Create the bo as pinned, so that a tryreserve will
+ * Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- ret = ttm_bo_create(&dev_priv->bdev,
- PAGE_SIZE,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, NULL,
- &bo);
+ vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
+ if (!vbo)
+ return -ENOMEM;
+ ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
+ &vmw_sys_ne_placement, false,
+ &vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
BUG_ON(ret != 0);
+ vmw_bo_pin_reserved(vbo, true);
- ret = ttm_bo_kmap(bo, 0, 1, &map);
+ ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
- vmw_bo_pin(bo, false);
- ttm_bo_unreserve(bo);
+ vmw_bo_pin_reserved(vbo, false);
+ ttm_bo_unreserve(&vbo->base);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
- ttm_bo_unref(&bo);
+ vmw_dmabuf_unreference(&vbo);
} else
- dev_priv->dummy_query_bo = bo;
+ dev_priv->dummy_query_bo = vbo;
return ret;
}
+ /**
+ * vmw_request_device_late - Perform late device setup
+ *
+ * @dev_priv: Pointer to device private.
+ *
+ * This function performs setup of otables and enables large command
+ * buffer submission. These tasks are split out to a separate function
+ * because it reverts vmw_release_device_early and is intended to be used
+ * by an error path in the hibernation code.
+ */
+ static int vmw_request_device_late(struct vmw_private *dev_priv)
+ {
+ int ret;
+
+ if (dev_priv->has_mob) {
+ ret = vmw_otables_setup(dev_priv);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Unable to initialize "
+ "guest Memory OBjects.\n");
+ return ret;
+ }
+ }
+
+ if (dev_priv->cman) {
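+ /* Give the manager a pool, enabling large command buffer submission. */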
+ ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
+ 256*4096, 2*4096);
+ if (ret) {
+ struct vmw_cmdbuf_man *man = dev_priv->cman;
+
+ dev_priv->cman = NULL;
+ vmw_cmdbuf_man_destroy(man);
+ }
+ }
+
+ return 0;
+ }
+
static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
- if (dev_priv->has_mob) {
- ret = vmw_otables_setup(dev_priv);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to initialize "
- "guest Memory OBjects.\n");
- goto out_no_mob;
- }
+ dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
+ if (IS_ERR(dev_priv->cman)) {
+ dev_priv->cman = NULL;
+ dev_priv->has_dx = false;
}
+
+ ret = vmw_request_device_late(dev_priv);
+ if (ret)
+ goto out_no_mob;
+
ret = vmw_dummy_query_bo_create(dev_priv);
if (unlikely(ret != 0))
goto out_no_query_bo;
return 0;
out_no_query_bo:
- if (dev_priv->has_mob)
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
+ if (dev_priv->has_mob) {
+ (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
vmw_otables_takedown(dev_priv);
+ }
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
vmw_fifo_release(dev_priv, &dev_priv->fifo);
return ret;
}
- static void vmw_release_device(struct vmw_private *dev_priv)
+ /**
+ * vmw_release_device_early - Early part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the first part of command submission takedown, to be called before
+ * buffer management is taken down.
+ */
+ static void vmw_release_device_early(struct vmw_private *dev_priv)
{
/*
* Previous destructions should've released
BUG_ON(dev_priv->pinned_bo != NULL);
- ttm_bo_unref(&dev_priv->dummy_query_bo);
- if (dev_priv->has_mob)
- vmw_otables_takedown(dev_priv);
- vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
- }
-
-
- /**
- * Increase the 3d resource refcount.
- * If the count was prevously zero, initialize the fifo, switching to svga
- * mode. Note that the master holds a ref as well, and may request an
- * explicit switch to svga mode if fb is not running, using @unhide_svga.
- */
- int vmw_3d_resource_inc(struct vmw_private *dev_priv,
- bool unhide_svga)
- {
- int ret = 0;
+ vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
+ if (dev_priv->cman)
+ vmw_cmdbuf_remove_pool(dev_priv->cman);
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(dev_priv->num_3d_resources++ == 0)) {
- ret = vmw_request_device(dev_priv);
- if (unlikely(ret != 0))
- --dev_priv->num_3d_resources;
- } else if (unhide_svga) {
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) &
- ~SVGA_REG_ENABLE_HIDE);
+ if (dev_priv->has_mob) {
+ ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ vmw_otables_takedown(dev_priv);
}
-
- mutex_unlock(&dev_priv->release_mutex);
- return ret;
}
/**
- * Decrease the 3d resource refcount.
- * If the count reaches zero, disable the fifo, switching to vga mode.
- * Note that the master holds a refcount as well, and may request an
- * explicit switch to vga mode when it releases its refcount to account
- * for the situation of an X server vt switch to VGA with 3d resources
- * active.
+ * vmw_release_device_late - Late part of fifo takedown.
+ *
+ * @dev_priv: Pointer to device private struct.
+ *
+ * This is the last part of the command submission takedown, to be called when
+ * command submission is no longer needed. It may wait on pending fences.
*/
- void vmw_3d_resource_dec(struct vmw_private *dev_priv,
- bool hide_svga)
+ static void vmw_release_device_late(struct vmw_private *dev_priv)
{
- int32_t n3d;
-
- mutex_lock(&dev_priv->release_mutex);
- if (unlikely(--dev_priv->num_3d_resources == 0))
- vmw_release_device(dev_priv);
- else if (hide_svga)
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- vmw_read(dev_priv, SVGA_REG_ENABLE) |
- SVGA_REG_ENABLE_HIDE);
-
- n3d = (int32_t) dev_priv->num_3d_resources;
- mutex_unlock(&dev_priv->release_mutex);
+ vmw_fence_fifo_down(dev_priv->fman);
+ if (dev_priv->cman)
+ vmw_cmdbuf_man_destroy(dev_priv->cman);
- BUG_ON(n3d < 0);
+ vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
spin_lock_init(&dev_priv->cap_lock);
+ spin_lock_init(&dev_priv->svga_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
dev_priv->max_mob_size =
vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
- } else
+ dev_priv->stdu_max_width =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
+ dev_priv->stdu_max_height =
+ vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);
+
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
+ dev_priv->texture_max_width = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP,
+ SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
+ dev_priv->texture_max_height = vmw_read(dev_priv,
+ SVGA_REG_DEV_CAP);
+ } else {
+ dev_priv->texture_max_width = 8192;
+ dev_priv->texture_max_height = 8192;
dev_priv->prim_bb_mem = dev_priv->vram_size;
+ }
+
+ vmw_print_capabilities(dev_priv->capabilities);
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
goto out_err0;
- /*
- * Limit back buffer size to VRAM size. Remove this once
- * screen targets are implemented.
- */
- if (dev_priv->prim_bb_mem > dev_priv->vram_size)
- dev_priv->prim_bb_mem = dev_priv->vram_size;
-
- vmw_print_capabilities(dev_priv->capabilities);
-
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
DRM_INFO("Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
dev_priv->active_master = &dev_priv->fbdev_master;
- ret = ttm_bo_device_init(&dev_priv->bdev,
- dev_priv->bo_global_ref.ref.object,
- &vmw_bo_driver,
- dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
- false);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Failed initializing TTM buffer object driver.\n");
- goto out_err1;
- }
-
dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
dev_priv->mmio_size);
goto out_no_fman;
}
+ ret = ttm_bo_device_init(&dev_priv->bdev,
+ dev_priv->bo_global_ref.ref.object,
+ &vmw_bo_driver,
+ dev->anon_inode->i_mapping,
+ VMWGFX_FILE_PAGE_OFFSET,
+ false);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed initializing TTM buffer object driver.\n");
+ goto out_no_bdev;
+ }
+ /*
+ * Enable VRAM, but initially don't use it until SVGA is enabled and
+ * unhidden.
+ */
ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
(dev_priv->vram_size >> PAGE_SHIFT));
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing memory manager for VRAM.\n");
goto out_no_vram;
}
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
dev_priv->has_gmr = true;
if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
}
}
- vmw_kms_save_vga(dev_priv);
+ if (dev_priv->has_mob) {
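+ /* Probe DX support. SVGA_REG_DEV_CAP is a write-index/read-value register pair, hence the cap_lock serialization. */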
+ spin_lock(&dev_priv->cap_lock);
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
+ dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ spin_unlock(&dev_priv->cap_lock);
+ }
+
- /* Start kms and overlay systems, needs fifo. */
ret = vmw_kms_init(dev_priv);
if (unlikely(ret != 0))
goto out_no_kms;
vmw_overlay_init(dev_priv);
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ goto out_no_fifo;
+
+ DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");
+
if (dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- goto out_no_fifo;
+ vmw_fifo_resource_inc(dev_priv);
+ vmw_svga_enable(dev_priv);
vmw_fb_init(dev_priv);
}
vmw_overlay_close(dev_priv);
vmw_kms_close(dev_priv);
out_no_kms:
- vmw_kms_restore_vga(dev_priv);
if (dev_priv->has_mob)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
+ (void)ttm_bo_device_release(&dev_priv->bdev);
+ out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
iounmap(dev_priv->mmio_virt);
out_err3:
arch_phys_wc_del(dev_priv->mmio_mtrr);
- (void)ttm_bo_device_release(&dev_priv->bdev);
- out_err1:
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
idr_destroy(&dev_priv->res_idr[i]);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
kfree(dev_priv);
return ret;
}
drm_ht_remove(&dev_priv->ctx.res_ht);
vfree(dev_priv->ctx.cmd_bounce);
if (dev_priv->enable_fb) {
+ vmw_fb_off(dev_priv);
vmw_fb_close(dev_priv);
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, false);
+ vmw_fifo_resource_dec(dev_priv);
+ vmw_svga_disable(dev_priv);
}
+
vmw_kms_close(dev_priv);
vmw_overlay_close(dev_priv);
- if (dev_priv->has_mob)
- (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
if (dev_priv->has_gmr)
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+ vmw_release_device_early(dev_priv);
+ if (dev_priv->has_mob)
+ (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+ (void) ttm_bo_device_release(&dev_priv->bdev);
+ vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
drm_irq_uninstall(dev_priv->dev);
iounmap(dev_priv->mmio_virt);
arch_phys_wc_del(dev_priv->mmio_mtrr);
(void)ttm_bo_device_release(&dev_priv->bdev);
+ if (dev_priv->ctx.staged_bindings)
+ vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
for (i = vmw_res_context; i < vmw_res_max; ++i)
const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
- if (unlikely(ioctl->cmd != cmd)) {
- DRM_ERROR("Invalid command format, ioctl %d\n",
- nr - DRM_COMMAND_BASE);
- return -EINVAL;
+ if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
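+ /* Execbuf now takes a variable-size argument: check permission and direction here and pass the actual argument size through. */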
+ ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
+ goto out_io_encoding;
+
+ return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
+ _IOC_SIZE(cmd));
}
+
+ if (unlikely(ioctl->cmd != cmd))
+ goto out_io_encoding;
+
flags = ioctl->flags;
} else if (!drm_ioctl_flags(nr, &flags))
return -EINVAL;
vmaster = vmw_master_check(dev, file_priv, flags);
- if (unlikely(IS_ERR(vmaster))) {
+ if (IS_ERR(vmaster)) {
ret = PTR_ERR(vmaster);
if (ret != -ERESTARTSYS)
ttm_read_unlock(&vmaster->lock);
return ret;
+
+ out_io_encoding:
+ DRM_ERROR("Invalid command format, ioctl %d\n",
+ nr - DRM_COMMAND_BASE);
+
+ return -EINVAL;
}
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
static void vmw_lastclose(struct drm_device *dev)
{
- struct drm_crtc *crtc;
- struct drm_mode_set set;
- int ret;
-
- set.x = 0;
- set.y = 0;
- set.fb = NULL;
- set.mode = NULL;
- set.connectors = NULL;
- set.num_connectors = 0;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- set.crtc = crtc;
- ret = drm_mode_set_config_internal(&set);
- WARN_ON(ret != 0);
- }
-
}
static void vmw_master_init(struct vmw_master *vmaster)
{
ttm_lock_init(&vmaster->lock);
- INIT_LIST_HEAD(&vmaster->fb_surf);
- mutex_init(&vmaster->fb_surf_mutex);
}
static int vmw_master_create(struct drm_device *dev,
kfree(vmaster);
}
-
static int vmw_master_set(struct drm_device *dev,
struct drm_file *file_priv,
bool from_open)
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret = 0;
- if (!dev_priv->enable_fb) {
- ret = vmw_3d_resource_inc(dev_priv, true);
- if (unlikely(ret != 0))
- return ret;
- vmw_kms_save_vga(dev_priv);
- vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- }
-
if (active) {
BUG_ON(active != &dev_priv->fbdev_master);
ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
if (unlikely(ret != 0))
- goto out_no_active_lock;
+ return ret;
ttm_lock_set_kill(&active->lock, true, SIGTERM);
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to clean VRAM on "
- "master drop.\n");
- }
-
dev_priv->active_master = NULL;
}
dev_priv->active_master = vmaster;
return 0;
-
- out_no_active_lock:
- if (!dev_priv->enable_fb) {
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
- return ret;
}
static void vmw_master_drop(struct drm_device *dev,
}
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
- vmw_execbuf_release_pinned_bo(dev_priv);
- if (!dev_priv->enable_fb) {
- ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
- if (unlikely(ret != 0))
- DRM_ERROR("Unable to clean VRAM on master drop.\n");
- vmw_kms_restore_vga(dev_priv);
- vmw_3d_resource_dec(dev_priv, true);
- vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- }
+ if (!dev_priv->enable_fb)
+ vmw_svga_disable(dev_priv);
dev_priv->active_master = &dev_priv->fbdev_master;
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
vmw_fb_on(dev_priv);
}
+ /**
+ * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in non-exclusive mode.
+ */
+ static void __vmw_svga_enable(struct vmw_private *dev_priv)
+ {
+ spin_lock(&dev_priv->svga_lock);
+ if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
+ }
+ spin_unlock(&dev_priv->svga_lock);
+ }
+
+ /**
+ * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ */
+ void vmw_svga_enable(struct vmw_private *dev_priv)
+ {
+ ttm_read_lock(&dev_priv->reservation_sem, false);
+ __vmw_svga_enable(dev_priv);
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ }
+
+ /**
+ * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Needs the reservation sem to be held in exclusive mode.
+ * Will not empty VRAM. VRAM must be emptied by caller.
+ */
+ static void __vmw_svga_disable(struct vmw_private *dev_priv)
+ {
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ }
+ spin_unlock(&dev_priv->svga_lock);
+ }
+
+ /**
+ * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
+ * running.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * Will empty VRAM.
+ */
+ void vmw_svga_disable(struct vmw_private *dev_priv)
+ {
+ ttm_write_lock(&dev_priv->reservation_sem, false);
+ spin_lock(&dev_priv->svga_lock);
+ if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
+ dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
+ spin_unlock(&dev_priv->svga_lock);
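+ /* Evicting VRAM may sleep, so do it outside the spinlock. */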
+ if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ DRM_ERROR("Failed evicting VRAM buffers.\n");
+ vmw_write(dev_priv, SVGA_REG_ENABLE,
+ SVGA_REG_ENABLE_HIDE |
+ SVGA_REG_ENABLE_ENABLE);
+ } else
+ spin_unlock(&dev_priv->svga_lock);
+ ttm_write_unlock(&dev_priv->reservation_sem);
+ }
static void vmw_remove(struct pci_dev *pdev)
{
switch (val) {
case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
+ if (dev_priv->enable_fb)
+ vmw_fb_off(dev_priv);
ttm_suspend_lock(&dev_priv->reservation_sem);
- /**
+ /*
* This empties VRAM and unbinds all GMR bindings.
* Buffer contents is moved to swappable memory.
*/
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
ttm_bo_swapout_all(&dev_priv->bdev);
-
+ vmw_fence_fifo_down(dev_priv->fman);
break;
case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
case PM_POST_RESTORE:
+ vmw_fence_fifo_up(dev_priv->fman);
ttm_suspend_unlock(&dev_priv->reservation_sem);
-
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
break;
case PM_RESTORE_PREPARE:
break;
return 0;
}
- /**
- * These might not be needed with the virtual SVGA device.
- */
-
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- if (dev_priv->num_3d_resources != 0) {
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ if (dev_priv->refuse_hibernation)
return -EBUSY;
- }
pci_save_state(pdev);
pci_disable_device(pdev);
return vmw_pci_resume(pdev);
}
- static int vmw_pm_prepare(struct device *kdev)
+ static int vmw_pm_freeze(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- /**
- * Release 3d reference held by fbdev and potentially
- * stop fifo.
- */
dev_priv->suspended = true;
if (dev_priv->enable_fb)
- vmw_3d_resource_dec(dev_priv, true);
-
- if (dev_priv->num_3d_resources != 0) {
-
- DRM_INFO("Can't suspend or hibernate "
- "while 3D resources are active.\n");
+ vmw_fifo_resource_dec(dev_priv);
+ if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
+ DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, true);
+ vmw_fifo_resource_inc(dev_priv);
+ WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspended = false;
return -EBUSY;
}
+ if (dev_priv->enable_fb)
+ __vmw_svga_disable(dev_priv);
+
+ vmw_release_device_late(dev_priv);
+
return 0;
}
- static void vmw_pm_complete(struct device *kdev)
+ static int vmw_pm_restore(struct device *kdev)
{
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- /**
- * Reclaim 3d reference held by fbdev and potentially
- * start fifo.
- */
if (dev_priv->enable_fb)
- vmw_3d_resource_inc(dev_priv, false);
+ vmw_fifo_resource_inc(dev_priv);
+
+ ret = vmw_request_device(dev_priv);
+ if (ret)
+ return ret;
+
+ if (dev_priv->enable_fb)
+ __vmw_svga_enable(dev_priv);
dev_priv->suspended = false;
+
+ return 0;
}
static const struct dev_pm_ops vmw_pm_ops = {
- .prepare = vmw_pm_prepare,
- .complete = vmw_pm_complete,
+ .freeze = vmw_pm_freeze,
+ .thaw = vmw_pm_restore,
+ .restore = vmw_pm_restore,
.suspend = vmw_pm_suspend,
.resume = vmw_pm_resume,
};
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
-
- struct vmw_clip_rect {
- int x1, x2, y1, y2;
- };
-
- /**
- * Clip @num_rects number of @rects against @clip storing the
- * results in @out_rects and the number of passed rects in @out_num.
- */
- static void vmw_clip_cliprects(struct drm_clip_rect *rects,
- int num_rects,
- struct vmw_clip_rect clip,
- SVGASignedRect *out_rects,
- int *out_num)
- {
- int i, k;
-
- for (i = 0, k = 0; i < num_rects; i++) {
- int x1 = max_t(int, clip.x1, rects[i].x1);
- int y1 = max_t(int, clip.y1, rects[i].y1);
- int x2 = min_t(int, clip.x2, rects[i].x2);
- int y2 = min_t(int, clip.y2, rects[i].y2);
-
- if (x1 >= x2)
- continue;
- if (y1 >= y2)
- continue;
-
- out_rects[k].left = x1;
- out_rects[k].top = y1;
- out_rects[k].right = x2;
- out_rects[k].bottom = y2;
- k++;
- }
-
- *out_num = k;
- }
-
- void vmw_display_unit_cleanup(struct vmw_display_unit *du)
+ void vmw_du_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
vmw_surface_unreference(&du->cursor_surface);
memcpy(&cmd[1], image, image_size);
- cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
- cmd->cursor.id = cpu_to_le32(0);
- cmd->cursor.width = cpu_to_le32(width);
- cmd->cursor.height = cpu_to_le32(height);
- cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
- cmd->cursor.hotspotY = cpu_to_le32(hotspotY);
+ cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
+ cmd->cursor.id = 0;
+ cmd->cursor.width = width;
+ cmd->cursor.height = height;
+ cmd->cursor.hotspotX = hotspotX;
+ cmd->cursor.hotspotY = hotspotY;
vmw_fifo_commit(dev_priv, cmd_size);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
- __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ u32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t count;
iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
srf->snooper.age++;
- /* we can't call this function from this function since execbuf has
- * reserved fifo space.
- *
- * if (srf->snooper.crtc)
- * vmw_ldu_crtc_cursor_update_image(dev_priv,
- * srf->snooper.image, 64, 64,
- * du->hotspot_x, du->hotspot_y);
- */
-
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(bo);
* Surface framebuffer code
*/
- #define vmw_framebuffer_to_vfbs(x) \
- container_of(x, struct vmw_framebuffer_surface, base.base)
-
- struct vmw_framebuffer_surface {
- struct vmw_framebuffer base;
- struct vmw_surface *surface;
- struct vmw_dma_buffer *buffer;
- struct list_head head;
- struct drm_master *master;
- };
-
static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_surface *vfbs =
vmw_framebuffer_to_vfbs(framebuffer);
- struct vmw_master *vmaster = vmw_master(vfbs->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_del(&vfbs->head);
- mutex_unlock(&vmaster->fb_surf_mutex);
-
- drm_master_put(&vfbs->master);
drm_framebuffer_cleanup(framebuffer);
vmw_surface_unreference(&vfbs->surface);
- ttm_base_object_unref(&vfbs->base.user_obj);
+ if (vfbs->base.user_obj)
+ ttm_base_object_unref(&vfbs->base.user_obj);
kfree(vfbs);
}
- static int do_surface_dirty_sou(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
- {
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
- head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kzalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Temporary fifo memory alloc failed.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- /* setup blits pointer */
- blits = (SVGASignedRect *)&cmd[1];
-
- /* initial clip region */
- left = clips->x1;
- right = clips->x2;
- top = clips->y1;
- bottom = clips->y2;
-
- /* skip the first clip rect */
- for (i = 1, clips_ptr = clips + inc;
- i < num_clips; i++, clips_ptr += inc) {
- left = min_t(int, left, (int)clips_ptr->x1);
- right = max_t(int, right, (int)clips_ptr->x2);
- top = min_t(int, top, (int)clips_ptr->y1);
- bottom = max_t(int, bottom, (int)clips_ptr->y2);
- }
-
- /* only need to do this once */
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += inc) {
- tmp[i].x1 = clips_ptr->x1 - left;
- tmp[i].x2 = clips_ptr->x2 - left;
- tmp[i].y1 = clips_ptr->y1 - top;
- tmp[i].y2 = clips_ptr->y2 - top;
- }
-
- /* do per unit writing, reuse fifo for each */
- for (i = 0; i < num_units; i++) {
- struct vmw_display_unit *unit = units[i];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left - unit->crtc.x;
- clip.y1 = top - unit->crtc.y;
- clip.x2 = right - unit->crtc.x;
- clip.y2 = bottom - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
-
- kfree(cmd);
- out_free_tmp:
- kfree(tmp);
-
- return ret;
- }
-
static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect norect;
int ret, inc = 1;
- if (unlikely(vfbs->master != file_priv->master))
- return -EINVAL;
-
- /* Require ScreenObject support for 3D */
- if (!dev_priv->sou_priv)
+ /* Legacy Display Unit does not support 3D */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -EINVAL;
drm_modeset_lock_all(dev_priv->dev);
inc = 2; /* skip source rects */
}
- ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
- flags, color,
- clips, num_clips, inc, NULL);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
+ clips, NULL, NULL, 0, 0,
+ num_clips, inc, NULL);
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
return 0;
}
+ /**
+ * vmw_kms_readback - Perform a readback from the screen system to
+ * a dma-buffer backed framebuffer.
+ *
+ * @dev_priv: Pointer to the device private structure.
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * Must be set to NULL if @user_fence_rep is NULL.
+ * @vfb: Pointer to the dma-buffer backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
+ * Must be set to non-NULL if @file_priv is non-NULL.
+ * @vclips: Array of clip rects.
+ * @num_clips: Number of clip rects in @vclips.
+ *
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted.
+ */
+ int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips)
+ {
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_object:
+ return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, vclips, num_clips);
+ case vmw_du_screen_target:
+ return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, false, true);
+ default:
+ WARN_ONCE(true,
+ "Readback called with invalid display system.\n");
+ }
+
+ return -ENOSYS;
+ }
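A hedged usage sketch of the new readback entry point: a single clip rect covering the whole framebuffer. Only the wrapper function is hypothetical; the calling convention follows the kernel-doc above.

static int example_readback_fullscreen(struct vmw_private *dev_priv,
                                       struct drm_file *file_priv,
                                       struct vmw_framebuffer *vfb,
                                       struct drm_vmw_fence_rep __user *rep)
{
        /* One clip rect spanning the entire framebuffer. */
        struct drm_vmw_rect vclip = {
                .x = 0,
                .y = 0,
                .w = vfb->base.width,
                .h = vfb->base.height,
        };

        /* @file_priv and @rep must be both NULL or both non-NULL. */
        return vmw_kms_readback(dev_priv, file_priv, vfb, rep, &vclip, 1);
}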
+
+
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
.destroy = vmw_framebuffer_surface_destroy,
.dirty = vmw_framebuffer_surface_dirty,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
- *mode_cmd)
+ *mode_cmd,
+ bool is_dmabuf_proxy)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
- /* 3D is only supported on HWv8 hosts which supports screen objects */
- if (!dev_priv->sou_priv)
+ /* 3D is only supported on HWv8 and newer hosts */
+ if (dev_priv->active_display_unit == vmw_du_legacy)
return -ENOSYS;
/*
case 15:
format = SVGA3D_A1R5G5B5;
break;
- case 8:
- format = SVGA3D_LUMINANCE8;
- break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
- if (unlikely(format != surface->format)) {
+ /*
+ * For DX, surface format validation is done when surface->scanout
+ * is set.
+ */
+ if (!dev_priv->has_dx && format != surface->format) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
goto out_err1;
}
- if (!vmw_surface_reference(surface)) {
- DRM_ERROR("failed to reference surface %p\n", surface);
- ret = -EINVAL;
- goto out_err2;
- }
-
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
- vfbs->surface = surface;
+ vfbs->surface = vmw_surface_reference(surface);
vfbs->base.user_handle = mode_cmd->handle;
- vfbs->master = drm_master_get(file_priv->master);
-
- mutex_lock(&vmaster->fb_surf_mutex);
- list_add_tail(&vfbs->head, &vmaster->fb_surf);
- mutex_unlock(&vmaster->fb_surf_mutex);
+ vfbs->is_dmabuf_proxy = is_dmabuf_proxy;
*out = &vfbs->base;
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
- out_err3:
- vmw_surface_unreference(&surface);
out_err2:
+ vmw_surface_unreference(&surface);
kfree(vfbs);
out_err1:
return ret;
* Dmabuf framebuffer code
*/
- #define vmw_framebuffer_to_vfbd(x) \
- container_of(x, struct vmw_framebuffer_dmabuf, base.base)
-
- struct vmw_framebuffer_dmabuf {
- struct vmw_framebuffer base;
- struct vmw_dma_buffer *buffer;
- };
-
static void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
struct vmw_framebuffer_dmabuf *vfbd =
drm_framebuffer_cleanup(framebuffer);
vmw_dmabuf_unreference(&vfbd->buffer);
- ttm_base_object_unref(&vfbd->base.user_obj);
+ if (vfbd->base.user_obj)
+ ttm_base_object_unref(&vfbd->base.user_obj);
kfree(vfbd);
}
- static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment)
- {
- size_t fifo_size;
- int i;
-
- struct {
- uint32_t header;
- SVGAFifoCmdUpdate body;
- } *cmd;
-
- fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- for (i = 0; i < num_clips; i++, clips += increment) {
- cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
- cmd[i].body.x = cpu_to_le32(clips->x1);
- cmd[i].body.y = cpu_to_le32(clips->y1);
- cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
- cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
- }
-
- vmw_fifo_commit(dev_priv, fifo_size);
- return 0;
- }
-
- static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer)
- {
- int depth = framebuffer->base.depth;
- size_t fifo_size;
- int ret;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
-
- /* Emulate RGBA support, contrary to svga_reg.h this is not
- * supported by hosts. This is only a problem if we are reading
- * this value later and expecting what we uploaded back.
- */
- if (depth == 32)
- depth = 24;
-
- fifo_size = sizeof(*cmd);
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
- cmd->body.format.colorDepth = depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitches[0];
- cmd->body.ptr.gmrId = framebuffer->user_handle;
- cmd->body.ptr.offset = 0;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- kfree(cmd);
-
- return ret;
- }
-
- static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
- struct vmw_private *dev_priv,
- struct vmw_framebuffer *framebuffer,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips, int increment,
- struct vmw_fence_obj **out_fence)
- {
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *clips_ptr;
- int i, k, num_units, ret;
- struct drm_crtc *crtc;
- size_t fifo_size;
-
- struct {
- uint32_t header;
- SVGAFifoCmdBlitGMRFBToScreen body;
- } *blits;
-
- ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
- if (unlikely(ret != 0))
- return ret; /* define_gmrfb prints warnings */
-
- fifo_size = sizeof(*blits) * num_clips;
- blits = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(blits == NULL)) {
- DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
- return -ENOMEM;
- }
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- int hit_num = 0;
-
- clips_ptr = clips;
- for (i = 0; i < num_clips; i++, clips_ptr += increment) {
- int clip_x1 = clips_ptr->x1 - unit->crtc.x;
- int clip_y1 = clips_ptr->y1 - unit->crtc.y;
- int clip_x2 = clips_ptr->x2 - unit->crtc.x;
- int clip_y2 = clips_ptr->y2 - unit->crtc.y;
- int move_x, move_y;
-
- /* skip any crtcs that misses the clip region */
- if (clip_x1 >= unit->crtc.mode.hdisplay ||
- clip_y1 >= unit->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- /* clip size to crtc size */
- clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
- clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
-
- /* translate both src and dest to bring clip into screen */
- move_x = min_t(int, clip_x1, 0);
- move_y = min_t(int, clip_y1, 0);
-
- /* actual translate done here */
- blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
- blits[hit_num].body.destScreenId = unit->unit;
- blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
- blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
- blits[hit_num].body.destRect.left = clip_x1 - move_x;
- blits[hit_num].body.destRect.top = clip_y1 - move_y;
- blits[hit_num].body.destRect.right = clip_x2;
- blits[hit_num].body.destRect.bottom = clip_y2;
- hit_num++;
- }
-
- /* no clips hit the crtc */
- if (hit_num == 0)
- continue;
-
- /* only return the last fence */
- if (out_fence && *out_fence)
- vmw_fence_obj_unreference(out_fence);
-
- fifo_size = sizeof(*blits) * hit_num;
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
- fifo_size, 0, NULL, out_fence);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(blits);
-
- return ret;
- }
-
static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
increment = 2;
}
- if (dev_priv->ldu_priv) {
- ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment);
- } else {
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
- flags, color,
- clips, num_clips, increment, NULL);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
+ clips, NULL, num_clips, increment,
+ true, true);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
+ clips, num_clips, increment,
+ true,
+ NULL);
+ break;
+ case vmw_du_legacy:
+ ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
+ clips, num_clips, increment);
+ break;
+ default:
+ ret = -EINVAL;
+ WARN_ONCE(true, "Dirty called with invalid display system.\n");
+ break;
}
+ vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(dev_priv->dev);
/**
* Pin the dmabuffer to the start of vram.
*/
- static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
+ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
int ret;
- /* This code should not be used with screen objects */
- BUG_ON(dev_priv->sou_priv);
-
- vmw_overlay_pause_all(dev_priv);
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
- ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
-
- vmw_overlay_resume_all(dev_priv);
+ if (!buf)
+ return 0;
- WARN_ON(ret != 0);
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_legacy:
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_dmabuf_pin_in_start_of_vram(dev_priv, buf, false);
+ vmw_overlay_resume_all(dev_priv);
+ break;
+ case vmw_du_screen_object:
+ case vmw_du_screen_target:
+ if (vfb->dmabuf)
+ return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
+ false);
+
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf,
+ &vmw_mob_placement, false);
+ default:
+ return -EINVAL;
+ }
- return 0;
+ return ret;
}
- static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
+ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
+ struct vmw_dma_buffer *buf;
+
+ buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
- if (!vfbd->buffer) {
- WARN_ON(!vfbd->buffer);
+ if (WARN_ON(!buf))
return 0;
+
+ return vmw_dmabuf_unpin(dev_priv, buf, false);
+ }
+
+ /**
+ * vmw_create_dmabuf_proxy - create a proxy surface for the DMA buf
+ *
+ * @dev: DRM device
+ * @mode_cmd: parameters for the new surface
+ * @dmabuf_mob: MOB backing the DMA buf
+ * @srf_out: newly created surface
+ *
+ * When the content FB is a DMA buf, we create a surface as a proxy to the
+ * same buffer. This way we can do a surface copy rather than a surface DMA.
+ * This is a more efficient approach.
+ *
+ * RETURNS:
+ * 0 on success, error code otherwise
+ */
+ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
+ const struct drm_mode_fb_cmd *mode_cmd,
+ struct vmw_dma_buffer *dmabuf_mob,
+ struct vmw_surface **srf_out)
+ {
+ uint32_t format;
+ struct drm_vmw_size content_base_size;
+ struct vmw_resource *res;
+ int ret;
+
+ switch (mode_cmd->depth) {
+ case 32:
+ case 24:
+ format = SVGA3D_X8R8G8B8;
+ break;
+
+ case 16:
+ case 15:
+ format = SVGA3D_R5G6B5;
+ break;
+
+ case 8:
+ format = SVGA3D_P8;
+ break;
+
+ default:
+ DRM_ERROR("Invalid framebuffer format %d\n", mode_cmd->depth);
+ return -EINVAL;
+ }
+
+ content_base_size.width = mode_cmd->width;
+ content_base_size.height = mode_cmd->height;
+ content_base_size.depth = 1;
+
+ ret = vmw_surface_gb_priv_define(dev,
+ 0, /* kernel visible only */
+ 0, /* flags */
+ format,
+ true, /* can be a scanout buffer */
+ 1, /* num of mip levels */
+ 0,
+ 0,
+ content_base_size,
+ srf_out);
+ if (ret) {
+ DRM_ERROR("Failed to allocate proxy content buffer\n");
+ return ret;
}
- return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
+ res = &(*srf_out)->res;
+
+ /* Reserve and switch the backing mob. */
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+ (void) vmw_resource_reserve(res, false, true);
+ vmw_dmabuf_unreference(&res->backup);
+ res->backup = vmw_dmabuf_reference(dmabuf_mob);
+ res->backup_offset = 0;
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+
+ return 0;
}
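The tail of vmw_create_dmabuf_proxy() is an instance of a more general reserve-and-switch idiom: reserving the resource keeps concurrent command submission away from the old backup while it is swapped out. A restated sketch, using only calls that appear in this patch; the wrapper name is hypothetical.

static void example_switch_backup(struct vmw_resource *res,
                                  struct vmw_dma_buffer *new_backup)
{
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);

        /* Drop the old backup and adopt the new one at offset 0. */
        vmw_dmabuf_unreference(&res->backup);
        res->backup = vmw_dmabuf_reference(new_backup);
        res->backup_offset = 0;

        vmw_resource_unreserve(res, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}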
+
+
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
}
/* Limited framebuffer color depth support for screen objects */
- if (dev_priv->sou_priv) {
+ if (dev_priv->active_display_unit == vmw_du_screen_object) {
switch (mode_cmd->depth) {
case 32:
case 24:
goto out_err1;
}
- if (!vmw_dmabuf_reference(dmabuf)) {
- DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
- ret = -EINVAL;
- goto out_err2;
- }
-
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
- if (!dev_priv->sou_priv) {
- vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
- vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
- }
vfbd->base.dmabuf = true;
- vfbd->buffer = dmabuf;
+ vfbd->buffer = vmw_dmabuf_reference(dmabuf);
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
- goto out_err3;
+ goto out_err2;
return 0;
- out_err3:
- vmw_dmabuf_unreference(&dmabuf);
out_err2:
+ vmw_dmabuf_unreference(&dmabuf);
kfree(vfbd);
out_err1:
return ret;
}
- /*
- * Generic Kernel modesetting functions
+ /**
+ * vmw_kms_new_framebuffer - Create a new framebuffer.
+ *
+ * @dev_priv: Pointer to device private struct.
+ * @dmabuf: Pointer to dma buffer to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @surface: Pointer to a surface to wrap the kms framebuffer around.
+ * Either @dmabuf or @surface must be NULL.
+ * @only_2d: No presents will occur to this dma buffer based framebuffer. This
+ * helps the code to do some important optimizations.
+ * @mode_cmd: Frame-buffer metadata.
*/
-
- static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_mode_fb_cmd2 *mode_cmd2)
+ struct vmw_framebuffer *
+ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_framebuffer *vfb = NULL;
- struct vmw_surface *surface = NULL;
- struct vmw_dma_buffer *bo = NULL;
- struct ttm_base_object *user_obj;
- struct drm_mode_fb_cmd mode_cmd;
+ bool is_dmabuf_proxy = false;
int ret;
- mode_cmd.width = mode_cmd2->width;
- mode_cmd.height = mode_cmd2->height;
- mode_cmd.pitch = mode_cmd2->pitches[0];
- mode_cmd.handle = mode_cmd2->handles[0];
- drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
- &mode_cmd.bpp);
-
- /**
- * This code should be conditioned on Screen Objects not being used.
- * If screen objects are used, we can allocate a GMR to hold the
- * requested framebuffer.
+ /*
+ * We cannot use the SurfaceDMA command in a non-accelerated VM,
+ * therefore, wrap the DMA buf in a surface so we can use the
+ * SurfaceCopy command.
+ */
+ if (dmabuf && only_2d &&
+ dev_priv->active_display_unit == vmw_du_screen_target) {
+ ret = vmw_create_dmabuf_proxy(dev_priv->dev, mode_cmd,
+ dmabuf, &surface);
+ if (ret)
+ return ERR_PTR(ret);
+
+ is_dmabuf_proxy = true;
+ }
+
+ /* Create the new framebuffer depending on what we have */
+ if (surface) {
+ ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
+ mode_cmd,
+ is_dmabuf_proxy);
+
+ /*
+ * vmw_create_dmabuf_proxy() adds a reference that is no longer
+ * needed
+ */
+ if (is_dmabuf_proxy)
+ vmw_surface_unreference(&surface);
+ } else if (dmabuf) {
+ ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, dmabuf, &vfb,
+ mode_cmd);
+ } else {
+ BUG();
+ }
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ vfb->pin = vmw_framebuffer_pin;
+ vfb->unpin = vmw_framebuffer_unpin;
+
+ return vfb;
+ }
+
+ /*
+ * Generic Kernel modesetting functions
+ */
+
+ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd2)
+ {
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_framebuffer *vfb = NULL;
+ struct vmw_surface *surface = NULL;
+ struct vmw_dma_buffer *bo = NULL;
+ struct ttm_base_object *user_obj;
+ struct drm_mode_fb_cmd mode_cmd;
+ int ret;
+
+ mode_cmd.width = mode_cmd2->width;
+ mode_cmd.height = mode_cmd2->height;
+ mode_cmd.pitch = mode_cmd2->pitches[0];
+ mode_cmd.handle = mode_cmd2->handles[0];
+ drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+ &mode_cmd.bpp);
+
+ /**
+ * This code should be conditioned on Screen Objects not being used.
+ * If screen objects are used, we can allocate a GMR to hold the
+ * requested framebuffer.
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
mode_cmd.pitch,
mode_cmd.height)) {
- DRM_ERROR("VRAM size is too small for requested mode.\n");
+ DRM_ERROR("Requested mode exceed bounding box limit.\n");
return ERR_PTR(-ENOMEM);
}
if (ret)
goto err_out;
- /* Create the new framebuffer depending one what we got back */
- if (bo)
- ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- &mode_cmd);
- else if (surface)
- ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
- surface, &vfb, &mode_cmd);
- else
- BUG();
+ vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
+ !(dev_priv->capabilities & SVGA_CAP_3D),
+ &mode_cmd);
+ if (IS_ERR(vfb)) {
+ ret = PTR_ERR(vfb);
+ goto err_out;
+ }
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
.fb_create = vmw_kms_fb_create,
};
+ static int vmw_kms_generic_present(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct vmw_surface *surface,
+ uint32_t sid,
+ int32_t destX, int32_t destY,
+ struct drm_vmw_rect *clips,
+ uint32_t num_clips)
+ {
+ return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+ }
+
+
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_rect *clips,
uint32_t num_clips)
{
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_clip_rect *tmp;
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, num_units;
- int ret = 0; /* silence warning */
- int left, right, top, bottom;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
- SVGASignedRect *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(surface == NULL);
- BUG_ON(!clips || !num_clips);
-
- tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
- if (unlikely(tmp == NULL)) {
- DRM_ERROR("Temporary cliprect memory alloc failed.\n");
- return -ENOMEM;
- }
-
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- ret = -ENOMEM;
- goto out_free_tmp;
- }
-
- left = clips->x;
- right = clips->x + clips->w;
- top = clips->y;
- bottom = clips->y + clips->h;
-
- for (i = 1; i < num_clips; i++) {
- left = min_t(int, left, (int)clips[i].x);
- right = max_t(int, right, (int)clips[i].x + clips[i].w);
- top = min_t(int, top, (int)clips[i].y);
- bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
- }
-
- /* only need to do this once */
- memset(cmd, 0, fifo_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-
- blits = (SVGASignedRect *)&cmd[1];
-
- cmd->body.srcRect.left = left;
- cmd->body.srcRect.right = right;
- cmd->body.srcRect.top = top;
- cmd->body.srcRect.bottom = bottom;
-
- for (i = 0; i < num_clips; i++) {
- tmp[i].x1 = clips[i].x - left;
- tmp[i].x2 = clips[i].x + clips[i].w - left;
- tmp[i].y1 = clips[i].y - top;
- tmp[i].y2 = clips[i].y + clips[i].h - top;
- }
-
- for (k = 0; k < num_units; k++) {
- struct vmw_display_unit *unit = units[k];
- struct vmw_clip_rect clip;
- int num;
-
- clip.x1 = left + destX - unit->crtc.x;
- clip.y1 = top + destY - unit->crtc.y;
- clip.x2 = right + destX - unit->crtc.x;
- clip.y2 = bottom + destY - unit->crtc.y;
-
- /* skip any crtcs that misses the clip region */
- if (clip.x1 >= unit->crtc.mode.hdisplay ||
- clip.y1 >= unit->crtc.mode.vdisplay ||
- clip.x2 <= 0 || clip.y2 <= 0)
- continue;
-
- /*
- * In order for the clip rects to be correctly scaled
- * the src and dest rects needs to be the same size.
- */
- cmd->body.destRect.left = clip.x1;
- cmd->body.destRect.right = clip.x2;
- cmd->body.destRect.top = clip.y1;
- cmd->body.destRect.bottom = clip.y2;
-
- /* create a clip rect of the crtc in dest coords */
- clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
- clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
- clip.x1 = 0 - clip.x1;
- clip.y1 = 0 - clip.y1;
-
- /* need to reset sid as it is changed by execbuf */
- cmd->body.srcImage.sid = sid;
- cmd->body.destScreenId = unit->unit;
-
- /* clip and write blits to cmd stream */
- vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
-
- /* if no cliprects hit skip this */
- if (num == 0)
- continue;
-
- /* recalculate package length */
- fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
- cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
- fifo_size, 0, NULL, NULL);
-
- if (unlikely(ret != 0))
- break;
- }
-
- kfree(cmd);
- out_free_tmp:
- kfree(tmp);
-
- return ret;
- }
-
- int vmw_kms_readback(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_vmw_rect *clips,
- uint32_t num_clips)
- {
- struct vmw_framebuffer_dmabuf *vfbd =
- vmw_framebuffer_to_vfbd(&vfb->base);
- struct vmw_dma_buffer *dmabuf = vfbd->buffer;
- struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
- struct drm_crtc *crtc;
- size_t fifo_size;
- int i, k, ret, num_units, blits_pos;
-
- struct {
- uint32_t header;
- SVGAFifoCmdDefineGMRFB body;
- } *cmd;
- struct {
- uint32_t header;
- SVGAFifoCmdBlitScreenToGMRFB body;
- } *blits;
-
- num_units = 0;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &vfb->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
- }
-
- BUG_ON(dmabuf == NULL);
- BUG_ON(!clips || !num_clips);
-
- /* take a safe guess at fifo size */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
- cmd = kmalloc(fifo_size, GFP_KERNEL);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed to allocate temporary fifo memory.\n");
- return -ENOMEM;
- }
-
- memset(cmd, 0, fifo_size);
- cmd->header = SVGA_CMD_DEFINE_GMRFB;
- cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
- cmd->body.format.colorDepth = vfb->base.depth;
- cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitches[0];
- cmd->body.ptr.gmrId = vfb->user_handle;
- cmd->body.ptr.offset = 0;
-
- blits = (void *)&cmd[1];
- blits_pos = 0;
- for (i = 0; i < num_units; i++) {
- struct drm_vmw_rect *c = clips;
- for (k = 0; k < num_clips; k++, c++) {
- /* transform clip coords to crtc origin based coords */
- int clip_x1 = c->x - units[i]->crtc.x;
- int clip_x2 = c->x - units[i]->crtc.x + c->w;
- int clip_y1 = c->y - units[i]->crtc.y;
- int clip_y2 = c->y - units[i]->crtc.y + c->h;
- int dest_x = c->x;
- int dest_y = c->y;
-
- /* compensate for clipping, we negate
- * a negative number and add that.
- */
- if (clip_x1 < 0)
- dest_x += -clip_x1;
- if (clip_y1 < 0)
- dest_y += -clip_y1;
-
- /* clip */
- clip_x1 = max(clip_x1, 0);
- clip_y1 = max(clip_y1, 0);
- clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
- clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
-
- /* and cull any rects that misses the crtc */
- if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
- clip_y1 >= units[i]->crtc.mode.vdisplay ||
- clip_x2 <= 0 || clip_y2 <= 0)
- continue;
-
- blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
- blits[blits_pos].body.srcScreenId = units[i]->unit;
- blits[blits_pos].body.destOrigin.x = dest_x;
- blits[blits_pos].body.destOrigin.y = dest_y;
+ int ret;
- blits[blits_pos].body.srcRect.left = clip_x1;
- blits[blits_pos].body.srcRect.top = clip_y1;
- blits[blits_pos].body.srcRect.right = clip_x2;
- blits[blits_pos].body.srcRect.bottom = clip_y2;
- blits_pos++;
- }
+ switch (dev_priv->active_display_unit) {
+ case vmw_du_screen_target:
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
+ &surface->res, destX, destY,
+ num_clips, 1, NULL);
+ break;
+ case vmw_du_screen_object:
+ ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
+ sid, destX, destY, clips,
+ num_clips);
+ break;
+ default:
+ WARN_ONCE(true,
+ "Present called with invalid display system.\n");
+ ret = -ENOSYS;
+ break;
}
- /* reset size here and use calculated exact size from loops */
- fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
-
- ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
- 0, user_fence_rep, NULL);
+ if (ret)
+ return ret;
- kfree(cmd);
+ vmw_fifo_flush(dev_priv, false);
- return ret;
+ return 0;
}
int vmw_kms_init(struct vmw_private *dev_priv)
dev->mode_config.funcs = &vmw_kms_funcs;
dev->mode_config.min_width = 1;
dev->mode_config.min_height = 1;
- /* assumed largest fb size */
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
+ dev->mode_config.max_width = dev_priv->texture_max_width;
+ dev->mode_config.max_height = dev_priv->texture_max_height;
- ret = vmw_kms_init_screen_object_display(dev_priv);
- if (ret) /* Fallback */
- (void)vmw_kms_init_legacy_display_system(dev_priv);
+ ret = vmw_kms_stdu_init_display(dev_priv);
+ if (ret) {
+ ret = vmw_kms_sou_init_display(dev_priv);
+ if (ret) /* Fallback */
+ ret = vmw_kms_ldu_init_display(dev_priv);
+ }
- return 0;
+ return ret;
}
int vmw_kms_close(struct vmw_private *dev_priv)
{
+ int ret;
+
/*
* Docs say we should take the lock before calling this function
* but since it destroys encoders and our destructor calls
* drm_encoder_cleanup which takes the lock we deadlock.
*/
drm_mode_config_cleanup(dev_priv->dev);
- if (dev_priv->sou_priv)
- vmw_kms_close_screen_object_display(dev_priv);
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
+ ret = vmw_kms_sou_close_display(dev_priv);
+ else if (dev_priv->active_display_unit == vmw_du_screen_target)
+ ret = vmw_kms_stdu_close_display(dev_priv);
else
- vmw_kms_close_legacy_display_system(dev_priv);
- return 0;
+ ret = vmw_kms_ldu_close_display(dev_priv);
+
+ return ret;
}
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
else if (vmw_fifo_have_pitchlock(vmw_priv))
vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
+ SVGA_FIFO_PITCHLOCK);
if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
return 0;
uint32_t pitch,
uint32_t height)
{
- return ((u64) pitch * (u64) height) < (u64) dev_priv->prim_bb_mem;
+ return ((u64) pitch * (u64) height) < (u64)
+ ((dev_priv->active_display_unit == vmw_du_screen_target) ?
+ dev_priv->prim_bb_mem : dev_priv->vram_size);
}
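The comparison above is deliberately done in 64 bits. A small standalone illustration, with made-up numbers, of why the casts matter: a 32K-pixel-wide 32bpp mode at 32K lines needs exactly 2^32 bytes, which a 32-bit multiply silently wraps to 0.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pitch = 32768 * 4;             /* 32K pixels wide, 32bpp */
        uint32_t height = 32768;
        uint64_t prim_bb_mem = 512ULL << 20;    /* assumed 512 MiB limit */

        uint32_t wrapped = pitch * height;      /* wraps to 0, wrongly passes */
        uint64_t exact = (uint64_t) pitch * height;

        printf("wrapped=%u exact=%llu limit=%llu -> %s\n",
               wrapped, (unsigned long long) exact,
               (unsigned long long) prim_bb_mem,
               exact < prim_bb_mem ? "fits" : "too large");
        return 0;
}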
return 0;
}
- int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags)
- {
- struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
- struct drm_file *file_priv ;
- struct vmw_fence_obj *fence = NULL;
- struct drm_clip_rect clips;
- int ret;
-
- if (event == NULL)
- return -EINVAL;
-
- /* require ScreenObject support for page flipping */
- if (!dev_priv->sou_priv)
- return -ENOSYS;
-
- file_priv = event->base.file_priv;
- if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
- return -EINVAL;
-
- crtc->primary->fb = fb;
-
- /* do a full screen dirty update */
- clips.x1 = clips.y1 = 0;
- clips.x2 = fb->width;
- clips.y2 = fb->height;
-
- if (vfb->dmabuf)
- ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
- else
- ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
- 0, 0, &clips, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.tv_sec,
- &event->event.tv_usec,
- true);
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
- if (vmw_crtc_to_du(crtc)->is_implicit)
- vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);
-
- return ret;
-
- out_no_fence:
- crtc->primary->fb = old_fb;
- return ret;
- }
-
-
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}
}
}
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
+ return 0;
}
void vmw_du_connector_save(struct drm_connector *connector)
* @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
- static void vmw_guess_mode_timing(struct drm_display_mode *mode)
+ void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
mode->hsync_start = mode->hdisplay + 50;
mode->hsync_end = mode->hsync_start + 50;
* If using screen objects, then assume 32-bpp because that's what the
* SVGA device is assuming
*/
- if (dev_priv->sou_priv)
+ if (dev_priv->active_display_unit == vmw_du_screen_object)
assumed_bpp = 4;
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ max_width = min(max_width, dev_priv->stdu_max_width);
+ max_height = min(max_height, dev_priv->stdu_max_height);
+ }
+
/* Add preferred mode */
- {
- mode = drm_mode_duplicate(dev, &prefmode);
- if (!mode)
- return 0;
- mode->hdisplay = du->pref_width;
- mode->vdisplay = du->pref_height;
- vmw_guess_mode_timing(mode);
-
- if (vmw_kms_validate_mode_vram(dev_priv,
- mode->hdisplay * assumed_bpp,
- mode->vdisplay)) {
- drm_mode_probed_add(connector, mode);
- } else {
- drm_mode_destroy(dev, mode);
- mode = NULL;
- }
+ mode = drm_mode_duplicate(dev, &prefmode);
+ if (!mode)
+ return 0;
+ mode->hdisplay = du->pref_width;
+ mode->vdisplay = du->pref_height;
+ vmw_guess_mode_timing(mode);
- if (du->pref_mode) {
- list_del_init(&du->pref_mode->head);
- drm_mode_destroy(dev, du->pref_mode);
- }
+ if (vmw_kms_validate_mode_vram(dev_priv,
+ mode->hdisplay * assumed_bpp,
+ mode->vdisplay)) {
+ drm_mode_probed_add(connector, mode);
+ } else {
+ drm_mode_destroy(dev, mode);
+ mode = NULL;
+ }
- /* mode might be null here, this is intended */
- du->pref_mode = mode;
+ if (du->pref_mode) {
+ list_del_init(&du->pref_mode->head);
+ drm_mode_destroy(dev, du->pref_mode);
}
+ /* mode might be null here, this is intended */
+ du->pref_mode = mode;
+
for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
bmode = &vmw_kms_connector_builtin[i];
if (bmode->hdisplay > max_width ||
drm_mode_probed_add(connector, mode);
}
- /* Move the prefered mode first, help apps pick the right mode. */
- if (du->pref_mode)
- list_move(&du->pref_mode->head, &connector->probed_modes);
-
drm_mode_connector_list_update(connector, true);
+ /* Move the preferred mode first, help apps pick the right mode. */
+ drm_mode_sort(&connector->modes);
return 1;
}
unsigned rects_size;
int ret;
int i;
+ u64 total_pixels = 0;
struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_vmw_rect bounding_box = {0};
if (!arg->num_outputs) {
struct drm_vmw_rect def_rect = {0, 0, 800, 600};
ret = -EINVAL;
goto out_free;
}
+
+ /*
+ * bounding_box.w and bounding_box.h are used as
+ * lower-right coordinates
+ */
+ if (rects[i].x + rects[i].w > bounding_box.w)
+ bounding_box.w = rects[i].x + rects[i].w;
+
+ if (rects[i].y + rects[i].h > bounding_box.h)
+ bounding_box.h = rects[i].y + rects[i].h;
+
+ total_pixels += (u64) rects[i].w * (u64) rects[i].h;
+ }
+
+ if (dev_priv->active_display_unit == vmw_du_screen_target) {
+ /*
+ * For Screen Targets, the limits for a topology are:
+ * 1. Bounding box (assuming 32bpp) must be < prim_bb_mem
+ * 2. Total pixels (assuming 32bpp) must be < prim_bb_mem
+ */
+ u64 bb_mem = (u64) bounding_box.w * bounding_box.h * 4;
+ u64 pixel_mem = total_pixels * 4;
+
+ if (bb_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Topology is beyond supported limits.\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ if (pixel_mem > dev_priv->prim_bb_mem) {
+ DRM_ERROR("Combined output size too large\n");
+ ret = -EINVAL;
+ goto out_free;
+ }
}
vmw_du_update_layout(dev_priv, arg->num_outputs, rects);
kfree(rects);
return ret;
}
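A worked example of the two Screen Target limits computed above, as a standalone program: two 1920x1080 outputs with an 80-pixel horizontal gap. The bounding-box check charges for the gap, the total-pixels check does not; both must stay under prim_bb_mem (the 64 MiB limit here is an assumption).

#include <stdint.h>
#include <stdio.h>

struct rect { int32_t x, y; uint32_t w, h; };

int main(void)
{
        struct rect rects[] = { {0, 0, 1920, 1080}, {2000, 0, 1920, 1080} };
        uint64_t prim_bb_mem = 64ULL << 20;
        uint32_t bb_w = 0, bb_h = 0;
        uint64_t total_pixels = 0;
        int i;

        for (i = 0; i < 2; i++) {
                if (rects[i].x + rects[i].w > bb_w)
                        bb_w = rects[i].x + rects[i].w;
                if (rects[i].y + rects[i].h > bb_h)
                        bb_h = rects[i].y + rects[i].h;
                total_pixels += (uint64_t) rects[i].w * rects[i].h;
        }

        /* Both checks assume 32bpp, i.e. 4 bytes per pixel. */
        printf("bb_mem=%llu pixel_mem=%llu limit=%llu\n",
               (unsigned long long) ((uint64_t) bb_w * bb_h * 4),
               (unsigned long long) (total_pixels * 4),
               (unsigned long long) prim_bb_mem);
        return 0;
}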
+
+ /**
+ * vmw_kms_helper_dirty - Helper to build commands and perform actions based
+ * on a set of cliprects and a set of display units.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @framebuffer: Pointer to the framebuffer on which to perform the actions.
+ * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
+ * Cliprects are given in framebuffer coordinates.
+ * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
+ * be NULL. Cliprects are given in source coordinates.
+ * @dest_x: X coordinate offset for the crtc / destination clip rects.
+ * @dest_y: Y coordinate offset for the crtc / destination clip rects.
+ * @num_clips: Number of cliprects in the @clips or @vclips array.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
+ */
+ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty)
+ {
+ struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+ struct drm_crtc *crtc;
+ u32 num_units = 0;
+ u32 i, k;
+
+ dirty->dev_priv = dev_priv;
+
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+ if (crtc->primary->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
+
+ for (k = 0; k < num_units; k++) {
+ struct vmw_display_unit *unit = units[k];
+ s32 crtc_x = unit->crtc.x;
+ s32 crtc_y = unit->crtc.y;
+ s32 crtc_width = unit->crtc.mode.hdisplay;
+ s32 crtc_height = unit->crtc.mode.vdisplay;
+ const struct drm_clip_rect *clips_ptr = clips;
+ const struct drm_vmw_rect *vclips_ptr = vclips;
+
+ dirty->unit = unit;
+ if (dirty->fifo_reserve_size > 0) {
+ dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->fifo_reserve_size);
+ if (!dirty->cmd) {
+ DRM_ERROR("Couldn't reserve fifo space "
+ "for dirty blits.\n");
+ return -ENOMEM;
+ }
+ memset(dirty->cmd, 0, dirty->fifo_reserve_size);
+ }
+ dirty->num_hits = 0;
+ for (i = 0; i < num_clips; i++, clips_ptr += increment,
+ vclips_ptr += increment) {
+ s32 clip_left;
+ s32 clip_top;
+
+ /*
+ * Select clip array type. Note that integer type
+ * in @clips is unsigned short, whereas in @vclips
+ * it's 32-bit.
+ */
+ if (clips) {
+ dirty->fb_x = (s32) clips_ptr->x1;
+ dirty->fb_y = (s32) clips_ptr->y1;
+ dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
+ crtc_x;
+ dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
+ crtc_y;
+ } else {
+ dirty->fb_x = vclips_ptr->x;
+ dirty->fb_y = vclips_ptr->y;
+ dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
+ dest_x - crtc_x;
+ dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
+ dest_y - crtc_y;
+ }
+
+ dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
+ dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
+
+ /* Skip this clip if it's outside the crtc region */
+ if (dirty->unit_x1 >= crtc_width ||
+ dirty->unit_y1 >= crtc_height ||
+ dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
+ continue;
+
+ /* Clip right and bottom to crtc limits */
+ dirty->unit_x2 = min_t(s32, dirty->unit_x2,
+ crtc_width);
+ dirty->unit_y2 = min_t(s32, dirty->unit_y2,
+ crtc_height);
+
+ /* Clip left and top to crtc limits */
+ clip_left = min_t(s32, dirty->unit_x1, 0);
+ clip_top = min_t(s32, dirty->unit_y1, 0);
+ dirty->unit_x1 -= clip_left;
+ dirty->unit_y1 -= clip_top;
+ dirty->fb_x -= clip_left;
+ dirty->fb_y -= clip_top;
+
+ dirty->clip(dirty);
+ }
+
+ dirty->fifo_commit(dirty);
+ }
+
+ return 0;
+ }
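To make the closure contract concrete, here is a hedged sketch of what a display-unit backend might plug in; everything except the helper itself and the struct fields is hypothetical. The helper reserves @fifo_reserve_size bytes, calls @clip once per surviving cliprect with the unit_* fields already clipped and translated, then calls @fifo_commit once per display unit.

/* Hypothetical wire format; real backends use SVGA blit structs. */
struct example_blit {
        s32 left, top, right, bottom;
};

static void example_clip(struct vmw_kms_dirty *dirty)
{
        struct example_blit *blit =
                (struct example_blit *) dirty->cmd + dirty->num_hits;

        blit->left = dirty->unit_x1;
        blit->top = dirty->unit_y1;
        blit->right = dirty->unit_x2;
        blit->bottom = dirty->unit_y2;
        dirty->num_hits++;
}

static void example_fifo_commit(struct vmw_kms_dirty *dirty)
{
        /*
         * Committing less than was reserved is the normal case when
         * some cliprects miss this display unit entirely.
         */
        vmw_fifo_commit(dirty->dev_priv,
                        sizeof(struct example_blit) * dirty->num_hits);
}

A caller would set .clip = example_clip, .fifo_commit = example_fifo_commit and .fifo_reserve_size = sizeof(struct example_blit) * num_clips before handing the structure to vmw_kms_helper_dirty().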
+
+ /**
+ * vmw_kms_helper_buffer_prepare - Reserve and validate a buffer object before
+ * command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @buf: The buffer object
+ * @interruptible: Whether to perform waits as interruptible.
+ * @validate_as_mob: Whether the buffer should be validated as a MOB. If false,
+ * the buffer will be validated as a GMR. Already pinned buffers will not be
+ * validated.
+ *
+ * Returns 0 on success, negative error code on failure, -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob)
+ {
+ struct ttm_buffer_object *bo = &buf->base;
+ int ret;
+
+ ttm_bo_reserve(bo, false, false, interruptible, NULL);
+ ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+ validate_as_mob);
+ if (ret)
+ ttm_bo_unreserve(bo);
+
+ return ret;
+ }
+
+ /**
+ * vmw_kms_helper_buffer_revert - Undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ *
+ * @buf: Pointer to the buffer object.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_buffer_prepare.
+ */
+ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf)
+ {
+ if (buf)
+ ttm_bo_unreserve(&buf->base);
+ }
+
+ /**
+ * vmw_kms_helper_buffer_finish - Unreserve and fence a buffer object after
+ * kms command submission.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @file_priv: Pointer to a struct drm_file representing the caller's
+ * connection. Must be set to NULL if @user_fence_rep is NULL, and conversely
+ * if non-NULL, @user_fence_rep must be non-NULL.
+ * @buf: The buffer object.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ * @user_fence_rep: Optional pointer to a user-space provided struct
+ * drm_vmw_fence_rep. If provided, @file_priv must also be provided and the
+ * function copies fence data to user-space in a fail-safe manner.
+ */
+ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep)
+ {
+ struct vmw_fence_obj *fence;
+ uint32_t handle;
+ int ret;
+
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
+ file_priv ? &handle : NULL);
+ if (buf)
+ vmw_fence_single_bo(&buf->base, fence);
+ if (file_priv)
+ vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
+ ret, user_fence_rep, fence,
+ handle);
+ if (out_fence)
+ *out_fence = fence;
+ else
+ vmw_fence_obj_unreference(&fence);
+
+ vmw_kms_helper_buffer_revert(buf);
+ }
+
+
+ /**
+ * vmw_kms_helper_resource_revert - Undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ *
+ * Helper to be used if an error forces the caller to undo the actions of
+ * vmw_kms_helper_resource_prepare.
+ */
+ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
+ {
+ vmw_kms_helper_buffer_revert(res->backup);
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
+
+ /**
+ * vmw_kms_helper_resource_prepare - Reserve and validate a resource before
+ * command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @interruptible: Whether to perform waits as interruptible.
+ *
+ * Reserves and validates also the backup buffer if a guest-backed resource.
+ * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
+ * interrupted by a signal.
+ */
+ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible)
+ {
+ int ret = 0;
+
+ if (interruptible)
+ ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
+ else
+ mutex_lock(&res->dev_priv->cmdbuf_mutex);
+
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+
+ ret = vmw_resource_reserve(res, interruptible, false);
+ if (ret)
+ goto out_unlock;
+
+ if (res->backup) {
+ ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
+ interruptible,
+ res->dev_priv->has_mob);
+ if (ret)
+ goto out_unreserve;
+ }
+ ret = vmw_resource_validate(res);
+ if (ret)
+ goto out_revert;
+ return 0;
+
+ out_revert:
+ vmw_kms_helper_buffer_revert(res->backup);
+ out_unreserve:
+ vmw_resource_unreserve(res, false, NULL, 0);
+ out_unlock:
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ return ret;
+ }
+
+ /**
+ * vmw_kms_helper_resource_finish - Unreserve and fence a resource after
+ * kms command submission.
+ *
+ * @res: Pointer to the resource. Typically a surface.
+ * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
+ * ref-counted fence pointer is returned here.
+ */
+ void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence)
+ {
+ if (res->backup || out_fence)
+ vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup,
+ out_fence, NULL);
+
+ vmw_resource_unreserve(res, false, NULL, 0);
+ mutex_unlock(&res->dev_priv->cmdbuf_mutex);
+ }
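Taken together, the resource helpers are meant to bracket a command submission. A hedged sketch of the intended sequence; the middle step is a placeholder for backend-specific fifo work.

static int example_surface_op(struct vmw_resource *res,
                              struct vmw_fence_obj **out_fence)
{
        int ret;

        ret = vmw_kms_helper_resource_prepare(res, true);
        if (ret)
                return ret;     /* -ERESTARTSYS if interrupted */

        ret = example_build_and_submit_commands(res);   /* placeholder */
        if (ret) {
                /* Error path: undo the reservation and validation. */
                vmw_kms_helper_resource_revert(res);
                return ret;
        }

        /* Fences the backup, unreserves, optionally returns a fence. */
        vmw_kms_helper_resource_finish(res, out_fence);
        return 0;
}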
+
+ /**
+ * vmw_kms_update_proxy - Helper function to update a proxy surface from
+ * its backing MOB.
+ *
+ * @res: Pointer to the surface resource
+ * @clips: Clip rects in framebuffer (surface) space.
+ * @num_clips: Number of clips in @clips.
+ * @increment: Integer with which to increment the clip counter when looping.
+ * Used to skip a predetermined number of clip rects.
+ *
+ * This function makes sure the proxy surface is updated from its backing MOB
+ * using the region given by @clips. The surface resource @res and its backing
+ * MOB need to be reserved and validated on call.
+ */
+ int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment)
+ {
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct drm_vmw_size *size = &vmw_res_to_srf(res)->base_size;
+ struct {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdUpdateGBImage body;
+ } *cmd;
+ SVGA3dBox *box;
+ size_t copy_size = 0;
+ int i;
+
+ if (!clips)
+ return 0;
+
+ cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd) {
+ DRM_ERROR("Couldn't reserve fifo space for proxy surface "
+ "update.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
+ box = &cmd->body.box;
+
+ cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.image.sid = res->id;
+ cmd->body.image.face = 0;
+ cmd->body.image.mipmap = 0;
+
+ if (clips->x1 > size->width || clips->x2 > size->width ||
+ clips->y1 > size->height || clips->y2 > size->height) {
+ DRM_ERROR("Invalid clips outsize of framebuffer.\n");
+ return -EINVAL;
+ }
+
+ box->x = clips->x1;
+ box->y = clips->y1;
+ box->z = 0;
+ box->w = clips->x2 - clips->x1;
+ box->h = clips->y2 - clips->y1;
+ box->d = 1;
+
+ copy_size += sizeof(*cmd);
+ }
+
+ vmw_fifo_commit(dev_priv, copy_size);
+
+ return 0;
+ }
+
+ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode)
+ {
+ struct drm_connector *con;
+ struct vmw_display_unit *du;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
+ head) {
+ if (i == unit)
+ break;
+
+ ++i;
+ }
+
+ if (i != unit) {
+ DRM_ERROR("Could not find initial display unit.\n");
+ return -EINVAL;
+ }
+
+ if (list_empty(&con->modes))
+ (void) vmw_du_connector_fill_modes(con, max_width, max_height);
+
+ if (list_empty(&con->modes)) {
+ DRM_ERROR("Could not find initial display mode.\n");
+ return -EINVAL;
+ }
+
+ du = vmw_connector_to_du(con);
+ *p_con = con;
+ *p_crtc = &du->crtc;
+
+ list_for_each_entry(mode, &con->modes, head) {
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ break;
+ }
+
+ if (mode->type & DRM_MODE_TYPE_PREFERRED)
+ *p_mode = mode;
+ else {
+ WARN_ONCE(true, "Could not find initial preferred mode.\n");
+ *p_mode = list_first_entry(&con->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ return 0;
+ }
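A hedged sketch of the intended fbdev-side caller: pick display unit 0 and let the function fill in connector, crtc and preferred mode. Only the wrapper is hypothetical.

static int example_fbdev_pick_mode(struct vmw_private *dev_priv)
{
        struct drm_connector *con;
        struct drm_crtc *crtc;
        struct drm_display_mode *mode;
        int ret;

        ret = vmw_kms_fbdev_init_data(dev_priv, 0, 1920, 1080,
                                      &con, &crtc, &mode);
        if (ret)
                return ret;

        /* mode is the preferred mode of unit 0, or the first listed one. */
        DRM_INFO("fbdev unit 0: %dx%d\n", mode->hdisplay, mode->vdisplay);
        return 0;
}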
/**************************************************************************
*
- * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
#include <drm/drm_crtc_helper.h>
#include "vmwgfx_drv.h"
+ /**
+ * struct vmw_kms_dirty - closure structure for the vmw_kms_helper_dirty
+ * function.
+ *
+ * @fifo_commit: Callback that is called once for each display unit after
+ * all clip rects. This function must commit the fifo space reserved by the
+ * helper. Set up by the caller.
+ * @clip: Callback that is called for each cliprect on each display unit.
+ * Set up by the caller.
+ * @fifo_reserve_size: Fifo size that the helper should try to allocate for
+ * each display unit. Set up by the caller.
+ * @dev_priv: Pointer to the device private. Set up by the helper.
+ * @unit: The current display unit. Set up by the helper before a call to @clip.
+ * @cmd: The allocated fifo space. Set up by the helper before the first @clip
+ * call.
+ * @num_hits: Number of clip rect commands for this display unit.
+ * Cleared by the helper before the first @clip call. Updated by the @clip
+ * callback.
+ * @fb_x: Clip rect left side in framebuffer coordinates.
+ * @fb_y: Clip rect top side in framebuffer coordinates.
+ * @unit_x1: Clip rect left side in crtc coordinates.
+ * @unit_y1: Clip rect top side in crtc coordinates.
+ * @unit_x2: Clip rect right side in crtc coordinates.
+ * @unit_y2: Clip rect bottom side in crtc coordinates.
+ *
+ * The clip rect coordinates are updated by the helper for each @clip call.
+ * Note that this structure may be embedded in a larger, derived structure
+ * if more info needs to be passed between the helper caller and the helper
+ * callbacks.
+ */
+ struct vmw_kms_dirty {
+ void (*fifo_commit)(struct vmw_kms_dirty *);
+ void (*clip)(struct vmw_kms_dirty *);
+ size_t fifo_reserve_size;
+ struct vmw_private *dev_priv;
+ struct vmw_display_unit *unit;
+ void *cmd;
+ u32 num_hits;
+ s32 fb_x;
+ s32 fb_y;
+ s32 unit_x1;
+ s32 unit_y1;
+ s32 unit_x2;
+ s32 unit_y2;
+ };
+
#define VMWGFX_NUM_DISPLAY_UNITS 8
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
+ #define vmw_framebuffer_to_vfbs(x) \
+ container_of(x, struct vmw_framebuffer_surface, base.base)
+ #define vmw_framebuffer_to_vfbd(x) \
+ container_of(x, struct vmw_framebuffer_dmabuf, base.base)
/**
* Base class for framebuffers
uint32_t user_handle;
};
+ /*
+ * Clip rectangle
+ */
+ struct vmw_clip_rect {
+ int x1, x2, y1, y2;
+ };
+
+ struct vmw_framebuffer_surface {
+ struct vmw_framebuffer base;
+ struct vmw_surface *surface;
+ struct vmw_dma_buffer *buffer;
+ struct list_head head;
+ bool is_dmabuf_proxy; /* true if this is proxy surface for DMA buf */
+ };
+
+
+ struct vmw_framebuffer_dmabuf {
+ struct vmw_framebuffer base;
+ struct vmw_dma_buffer *buffer;
+ };
- #define vmw_crtc_to_du(x) \
- container_of(x, struct vmw_display_unit, crtc)
/*
* Basic cursor manipulation
/*
* Shared display unit functions - vmwgfx_kms.c
*/
- void vmw_display_unit_cleanup(struct vmw_display_unit *du);
- int vmw_du_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event,
- uint32_t page_flip_flags);
+ void vmw_du_cleanup(struct vmw_display_unit *du);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
-void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+int vmw_du_connector_dpms(struct drm_connector *connector, int mode);
void vmw_du_connector_save(struct drm_connector *connector);
void vmw_du_connector_restore(struct drm_connector *connector);
enum drm_connector_status
int vmw_du_connector_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val);
+ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ const struct drm_clip_rect *clips,
+ const struct drm_vmw_rect *vclips,
+ s32 dest_x, s32 dest_y,
+ int num_clips,
+ int increment,
+ struct vmw_kms_dirty *dirty);
+ int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buf,
+ bool interruptible,
+ bool validate_as_mob);
+ void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
+ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_dma_buffer *buf,
+ struct vmw_fence_obj **out_fence,
+ struct drm_vmw_fence_rep __user *
+ user_fence_rep);
+ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
+ bool interruptible);
+ void vmw_kms_helper_resource_revert(struct vmw_resource *res);
+ void vmw_kms_helper_resource_finish(struct vmw_resource *res,
+ struct vmw_fence_obj **out_fence);
+ int vmw_kms_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+ struct vmw_framebuffer *
+ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *dmabuf,
+ struct vmw_surface *surface,
+ bool only_2d,
+ const struct drm_mode_fb_cmd *mode_cmd);
+ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
+ unsigned unit,
+ u32 max_width,
+ u32 max_height,
+ struct drm_connector **p_con,
+ struct drm_crtc **p_crtc,
+ struct drm_display_mode **p_mode);
+ void vmw_guess_mode_timing(struct drm_display_mode *mode);
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
- int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
- int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
+ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv);
+ int vmw_kms_ldu_close_display(struct vmw_private *dev_priv);
+ int vmw_kms_ldu_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ unsigned flags, unsigned color,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment);
+ int vmw_kms_update_proxy(struct vmw_resource *res,
+ const struct drm_clip_rect *clips,
+ unsigned num_clips,
+ int increment);
/*
* Screen Objects display functions - vmwgfx_scrn.c
*/
- int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
- int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
- int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
- struct drm_vmw_rect *rects);
- bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
- void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
- struct drm_crtc *crtc);
+ int vmw_kms_sou_init_display(struct vmw_private *dev_priv);
+ int vmw_kms_sou_close_display(struct vmw_private *dev_priv);
+ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ unsigned num_clips, int increment,
+ bool interruptible,
+ struct vmw_fence_obj **out_fence);
+ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips);
+
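/*
 * Sketch: on screen objects a 2D dirty on a dmabuf-backed framebuffer
 * becomes a fenced blit via the interface above; dropping the fence
 * reference immediately, as below, is fine when the caller has no
 * need to wait on it.
 */
static int example_sou_flush(struct vmw_private *dev_priv,
			     struct vmw_framebuffer *vfb,
			     struct drm_clip_rect *clips,
			     unsigned num_clips)
{
	struct vmw_fence_obj *fence = NULL;
	int ret;

	ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, clips, num_clips,
					  1 /* increment */,
					  true /* interruptible */, &fence);
	if (fence)
		vmw_fence_obj_unreference(&fence);
	return ret;
}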
+ /*
+ * Screen Target Display Unit functions - vmwgfx_stdu.c
+ */
+ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv);
+ int vmw_kms_stdu_close_display(struct vmw_private *dev_priv);
+ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
+ struct vmw_framebuffer *framebuffer,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ struct vmw_resource *srf,
+ s32 dest_x,
+ s32 dest_y,
+ unsigned num_clips, int inc,
+ struct vmw_fence_obj **out_fence);
+ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ bool to_surface,
+ bool interruptible);
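/*
 * Sketch: with screen targets a dmabuf dirty is a DMA *to* the backing
 * surface, and a readback is the same call with to_surface == false.
 * Passing NULL for file_priv/user_fence_rep is assumed valid for
 * kernel-internal callers such as fbdev.
 */
static int example_stdu_flush(struct vmw_private *dev_priv,
			      struct vmw_framebuffer *vfb,
			      struct drm_clip_rect *clips,
			      uint32_t num_clips)
{
	return vmw_kms_stdu_dma(dev_priv, NULL /* file_priv */, vfb,
				NULL /* user_fence_rep */, clips,
				NULL /* vclips */, num_clips,
				1 /* increment */, true /* to_surface */,
				false /* interruptible */);
}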
#endif
struct drm_pending_vblank_event {
struct drm_pending_event base;
- int pipe;
+ unsigned int pipe;
struct drm_event_vblank event;
};
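/*
 * Illustrative use of the now-unsigned pipe index (sketch; assumes
 * dev->event_lock is held, as the vblank event senders require):
 */
static void example_complete_flip(struct drm_device *dev,
				  struct drm_crtc *crtc,
				  struct drm_pending_vblank_event *e)
{
	unsigned int pipe = drm_crtc_index(crtc);

	/* drm_send_vblank_event() fills e->pipe before queuing. */
	drm_send_vblank_event(dev, pipe, e);
}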
struct timer_list disable_timer; /* delayed disable timer */
/* vblank counter, protected by dev->vblank_time_lock for writes */
- unsigned long count;
+ u32 count;
/* vblank timestamps, protected by dev->vblank_time_lock for writes */
struct timeval time[DRM_VBLANKTIME_RBSIZE];
/* for wraparound handling */
u32 last_wait; /* Last vblank seqno waited per CRTC */
unsigned int inmodeset; /* Display driver is setting mode */
- int crtc; /* crtc index */
+ unsigned int pipe; /* crtc index */
bool enabled; /* so we don't call enable more than
once per disable */
};
/*@{*/
/* Driver support (drm_drv.h) */
+ extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
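/*
 * Sketch of what the newly exported permission check enables: a driver
 * with a private ioctl entry point (vmwgfx's command-buffer path, per
 * the merge log) can reuse the core check before handling a command
 * itself. EXAMPLE_PRIVATE_CMD and example_handle_private() are
 * hypothetical.
 */
static long example_driver_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;

	if (cmd == EXAMPLE_PRIVATE_CMD) {
		int ret = drm_ioctl_permit(DRM_AUTH | DRM_RENDER_ALLOW,
					   file_priv);
		if (ret)
			return ret;
		return example_handle_private(filp, arg);
	}
	return drm_ioctl(filp, cmd, arg);
}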
extern long drm_compat_ioctl(struct file *filp,
extern int drm_irq_install(struct drm_device *dev, int irq);
extern int drm_irq_uninstall(struct drm_device *dev);
-extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count(struct drm_device *dev, int pipe);
extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
-extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
struct timeval *vblanktime);
-extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
- struct drm_pending_vblank_event *e);
+extern void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
+ struct drm_pending_vblank_event *e);
extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e);
-extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
-extern int drm_vblank_get(struct drm_device *dev, int crtc);
-extern void drm_vblank_put(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_put(struct drm_device *dev, unsigned int pipe);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
extern void drm_crtc_vblank_put(struct drm_crtc *crtc);
-extern void drm_wait_one_vblank(struct drm_device *dev, int crtc);
+extern void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
-extern void drm_vblank_off(struct drm_device *dev, int crtc);
-extern void drm_vblank_on(struct drm_device *dev, int crtc);
+extern void drm_vblank_off(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_on(struct drm_device *dev, unsigned int pipe);
extern void drm_crtc_vblank_off(struct drm_crtc *crtc);
extern void drm_crtc_vblank_reset(struct drm_crtc *crtc);
extern void drm_crtc_vblank_on(struct drm_crtc *crtc);
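/*
 * Typical reference pattern for the CRTC-based wrappers above: hold a
 * vblank reference while a flip is outstanding so the interrupt stays
 * enabled. Sketch; the flip bookkeeping around it is driver-specific.
 */
static int example_arm_flip(struct drm_crtc *crtc)
{
	int ret = drm_crtc_vblank_get(crtc);

	if (ret)
		return ret;
	/* ... queue the flip; drm_crtc_vblank_put(crtc) on completion ... */
	return 0;
}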
extern void drm_vblank_cleanup(struct drm_device *dev);
extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- int crtc, int *max_error,
+ unsigned int pipe, int *max_error,
struct timeval *vblank_time,
unsigned flags,
const struct drm_crtc *refcrtc,
}
/* Modesetting support */
-extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
-extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
+extern void drm_vblank_pre_modeset(struct drm_device *dev, unsigned int pipe);
+extern void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe);
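/*
 * Sketch of the legacy (non-atomic) usage with the unsigned pipe
 * parameter: bracketing a modeset lets the core keep the vblank
 * counter consistent across the change.
 */
static void example_legacy_modeset(struct drm_device *dev,
				   unsigned int pipe)
{
	drm_vblank_pre_modeset(dev, pipe);
	/* ... program the new mode ... */
	drm_vblank_post_modeset(dev, pipe);
}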
/* Stub support (drm_stub.h) */
extern struct drm_master *drm_master_get(struct drm_master *master);