// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen para-virtual DRM device
 *
 * Copyright (C) 2016-2018 EPAM Systems Inc.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "xen_drm_front.h"
#include "xen_drm_front_conn.h"
#include "xen_drm_front_kms.h"
25 * Timeout in ms to wait for frame done event from the backend:
26 * must be a bit more than IO time-out
28 #define FRAME_DONE_TO_MS (XEN_DRM_FRONT_WAIT_BACK_MS + 100)
30 static struct xen_drm_front_drm_pipeline *
31 to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
33 return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
36 static void fb_destroy(struct drm_framebuffer *fb)
38 struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
41 if (drm_dev_enter(fb->dev, &idx)) {
42 xen_drm_front_fb_detach(drm_info->front_info,
43 xen_drm_front_fb_to_cookie(fb));
46 drm_gem_fb_destroy(fb);
49 static const struct drm_framebuffer_funcs fb_funcs = {
50 .destroy = fb_destroy,
53 static struct drm_framebuffer *
54 fb_create(struct drm_device *dev, struct drm_file *filp,
55 const struct drm_mode_fb_cmd2 *mode_cmd)
57 struct xen_drm_front_drm_info *drm_info = dev->dev_private;
58 struct drm_framebuffer *fb;
59 struct drm_gem_object *gem_obj;
62 fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
63 if (IS_ERR_OR_NULL(fb))
66 gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
68 DRM_ERROR("Failed to lookup GEM object\n");
73 drm_gem_object_put_unlocked(gem_obj);
75 ret = xen_drm_front_fb_attach(drm_info->front_info,
76 xen_drm_front_dbuf_to_cookie(gem_obj),
77 xen_drm_front_fb_to_cookie(fb),
78 fb->width, fb->height,
81 DRM_ERROR("Back failed to attach FB %p: %d\n", fb, ret);
88 drm_gem_fb_destroy(fb);
92 static const struct drm_mode_config_funcs mode_config_funcs = {
93 .fb_create = fb_create,
94 .atomic_check = drm_atomic_helper_check,
95 .atomic_commit = drm_atomic_helper_commit,
98 static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
100 struct drm_crtc *crtc = &pipeline->pipe.crtc;
101 struct drm_device *dev = crtc->dev;
104 spin_lock_irqsave(&dev->event_lock, flags);
105 if (pipeline->pending_event)
106 drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
107 pipeline->pending_event = NULL;
108 spin_unlock_irqrestore(&dev->event_lock, flags);
111 static void display_enable(struct drm_simple_display_pipe *pipe,
112 struct drm_crtc_state *crtc_state,
113 struct drm_plane_state *plane_state)
115 struct xen_drm_front_drm_pipeline *pipeline =
116 to_xen_drm_pipeline(pipe);
117 struct drm_crtc *crtc = &pipe->crtc;
118 struct drm_framebuffer *fb = plane_state->fb;
121 if (!drm_dev_enter(pipe->crtc.dev, &idx))
124 ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
125 fb->width, fb->height,
126 fb->format->cpp[0] * 8,
127 xen_drm_front_fb_to_cookie(fb));
130 DRM_ERROR("Failed to enable display: %d\n", ret);
131 pipeline->conn_connected = false;
137 static void display_disable(struct drm_simple_display_pipe *pipe)
139 struct xen_drm_front_drm_pipeline *pipeline =
140 to_xen_drm_pipeline(pipe);
143 if (drm_dev_enter(pipe->crtc.dev, &idx)) {
144 ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
145 xen_drm_front_fb_to_cookie(NULL));
149 DRM_ERROR("Failed to disable display: %d\n", ret);
151 /* Make sure we can restart with enabled connector next time */
152 pipeline->conn_connected = true;
154 /* release stalled event if any */
155 send_pending_event(pipeline);
158 void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
162 * This runs in interrupt context, e.g. under
163 * drm_info->front_info->io_lock, so we cannot call _sync version
166 cancel_delayed_work(&pipeline->pflip_to_worker);
168 send_pending_event(pipeline);
171 static void pflip_to_worker(struct work_struct *work)
173 struct delayed_work *delayed_work = to_delayed_work(work);
174 struct xen_drm_front_drm_pipeline *pipeline =
175 container_of(delayed_work,
176 struct xen_drm_front_drm_pipeline,
179 DRM_ERROR("Frame done timed-out, releasing");
180 send_pending_event(pipeline);
183 static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
184 struct drm_plane_state *old_plane_state)
186 struct drm_plane_state *plane_state =
187 drm_atomic_get_new_plane_state(old_plane_state->state,
191 * If old_plane_state->fb is NULL and plane_state->fb is not,
192 * then this is an atomic commit which will enable display.
193 * If old_plane_state->fb is not NULL and plane_state->fb is,
194 * then this is an atomic commit which will disable display.
195 * Ignore these and do not send page flip as this framebuffer will be
196 * sent to the backend as a part of display_set_config call.
198 if (old_plane_state->fb && plane_state->fb) {
199 struct xen_drm_front_drm_pipeline *pipeline =
200 to_xen_drm_pipeline(pipe);
201 struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
204 schedule_delayed_work(&pipeline->pflip_to_worker,
205 msecs_to_jiffies(FRAME_DONE_TO_MS));
207 ret = xen_drm_front_page_flip(drm_info->front_info,
209 xen_drm_front_fb_to_cookie(plane_state->fb));
211 DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);
213 pipeline->conn_connected = false;
215 * Report the flip not handled, so pending event is
216 * sent, unblocking user-space.
221 * Signal that page flip was handled, pending event will be sent
222 * on frame done event from the backend.
230 static void display_update(struct drm_simple_display_pipe *pipe,
231 struct drm_plane_state *old_plane_state)
233 struct xen_drm_front_drm_pipeline *pipeline =
234 to_xen_drm_pipeline(pipe);
235 struct drm_crtc *crtc = &pipe->crtc;
236 struct drm_pending_vblank_event *event;
239 event = crtc->state->event;
241 struct drm_device *dev = crtc->dev;
244 WARN_ON(pipeline->pending_event);
246 spin_lock_irqsave(&dev->event_lock, flags);
247 crtc->state->event = NULL;
249 pipeline->pending_event = event;
250 spin_unlock_irqrestore(&dev->event_lock, flags);
253 if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
254 send_pending_event(pipeline);
259 * Send page flip request to the backend *after* we have event cached
260 * above, so on page flip done event from the backend we can
261 * deliver it and there is no race condition between this code and
262 * event from the backend.
263 * If this is not a page flip, e.g. no flip done event from the backend
264 * is expected, then send now.
266 if (!display_send_page_flip(pipe, old_plane_state))
267 send_pending_event(pipeline);
272 static enum drm_mode_status
273 display_mode_valid(struct drm_simple_display_pipe *pipe,
274 const struct drm_display_mode *mode)
276 struct xen_drm_front_drm_pipeline *pipeline =
277 container_of(pipe, struct xen_drm_front_drm_pipeline,
280 if (mode->hdisplay != pipeline->width)
283 if (mode->vdisplay != pipeline->height)
289 static const struct drm_simple_display_pipe_funcs display_funcs = {
290 .mode_valid = display_mode_valid,
291 .enable = display_enable,
292 .disable = display_disable,
293 .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
294 .update = display_update,
297 static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
298 int index, struct xen_drm_front_cfg_connector *cfg,
299 struct xen_drm_front_drm_pipeline *pipeline)
301 struct drm_device *dev = drm_info->drm_dev;
306 pipeline->drm_info = drm_info;
307 pipeline->index = index;
308 pipeline->height = cfg->height;
309 pipeline->width = cfg->width;
311 INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);
313 ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
317 formats = xen_drm_front_conn_get_formats(&format_count);
319 return drm_simple_display_pipe_init(dev, &pipeline->pipe,
320 &display_funcs, formats,
325 int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
327 struct drm_device *dev = drm_info->drm_dev;
330 drm_mode_config_init(dev);
332 dev->mode_config.min_width = 0;
333 dev->mode_config.min_height = 0;
334 dev->mode_config.max_width = 4095;
335 dev->mode_config.max_height = 2047;
336 dev->mode_config.funcs = &mode_config_funcs;
338 for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
339 struct xen_drm_front_cfg_connector *cfg =
340 &drm_info->front_info->cfg.connectors[i];
341 struct xen_drm_front_drm_pipeline *pipeline =
342 &drm_info->pipeline[i];
344 ret = display_pipe_init(drm_info, i, cfg, pipeline);
346 drm_mode_config_cleanup(dev);
351 drm_mode_config_reset(dev);
352 drm_kms_helper_poll_init(dev);
356 void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
360 for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
361 struct xen_drm_front_drm_pipeline *pipeline =
362 &drm_info->pipeline[i];
364 cancel_delayed_work_sync(&pipeline->pflip_to_worker);
366 send_pending_event(pipeline);