/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/reservation.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_KERNEL,
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_KERNEL_CACHE,
	VC4_BO_TYPE_COUNT,
};
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
	/* Tracks the number of users of the perfmon, when this counter reaches
	 * zero the perfmon is destroyed.
	 */
	refcount_t refcnt;

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */
	u8 ncounters;

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
	u64 counters[0];
};
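
/* Illustrative lifetime sketch (not from the original header; the ioctl
 * argument field and the error handling below are assumptions): the submit
 * path looks up the perfmon requested by userspace, holds a reference while
 * the job runs, and drops it when the job completes.
 *
 *	exec->perfmon = vc4_perfmon_find(vc4file, args->perfmonid);
 *	if (!exec->perfmon)
 *		return -ENOENT;
 *	...
 *	vc4_perfmon_put(exec->perfmon);		// job is done
 */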
struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;

	struct vc4_hang_state *hang_state;
	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating.
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;
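
	/* Illustrative cache-hit path (a simplified sketch with hypothetical
	 * locals; the real allocation code lives in vc4_bo.c):
	 *
	 *	size_t npages = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
	 *
	 *	mutex_lock(&vc4->bo_lock);
	 *	if (npages < vc4->bo_cache.size_list_size &&
	 *	    !list_empty(&vc4->bo_cache.size_list[npages]))
	 *		bo = list_first_entry(&vc4->bo_cache.size_list[npages],
	 *				      struct vc4_bo, size_head);
	 *	mutex_unlock(&vc4->bo_lock);
	 */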
	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct {
		struct list_head list;
		unsigned int purged_num;
	} purgeable;

	uint64_t dma_fence_context;
	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */
	uint64_t emit_seqno;

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;
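
	/* Illustrative sketch (hypothetical helper, not part of the original
	 * header): with the two counters above, "has job N completed?" is a
	 * simple comparison.
	 *
	 *	static inline bool vc4_seqno_done(struct vc4_dev *vc4, uint64_t seqno)
	 *	{
	 *		return vc4->finished_seqno >= seqno;
	 *	}
	 */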
	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner.  The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering.  The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;

	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */
	spinlock_t job_lock;
	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;
	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;

	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations.  This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;
	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;
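
	/* Illustrative sketch of picking a free binner slot out of the
	 * bin_alloc_used bitmask (simplified; the real logic is in
	 * vc4_v3d_get_bin_slot()):
	 *
	 *	int slot = ffs(~vc4->bin_alloc_used);
	 *
	 *	if (slot) {
	 *		slot--;				// ffs() is 1-based
	 *		vc4->bin_alloc_used |= BIT(slot);
	 *	}
	 */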
	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct {
		struct timer_list timer;
		struct work_struct reset_work;
	} hangcheck;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
};
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
struct vc4_bo {
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */
	uint64_t seqno;

	/* seqno of the last job to use the RCL to write to this BO.
	 * Note that this doesn't include binner overflow memory writes.
	 */
	uint64_t write_seqno;
	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;
	/* normally (resv == &_resv) except for imported bo's */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */
	int label;

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */
	refcount_t usecnt;

	/* Store purgeable/purged state here */
	int madv;
	struct mutex madv_lock;
};
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
struct vc4_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
	uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
struct vc4_seqno_cb {
	struct work_struct work;
	uint64_t seqno;
	void (*func)(struct vc4_seqno_cb *cb);
};
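
/* Illustrative usage sketch (hypothetical caller, not part of this header):
 * embed a vc4_seqno_cb in your own state and queue it with
 * vc4_queue_seqno_cb(); @func runs from a workqueue once the GPU has passed
 * @seqno.
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *		...
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, bo->seqno, my_flip_done);
 */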
struct vc4_v3d {
	struct platform_device *pdev;
	void __iomem *regs;
};

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	/* Memory manager for CRTCs to allocate space in the display
	 * list.  Units are dwords.
	 */
	struct drm_mm dlist_mm;
	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;
	struct drm_mm_node mitchell_netravali_filter;
};
struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}
enum vc4_scaling_mode {
	VC4_SCALING_NONE,
	VC4_SCALING_TPZ,
	VC4_SCALING_PPF,
};

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 *dlist;
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * panning updates.
	 */
	u32 pos0_offset;
	u32 pos2_offset;
	u32 ptr0_offset;

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_x, src_y;
	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];

	/* Offset to start scanning out from the start of the plane's
	 * buffer.
	 */
	u32 offsets[3];

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
	bool needs_bg_fill;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}
struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */
	int hvs_channel;
	enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
	struct drm_crtc base;
	const struct vc4_crtc_data *data;
	/* Timestamp at start of vblank irq - unaffected by lock delays. */
	ktime_t t_vblank;
	/* Which HVS channel we're using for our CRTC. */
	int channel;
	/* Size in pixels of the COB memory allocated to this CRTC. */
	u32 cob_size;
	struct drm_pending_vblank_event *event;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
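
/* These helpers assume a local "struct vc4_dev *vc4" in scope, e.g.
 * (illustrative use only; V3D_IDENT0 is assumed to come from vc4_regs.h):
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 ident = V3D_READ(V3D_IDENT0);
 */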
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */
	uint64_t seqno;

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;

	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;
	uint32_t bo_count;

	/* List of BOs that are being written by the RCL.  Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;

	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;
	/* This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at.  It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		uint32_t addr;
		/* Maximum vertex index referenced by any primitive using this
		 * shader state.
		 */
		uint32_t max_index;
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;

	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;
	bool found_flush;
	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */
	uint32_t bin_slots;

	/* Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;

	/* Pointer to the unvalidated bin CL (if present). */
	void *bin_u;

	/* Pointers to the shader recs.  The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and size decremented as the shader recs
	 * themselves are validated.
	 */
	void *shader_rec_u;
	void *shader_rec_v;
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data.  These pointers are incremented, and
	 * size decremented, as each batch of uniforms is uploaded.
	 */
	void *uniforms_u;
	void *uniforms_v;
	uint32_t uniforms_p;
	uint32_t uniforms_size;

	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
	struct {
		struct idr idr;
		struct mutex lock;
	} perfmon;
};
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
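
/* Illustrative sketch of how these helpers get used when binning completes
 * (simplified; the real handling lives in vc4_irq.c and vc4_gem.c, and the
 * details here are assumptions):
 *
 *	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);
 *
 *	if (exec) {
 *		vc4_move_job_to_render(dev, exec);
 *		vc4_submit_next_bin_job(dev);
 *	}
 */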
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	bool is_direct;
	uint32_t p_offset[4];
};

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;

	bool is_threaded;
};
/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep())				\
			msleep(W);					\
		else							\
			cpu_relax();					\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
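
/* Illustrative use of wait_for() (the register and bit names below are
 * hypothetical, not taken from vc4_regs.h):
 *
 *	ret = wait_for(!(HVS_READ(SOME_STATUS_REG) & SOME_BUSY_BIT), 1000);
 *	if (ret)
 *		DRM_ERROR("timed out waiting for the hardware to go idle\n");
 */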
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;
/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);