/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/mm_types.h>
#include <linux/reservation.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"
/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this enum.
 */
enum vc4_kernel_bo_type {
	/* Any kernel allocation (gem_create_object hook) before it
	 * gets another type set.
	 */
	VC4_BO_TYPE_V3D_SHADER,
	VC4_BO_TYPE_KERNEL_CACHE,
/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 * (An illustrative usage sketch follows below.)
 */
	/* Tracks the number of users of the perfmon; when this counter
	 * reaches zero the perfmon is destroyed.
	 */

	/* Number of counters activated in this perfmon instance
	 * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
	 */

	/* Events counted by the HW perf counters. */
	u8 events[DRM_VC4_MAX_PERF_COUNTERS];

	/* Storage for counter values. Counters are incremented by the HW
	 * perf counter values every time the perfmon is attached to a GPU job.
	 * This way, perfmon users don't have to retrieve the results after
	 * each job if they want to track events covering several submissions.
	 * Note that counter values can't be reset, but you can fake a reset by
	 * destroying the perfmon and creating a new one.
	 */
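/*
 * Illustrative sketch, not part of the original header: roughly how the
 * perfmon mechanism described above is driven from userspace, assuming the
 * perfmon ioctls and the submit_cl perfmon id field from uapi/drm/vc4_drm.h.
 * Variable names are hypothetical.
 *
 *	struct drm_vc4_perfmon_create create = { .ncounters = 1 };
 *	create.events[0] = event_to_count;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_CREATE, &create);
 *
 *	submit.perfmonid = create.id;
 *	ioctl(fd, DRM_IOCTL_VC4_SUBMIT_CL, &submit);
 *
 *	struct drm_vc4_perfmon_get_values get = { .id = create.id };
 *	get.values_ptr = (uintptr_t)values;
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_GET_VALUES, &get);
 *
 *	struct drm_vc4_perfmon_destroy destroy = { .id = create.id };
 *	ioctl(fd, DRM_IOCTL_VC4_PERFMON_DESTROY, &destroy);
 */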
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;

	struct vc4_hang_state *hang_state;

	/* The kernel-space BO cache. Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */

	/* Array of list heads for entries in the BO cache,
	 * based on number of pages, so we can do O(1) lookups
	 * in the cache when allocating.
	 */
	struct list_head *size_list;
	uint32_t size_list_size;

	/* List of all BOs in the cache, ordered by age, so we
	 * can do O(1) lookups when trying to free old
	 * BOs.
	 */
	struct list_head time_list;
	struct work_struct time_work;
	struct timer_list time_timer;
	/* Protects bo_cache and bo_labels. */
	struct mutex bo_lock;

	/* Purgeable BO pool. All BOs in this pool can have their memory
	 * reclaimed if the driver is unable to allocate new BOs. We also
	 * keep stats related to the purge mechanism here.
	 */
	struct list_head list;
	unsigned int purged_num;
	uint64_t dma_fence_context;

	/* Sequence number for the last job queued in bin_job_list.
	 * Starts at 0 (no jobs emitted).
	 */

	/* Sequence number for the last completed job on the GPU.
	 * Starts at 0 (no jobs completed).
	 */
	uint64_t finished_seqno;
	/* List of all struct vc4_exec_info for jobs to be executed in
	 * the binner. The first job in the list is the one currently
	 * programmed into ct0ca for execution.
	 */
	struct list_head bin_job_list;

	/* List of all struct vc4_exec_info for jobs that have
	 * completed binning and are ready for rendering. The first
	 * job in the list is the one currently programmed into ct1ca
	 * for execution.
	 */
	struct list_head render_job_list;

	/* List of the finished vc4_exec_infos waiting to be freed by
	 * job_done_work.
	 */
	struct list_head job_done_list;

	/* Spinlock used to synchronize the job_list and seqno
	 * accesses between the IRQ handler and GEM ioctls.
	 */

	wait_queue_head_t job_wait_queue;
	struct work_struct job_done_work;
	/* Used to track the active perfmon if any. Access to this field is
	 * protected by job_lock.
	 */
	struct vc4_perfmon *active_perfmon;

	/* List of struct vc4_seqno_cb for callbacks to be made from a
	 * workqueue when the given seqno is passed.
	 */
	struct list_head seqno_cb_list;
	/* The memory used for storing binner tile alloc, tile state,
	 * and overflow memory allocations. This is freed when V3D
	 * powers down.
	 */
	struct vc4_bo *bin_bo;

	/* Size of blocks allocated within bin_bo. */
	uint32_t bin_alloc_size;

	/* Bitmask of the bin_alloc_size chunks in bin_bo that are
	 * used.
	 */
	uint32_t bin_alloc_used;

	/* Bitmask of the current bin_alloc used for overflow memory. */
	uint32_t bin_alloc_overflow;

	struct work_struct overflow_mem_work;
	/* Mutex controlling the power refcount. */
	struct mutex power_lock;

	struct timer_list timer;
	struct work_struct reset_work;

	struct semaphore async_modeset;

	struct drm_modeset_lock ctm_state_lock;
	struct drm_private_obj ctm_manager;
static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}
	struct drm_gem_cma_object base;

	/* seqno of the last job to render using this BO. */

	/* seqno of the last job to use the RCL to write to this BO.
	 *
	 * Note that this doesn't include binner overflow memory
	 * writes.
	 */
	uint64_t write_seqno;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;

	/* Struct for shader validation state, if created by
	 * DRM_IOCTL_VC4_CREATE_SHADER_BO.
	 */
	struct vc4_validated_shader_info *validated_shader;

	/* normally (resv == &_resv) except for imported BOs */
	struct reservation_object *resv;
	struct reservation_object _resv;

	/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
	 * for user-allocated labels.
	 */

	/* Count the number of active users. This is needed to determine
	 * whether we can move the BO to the purgeable list or not (when the BO
	 * is used by the GPU or the display engine we can't purge it).
	 */

	/* Store purgeable/purged state here */
	struct mutex madv_lock;
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}
	struct dma_fence base;
	struct drm_device *dev;
	/* vc4 seqno for signaled() test */
static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
	return (struct vc4_fence *)fence;
}
struct vc4_seqno_cb {
	struct work_struct work;
	void (*func)(struct vc4_seqno_cb *cb);
};
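/*
 * Illustrative sketch, not part of the original header: a caller typically
 * embeds a vc4_seqno_cb in its own state and passes it to
 * vc4_queue_seqno_cb() (declared further down); the func callback then runs
 * from a workqueue once the given seqno has passed. The wrapper struct and
 * callback names here are hypothetical.
 *
 *	struct my_flip_state {
 *		struct vc4_seqno_cb cb;
 *	};
 *
 *	static void my_flip_done(struct vc4_seqno_cb *cb)
 *	{
 *		struct my_flip_state *state =
 *			container_of(cb, struct my_flip_state, cb);
 *		// release resources, signal completion, etc.
 *	}
 *
 *	vc4_queue_seqno_cb(dev, &state->cb, seqno, my_flip_done);
 */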
	struct platform_device *pdev;

	struct platform_device *pdev;

	/* Memory manager for CRTCs to allocate space in the display
	 * list. Units are dwords.
	 */
	struct drm_mm dlist_mm;

	/* Memory manager for the LBM memory used by HVS scaling. */
	struct drm_mm lbm_mm;

	struct drm_mm_node mitchell_netravali_filter;
	struct drm_plane base;

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}
enum vc4_scaling_mode {

struct vc4_plane_state {
	struct drm_plane_state base;
	/* System memory copy of the display list for this element, computed
	 * at atomic_check time.
	 */
	u32 dlist_size; /* Number of dwords allocated for the display list */
	u32 dlist_count; /* Number of used dwords in the display list. */

	/* Offset in the dlist to various words, for pageflip or
	 * panning updates.
	 */

	/* Offset where the plane's dlist was last stored in the
	 * hardware at vc4_crtc_atomic_flush() time.
	 */
	u32 __iomem *hw_dlist;

	/* Clipped coordinates of the plane on the display. */
	int crtc_x, crtc_y, crtc_w, crtc_h;
	/* Clipped area being scanned from in the FB. */
	u32 src_w[2], src_h[2];

	/* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
	enum vc4_scaling_mode x_scaling[2], y_scaling[2];

	/* Offset to start scanning out from the start of the plane's
	 * buffer.
	 */

	/* Our allocation in LBM for temporary storage during scaling. */
	struct drm_mm_node lbm;

	/* Set when the plane has per-pixel alpha content or does not cover
	 * the entire screen. This is a hint to the CRTC that it might need
	 * to enable background color fill.
	 */
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
	return (struct vc4_plane_state *)state;
}
enum vc4_encoder_type {
	VC4_ENCODER_TYPE_NONE,
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

	struct drm_encoder base;
	enum vc4_encoder_type type;

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}
struct vc4_crtc_data {
	/* Which channel of the HVS this pixelvalve sources from. */

	enum vc4_encoder_type encoder_types[4];
};
	struct drm_crtc base;
	const struct vc4_crtc_data *data;

	/* Timestamp at start of vblank irq - unaffected by lock delays. */

	/* Which HVS channel we're using for our CRTC. */

	/* Size in pixels of the COB memory allocated to this CRTC. */

	struct drm_pending_vblank_event *event;

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
	return (struct vc4_crtc *)crtc;
}
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
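/*
 * Illustrative note, not part of the original header: these accessors expect
 * a local "struct vc4_dev *vc4" in scope (e.g. from to_vc4_dev(dev)). A
 * hypothetical read-modify-write of an HVS register would look like:
 *
 *	struct vc4_dev *vc4 = to_vc4_dev(dev);
 *	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
 *
 *	HVS_WRITE(SCALER_DISPCTRL, dispctrl | SCALER_DISPCTRL_ENABLE);
 *
 * SCALER_DISPCTRL and SCALER_DISPCTRL_ENABLE are used here only as an example.
 */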
struct vc4_exec_info {
	/* Sequence number for this bin/render job. */

	/* Latest write_seqno of any BO that binning depends on. */
	uint64_t bin_dep_seqno;

	struct dma_fence *fence;

	/* Last current addresses the hardware was processing when the
	 * hangcheck timer checked on us.
	 */
	uint32_t last_ct0ca, last_ct1ca;

	/* Kernel-space copy of the ioctl arguments */
	struct drm_vc4_submit_cl *args;
	/* This is the array of BOs that were looked up at the start of exec.
	 * Command validation will use indices into this array.
	 */
	struct drm_gem_cma_object **bo;

	/* List of BOs that are being written by the RCL. Other than
	 * the binner temporary storage, this is all the BOs written
	 * by the job.
	 */
	struct drm_gem_cma_object *rcl_write_bo[4];
	uint32_t rcl_write_bo_count;
	/* Pointers for our position in vc4->job_list */
	struct list_head head;

	/* List of other BOs used in the job that need to be released
	 * once the job is complete.
	 */
	struct list_head unref_list;

	/* Current unvalidated indices into @bo loaded by the non-hardware
	 * VC4_PACKET_GEM_HANDLES.
	 */
	uint32_t bo_index[2];

	/* This is the BO where we store the validated command lists, shader
	 * records, and uniforms.
	 */
	struct drm_gem_cma_object *exec_bo;
	/* This tracks the per-shader-record state (packet 64) that
	 * determines the length of the shader record and the offset
	 * it's expected to be found at. It gets read in from the
	 * command lists.
	 */
	struct vc4_shader_state {
		/* Maximum vertex index referenced by any primitive using this
		 * shader record.
		 */
	} *shader_state;

	/** How many shader states the user declared they were using. */
	uint32_t shader_state_size;
	/** How many shader state records the validator has seen. */
	uint32_t shader_state_count;
	bool found_tile_binning_mode_config_packet;
	bool found_start_tile_binning_packet;
	bool found_increment_semaphore_packet;

	uint8_t bin_tiles_x, bin_tiles_y;
	/* Physical address of the start of the tile alloc array
	 * (where each tile's binned CL will start)
	 */
	uint32_t tile_alloc_offset;
	/* Bitmask of which binner slots are freed when this job completes. */

	/* Computed addresses pointing into exec_bo where we start the
	 * bin thread (ct0) and render thread (ct1).
	 */
	uint32_t ct0ca, ct0ea;
	uint32_t ct1ca, ct1ea;
	/* Pointer to the unvalidated bin CL (if present). */

	/* Pointers to the shader recs. The paddr gets incremented as CL
	 * packets are relocated in validate_gl_shader_state, and the vaddrs
	 * (u and v) get incremented and the size decremented as the shader
	 * recs themselves are validated.
	 */
	uint32_t shader_rec_p;
	uint32_t shader_rec_size;

	/* Pointers to the uniform data. These pointers are incremented, and
	 * the size decremented, as each batch of uniforms is uploaded.
	 */
	uint32_t uniforms_size;
	/* Pointer to a performance monitor object if the user requested it,
	 * NULL otherwise.
	 */
	struct vc4_perfmon *perfmon;
};
/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->bin_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	return list_first_entry_or_null(&vc4->render_job_list,
					struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_last_entry(&vc4->render_job_list,
			       struct vc4_exec_info, head);
}
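/*
 * Illustrative note, not part of the original header: the job lists these
 * helpers walk are shared with the IRQ handler, so (per the spinlock comment
 * in struct vc4_dev above) callers are expected to hold the job spinlock
 * while using them, roughly:
 *
 *	unsigned long irqflags;
 *
 *	spin_lock_irqsave(&vc4->job_lock, irqflags);
 *	exec = vc4_first_bin_job(vc4);
 *	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */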
/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
	uint32_t p_offset[4];
};
/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
	uint32_t uniforms_size;
	uint32_t uniforms_src_size;
	uint32_t num_texture_samples;
	struct vc4_texture_sample_info *texture_samples;

	uint32_t num_uniform_addr_offsets;
	uint32_t *uniform_addr_offsets;
};
/* _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar
 * and we may never have had a chance to check the condition before the
 * timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			if (!(COND))					\
				ret__ = -ETIMEDOUT;			\
			break;						\
		}							\
		if (W && drm_can_sleep()) {				\
			msleep(W);					\
		} else {						\
			cpu_relax();					\
		}							\
	}								\
	ret__;								\
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
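/*
 * Illustrative example, not part of the original header: polling a condition
 * for up to 1 ms, sleeping between checks when the context allows it. The
 * register and field names below are only meant as an example of a typical
 * caller.
 *
 *	if (wait_for(!(HVS_READ(SCALER_DISPSTATX(chan)) &
 *		       SCALER_DISPSTATX_FULL), 1))
 *		DRM_ERROR("timed out waiting for FIFO to drain\n");
 */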
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
						 struct dma_buf_attachment *attach,
						 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
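/*
 * Illustrative note, not part of the original header: users of a BO that must
 * not be purged (e.g. while it is being scanned out or used by the GPU) are
 * expected to hold a usecnt reference, pairing the two calls above, roughly:
 *
 *	ret = vc4_bo_inc_usecnt(bo);
 *	if (ret)
 *		return ret;
 *	// ... BO is guaranteed not to be purged here ...
 *	vc4_bo_dec_usecnt(bo);
 */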
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
			     bool in_vblank_irq, int *vpos, int *hpos,
			     ktime_t *stime, ktime_t *etime,
			     const struct drm_display_mode *mode);
int vc4_debugfs_init(struct drm_minor *minor);

void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

extern const struct dma_fence_ops vc4_fence_ops;

void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
		       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
		       struct vc4_seqno_cb *cb, uint64_t seqno,
		       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);

extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
int vc4_kms_load(struct drm_device *dev);

struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
			    struct drm_framebuffer *fb);
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
int
vc4_validate_bin_cl(struct drm_device *dev,
		    void *validated,
		    void *unvalidated,
		    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
				      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
			struct drm_gem_cma_object *fbo,
			uint32_t offset, uint8_t tiling_format,
			uint32_t width, uint32_t height, uint8_t cpp);
/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
		      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);