// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_ccb.h"
#include "pvr_cccb.h"
#include "pvr_device.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/types.h>

static __always_inline u32
get_ccb_space(u32 w_off, u32 r_off, u32 ccb_size)
{
	return (((r_off) - (w_off)) + ((ccb_size) - 1)) & ((ccb_size) - 1);
}
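
/*
 * Note on get_ccb_space(): with ccb_size a power of two, the expression above
 * computes (r_off - w_off - 1) modulo ccb_size, i.e. the number of bytes that
 * can be written before the write offset would catch up with the read offset.
 * One byte is always left unused, the usual ring-buffer trick for telling a
 * full buffer apart from an empty one. Worked example (illustrative numbers):
 * with ccb_size = 16, w_off = 12 and r_off = 4, the free space is
 * ((4 - 12) + 15) & 15 = 7 bytes.
 */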

static void
cccb_ctrl_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_cccb_ctl *ctrl = cpu_ptr;
	struct pvr_cccb *pvr_cccb = priv;

	WRITE_ONCE(ctrl->write_offset, 0);
	WRITE_ONCE(ctrl->read_offset, 0);
	WRITE_ONCE(ctrl->dep_offset, 0);
	WRITE_ONCE(ctrl->wrap_mask, pvr_cccb->wrap_mask);
}

/**
 * pvr_cccb_init() - Initialise a Client CCB
 * @pvr_dev: Device pointer.
 * @pvr_cccb: Pointer to Client CCB structure to initialise.
 * @size_log2: Log2 size of Client CCB in bytes.
 * @name: Name of owner of Client CCB. Used for fence context.
 *
 * Return:
 *  * Zero on success, or
 *  * Any error code returned by pvr_fw_object_create_and_map().
 */
int
pvr_cccb_init(struct pvr_device *pvr_dev, struct pvr_cccb *pvr_cccb,
	      u32 size_log2, const char *name)
{
	size_t size = 1 << size_log2;
	int err;

	pvr_cccb->size = size;
	pvr_cccb->write_offset = 0;
	pvr_cccb->wrap_mask = size - 1;

	/*
	 * Map CCCB and control structure as uncached, so we don't have to flush
	 * CPU cache repeatedly when polling for space.
	 */
	pvr_cccb->ctrl = pvr_fw_object_create_and_map(pvr_dev, sizeof(*pvr_cccb->ctrl),
						      PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						      cccb_ctrl_init, pvr_cccb,
						      &pvr_cccb->ctrl_obj);
	if (IS_ERR(pvr_cccb->ctrl))
		return PTR_ERR(pvr_cccb->ctrl);

	pvr_cccb->cccb = pvr_fw_object_create_and_map(pvr_dev, size,
						      PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
						      NULL, NULL, &pvr_cccb->cccb_obj);
	if (IS_ERR(pvr_cccb->cccb)) {
		err = PTR_ERR(pvr_cccb->cccb);
		goto err_free_ctrl;
	}

	pvr_fw_object_get_fw_addr(pvr_cccb->ctrl_obj, &pvr_cccb->ctrl_fw_addr);
	pvr_fw_object_get_fw_addr(pvr_cccb->cccb_obj, &pvr_cccb->cccb_fw_addr);

	return 0;

err_free_ctrl:
	pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);

	return err;
}

/**
 * pvr_cccb_fini() - Release Client CCB structure
 * @pvr_cccb: Client CCB to release.
 */
void
pvr_cccb_fini(struct pvr_cccb *pvr_cccb)
{
	pvr_fw_object_unmap_and_destroy(pvr_cccb->cccb_obj);
	pvr_fw_object_unmap_and_destroy(pvr_cccb->ctrl_obj);
}
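
/*
 * Illustrative usage sketch (not part of this file): a caller that embeds a
 * struct pvr_cccb in its own context object would typically pair the two
 * helpers above. The `ctx` structure and the 16 KiB (2^14 byte) size below
 * are assumptions made for the example only:
 *
 *	err = pvr_cccb_init(pvr_dev, &ctx->cccb, 14, "geom");
 *	if (err)
 *		return err;
 *	...
 *	pvr_cccb_fini(&ctx->cccb);
 */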

/**
 * pvr_cccb_cmdseq_fits() - Check if a command sequence fits in the CCCB
 * @pvr_cccb: Target Client CCB.
 * @size: Size of the command sequence.
 *
 * Check if a command sequence fits in the CCCB we have at hand.
 *
 * Return:
 *  * true if the command sequence fits in the CCCB, or
 *  * false otherwise.
 */
bool pvr_cccb_cmdseq_fits(struct pvr_cccb *pvr_cccb, size_t size)
{
	struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
	u32 read_offset, remaining;
	bool fits = false;

	read_offset = READ_ONCE(ctrl->read_offset);
	remaining = pvr_cccb->size - pvr_cccb->write_offset;

	/* Always ensure we have enough room for a padding command at the end of the CCCB.
	 * If the command sequence does not fit in the space remaining before the end of
	 * the buffer, also reserve that remaining space for the padding command that will
	 * be needed to wrap.
	 */
	if (size + PADDING_COMMAND_SIZE > remaining)
		size += remaining;

	if (get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size) >= size)
		fits = true;

	return fits;
}
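
/*
 * Worked example for the reservation above (numbers are illustrative only):
 * with a 1024 byte CCCB, write_offset = 1000 and a 64 byte command sequence,
 * remaining is 24 bytes, so the sequence cannot be placed before the end of
 * the buffer. The check therefore requires 64 + 24 = 88 bytes of free space:
 * 24 bytes for the padding command that fills the tail, and 64 bytes for the
 * sequence itself at the start of the buffer.
 */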

/**
 * pvr_cccb_write_command_with_header() - Write a command + command header to a
 *                                        Client CCB
 * @pvr_cccb: Target Client CCB.
 * @cmd_type: Client CCB command type. Must be one of %ROGUE_FWIF_CCB_CMD_TYPE_*.
 * @cmd_size: Size of command in bytes.
 * @cmd_data: Pointer to command to write.
 * @ext_job_ref: External job reference.
 * @int_job_ref: Internal job reference.
 *
 * Caller must make sure there's enough space in CCCB to queue this command. This
 * can be done by calling pvr_cccb_cmdseq_fits().
 *
 * This function is not protected by any lock. The caller must ensure there's
 * no concurrent caller, which should be guaranteed by the drm_sched model (job
 * submission is serialized in drm_sched_main()).
 */
void
pvr_cccb_write_command_with_header(struct pvr_cccb *pvr_cccb, u32 cmd_type, u32 cmd_size,
				   void *cmd_data, u32 ext_job_ref, u32 int_job_ref)
{
	u32 sz_with_hdr = pvr_cccb_get_size_of_cmd_with_hdr(cmd_size);
	struct rogue_fwif_ccb_cmd_header cmd_header = {
		.cmd_type = cmd_type,
		.cmd_size = ALIGN(cmd_size, 8),
		.ext_job_ref = ext_job_ref,
		.int_job_ref = int_job_ref,
	};
	struct rogue_fwif_cccb_ctl *ctrl = pvr_cccb->ctrl;
	u32 remaining = pvr_cccb->size - pvr_cccb->write_offset;
	u32 required_size, cccb_space, read_offset;

	/*
	 * Always ensure we have enough room for a padding command at the end of
	 * the CCCB.
	 */
	if (remaining < sz_with_hdr + PADDING_COMMAND_SIZE) {
		/*
		 * Command would need to wrap, so we need to pad the remainder
		 * of the CCCB.
		 */
		required_size = sz_with_hdr + remaining;
	} else {
		required_size = sz_with_hdr;
	}

	read_offset = READ_ONCE(ctrl->read_offset);
	cccb_space = get_ccb_space(pvr_cccb->write_offset, read_offset, pvr_cccb->size);
	if (WARN_ON(cccb_space < required_size))
		return;

	if (required_size != sz_with_hdr) {
		/* Add padding command */
		struct rogue_fwif_ccb_cmd_header pad_cmd = {
			.cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_PADDING,
			.cmd_size = remaining - sizeof(pad_cmd),
		};

		memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &pad_cmd, sizeof(pad_cmd));
		pvr_cccb->write_offset = 0;
	}

	memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset], &cmd_header, sizeof(cmd_header));
	memcpy(&pvr_cccb->cccb[pvr_cccb->write_offset + sizeof(cmd_header)], cmd_data, cmd_size);
	pvr_cccb->write_offset += sz_with_hdr;
}
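
/*
 * Illustrative usage sketch (not part of this file): job submission code is
 * expected to pair the two helpers above, checking for space before writing.
 * The `job` structure, its fields and the -EBUSY fallback are assumptions
 * made for the example only:
 *
 *	if (!pvr_cccb_cmdseq_fits(&ctx->cccb,
 *				  pvr_cccb_get_size_of_cmd_with_hdr(job->cmd_len)))
 *		return -EBUSY;
 *
 *	pvr_cccb_write_command_with_header(&ctx->cccb, job->fw_ccb_cmd_type,
 *					   job->cmd_len, job->cmd,
 *					   job->ext_job_ref, job->int_job_ref);
 */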

static void fill_cmd_kick_data(struct pvr_cccb *cccb, u32 ctx_fw_addr,
			       struct pvr_hwrt_data *hwrt,
			       struct rogue_fwif_kccb_cmd_kick_data *k)
{
	k->context_fw_addr = ctx_fw_addr;
	k->client_woff_update = cccb->write_offset;
	k->client_wrap_mask_update = cccb->wrap_mask;

	if (hwrt) {
		u32 cleanup_state_offset = offsetof(struct rogue_fwif_hwrtdata, cleanup_state);

		pvr_fw_object_get_fw_addr_offset(hwrt->fw_obj, cleanup_state_offset,
						 &k->cleanup_ctl_fw_addr[k->num_cleanup_ctl++]);
	}
}

/**
 * pvr_cccb_send_kccb_kick() - Send KCCB kick to trigger command processing
 * @pvr_dev: Device pointer.
 * @pvr_cccb: Pointer to CCCB to process.
 * @cctx_fw_addr: FW virtual address for context owning this Client CCB.
 * @hwrt: HWRT data set associated with this kick. May be %NULL.
 *
 * You must call pvr_kccb_reserve_slot() and wait for the returned fence to
 * signal (if pvr_kccb_reserve_slot() didn't return %NULL) before calling
 * pvr_cccb_send_kccb_kick().
 */
void
pvr_cccb_send_kccb_kick(struct pvr_device *pvr_dev,
			struct pvr_cccb *pvr_cccb, u32 cctx_fw_addr,
			struct pvr_hwrt_data *hwrt)
{
	struct rogue_fwif_kccb_cmd cmd_kick = {
		.cmd_type = ROGUE_FWIF_KCCB_CMD_KICK,
	};

	fill_cmd_kick_data(pvr_cccb, cctx_fw_addr, hwrt, &cmd_kick.cmd_data.cmd_kick_data);

	/* Make sure the writes to the CCCB are flushed before sending the KICK. */
	wmb();

	pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
}
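
/*
 * Illustrative ordering sketch for the requirement documented above (not part
 * of this file). The reservation call is shown with its arguments elided, and
 * the blocking wait stands in for whatever completion mechanism the caller
 * actually uses:
 *
 *	struct dma_fence *fence;
 *
 *	fence = pvr_kccb_reserve_slot(...);
 *	if (fence)
 *		dma_fence_wait(fence, false);	// reference handling omitted
 *	... write commands to the Client CCB ...
 *	pvr_cccb_send_kccb_kick(pvr_dev, &ctx->cccb, ctx_fw_addr, hwrt);
 */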

/**
 * pvr_cccb_send_kccb_combined_kick() - Send a combined geometry/fragment KCCB kick
 * @pvr_dev: Device pointer.
 * @geom_cccb: Client CCB for the geometry job.
 * @frag_cccb: Client CCB for the fragment job.
 * @geom_ctx_fw_addr: FW virtual address for the context owning the geometry Client CCB.
 * @frag_ctx_fw_addr: FW virtual address for the context owning the fragment Client CCB.
 * @hwrt: HWRT data set associated with this kick. May be %NULL.
 * @frag_is_pr: True if the fragment job is a partial render.
 *
 * As with pvr_cccb_send_kccb_kick(), you must call pvr_kccb_reserve_slot() and
 * wait for the returned fence to signal (if pvr_kccb_reserve_slot() didn't
 * return %NULL) before calling this function.
 */
void
pvr_cccb_send_kccb_combined_kick(struct pvr_device *pvr_dev,
				 struct pvr_cccb *geom_cccb,
				 struct pvr_cccb *frag_cccb,
				 u32 geom_ctx_fw_addr,
				 u32 frag_ctx_fw_addr,
				 struct pvr_hwrt_data *hwrt,
				 bool frag_is_pr)
{
	struct rogue_fwif_kccb_cmd cmd_kick = {
		.cmd_type = ROGUE_FWIF_KCCB_CMD_COMBINED_GEOM_FRAG_KICK,
	};

	fill_cmd_kick_data(geom_cccb, geom_ctx_fw_addr, hwrt,
			   &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.geom_cmd_kick_data);

	/* If this is a partial-render job, we don't attach resources to the cleanup-ctl
	 * array, because the resources are already retained by the geometry job.
	 */
	fill_cmd_kick_data(frag_cccb, frag_ctx_fw_addr, frag_is_pr ? NULL : hwrt,
			   &cmd_kick.cmd_data.combined_geom_frag_cmd_kick_data.frag_cmd_kick_data);

	/* Make sure the writes to the CCCB are flushed before sending the KICK. */
	wmb();

	pvr_kccb_send_cmd_reserved_powered(pvr_dev, &cmd_kick, NULL);
}