linux.git: drivers/gpu/drm/imagination/pvr_job.c
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_context.h"
#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_hwrt.h"
#include "pvr_job.h"
#include "pvr_mmu.h"
#include "pvr_power.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_fwif_client.h"
#include "pvr_stream.h"
#include "pvr_stream_defs.h"
#include "pvr_sync.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <linux/types.h>
#include <uapi/drm/pvr_drm.h>

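/**
 * pvr_job_release() - Free a job once its last reference is dropped.
 * @kref: The job's reference counter.
 *
 * Called by kref_put() when the job's refcount hits zero. Removes the job
 * from the device's job ID allocator, drops the HWRT data and context
 * references, cleans up queue state, releases the power reference, and
 * frees the FW command buffer and the job itself.
 */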
static void pvr_job_release(struct kref *kref)
{
        struct pvr_job *job = container_of(kref, struct pvr_job, ref_count);

        xa_erase(&job->pvr_dev->job_ids, job->id);

        pvr_hwrt_data_put(job->hwrt);
        pvr_context_put(job->ctx);

        WARN_ON(job->paired_job);

        pvr_queue_job_cleanup(job);
        pvr_job_release_pm_ref(job);

        kfree(job->cmd);
        kfree(job);
}

/**
 * pvr_job_put() - Release reference on job
 * @job: Target job.
 */
void
pvr_job_put(struct pvr_job *job)
{
        if (job)
                kref_put(&job->ref_count, pvr_job_release);
}

/**
 * pvr_job_process_stream() - Build job FW structure from stream
 * @pvr_dev: Device pointer.
 * @cmd_defs: Stream definition.
 * @stream: Pointer to command stream.
 * @stream_size: Size of command stream, in bytes.
 * @job: Pointer to job.
 *
 * Caller is responsible for freeing the output structure.
 *
 * Returns:
 *  * 0 on success,
 *  * -%ENOMEM on out of memory, or
 *  * -%EINVAL on malformed stream.
 */
static int
pvr_job_process_stream(struct pvr_device *pvr_dev, const struct pvr_stream_cmd_defs *cmd_defs,
                       void *stream, u32 stream_size, struct pvr_job *job)
{
        int err;

        job->cmd = kzalloc(cmd_defs->dest_size, GFP_KERNEL);
        if (!job->cmd)
                return -ENOMEM;

        job->cmd_len = cmd_defs->dest_size;

        err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, job->cmd);
        if (err) {
                kfree(job->cmd);
                /* Clear the dangling pointer so pvr_job_release() doesn't
                 * free the command buffer a second time.
                 */
                job->cmd = NULL;
        }

        return err;
}

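/**
 * pvr_fw_cmd_init() - Copy a command stream from user space and build the
 * FW command for a job.
 * @pvr_dev: Device pointer.
 * @job: Target job.
 * @stream_def: Stream definition matching the job type.
 * @stream_userptr: User pointer to the command stream.
 * @stream_len: Size of the command stream, in bytes.
 *
 * Returns:
 *  * 0 on success,
 *  * -%ENOMEM on out of memory,
 *  * -%EFAULT if the stream can't be copied from user space, or
 *  * -%EINVAL on malformed stream.
 */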
static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job,
                           const struct pvr_stream_cmd_defs *stream_def,
                           u64 stream_userptr, u32 stream_len)
{
        void *stream;
        int err;

        stream = kzalloc(stream_len, GFP_KERNEL);
        if (!stream)
                return -ENOMEM;

        if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) {
                err = -EFAULT;
                goto err_free_stream;
        }

        err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job);

err_free_stream:
        kfree(stream);

        return err;
}

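/*
 * The convert_*_flags() helpers below translate DRM_PVR_SUBMIT_JOB_* UAPI
 * flags into the equivalent ROGUE_* flags understood by the firmware.
 * Unknown bits are rejected beforehand by the per-job-type *_FLAGS_MASK
 * checks in the pvr_*_job_fw_cmd_init() functions.
 */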
static u32
convert_geom_flags(u32 in_flags)
{
        u32 out_flags = 0;

        if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST)
                out_flags |= ROGUE_GEOM_FLAGS_FIRSTKICK;
        if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST)
                out_flags |= ROGUE_GEOM_FLAGS_LASTKICK;
        if (in_flags & DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
                out_flags |= ROGUE_GEOM_FLAGS_SINGLE_CORE;

        return out_flags;
}

static u32
convert_frag_flags(u32 in_flags)
{
        u32 out_flags = 0;

        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE)
                out_flags |= ROGUE_FRAG_FLAGS_SINGLE_CORE;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER)
                out_flags |= ROGUE_FRAG_FLAGS_DEPTHBUFFER;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER)
                out_flags |= ROGUE_FRAG_FLAGS_STENCILBUFFER;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP)
                out_flags |= ROGUE_FRAG_FLAGS_PREVENT_CDM_OVERLAP;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER)
                out_flags |= ROGUE_FRAG_FLAGS_SCRATCHBUFFER;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS)
                out_flags |= ROGUE_FRAG_FLAGS_GET_VIS_RESULTS;
        if (in_flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
                out_flags |= ROGUE_FRAG_FLAGS_DISABLE_PIXELMERGE;

        return out_flags;
}

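/**
 * pvr_geom_job_fw_cmd_init() - Validate args and build the FW command for a
 * geometry job.
 * @job: Target job.
 * @args: Job args copied from user space.
 *
 * Geometry jobs must be submitted on a render context and must reference a
 * HWRT data set.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid flags, wrong context type or missing HWRT, or
 *  * Any error returned by pvr_fw_cmd_init().
 */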
static int
pvr_geom_job_fw_cmd_init(struct pvr_job *job,
                         struct drm_pvr_job *args)
{
        struct rogue_fwif_cmd_geom *cmd;
        int err;

        if (args->flags & ~DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK)
                return -EINVAL;

        if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
                return -EINVAL;

        if (!job->hwrt)
                return -EINVAL;

        job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_GEOM;
        err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_geom_stream,
                              args->cmd_stream, args->cmd_stream_len);
        if (err)
                return err;

        cmd = job->cmd;
        cmd->cmd_shared.cmn.frame_num = 0;
        cmd->flags = convert_geom_flags(args->flags);
        pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
        return 0;
}

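/**
 * pvr_frag_job_fw_cmd_init() - Validate args and build the FW command for a
 * fragment job.
 * @job: Target job.
 * @args: Job args copied from user space.
 *
 * Like geometry jobs, fragment jobs require a render context and a HWRT data
 * set. Partial renders are flagged to the FW with a dedicated command type.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid flags, wrong context type or missing HWRT, or
 *  * Any error returned by pvr_fw_cmd_init().
 */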
static int
pvr_frag_job_fw_cmd_init(struct pvr_job *job,
                         struct drm_pvr_job *args)
{
        struct rogue_fwif_cmd_frag *cmd;
        int err;

        if (args->flags & ~DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK)
                return -EINVAL;

        if (job->ctx->type != DRM_PVR_CTX_TYPE_RENDER)
                return -EINVAL;

        if (!job->hwrt)
                return -EINVAL;

        job->fw_ccb_cmd_type = (args->flags & DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER) ?
                               ROGUE_FWIF_CCB_CMD_TYPE_FRAG_PR :
                               ROGUE_FWIF_CCB_CMD_TYPE_FRAG;
        err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_frag_stream,
                              args->cmd_stream, args->cmd_stream_len);
        if (err)
                return err;

        cmd = job->cmd;
        cmd->cmd_shared.cmn.frame_num = 0;
        cmd->flags = convert_frag_flags(args->flags);
        pvr_fw_object_get_fw_addr(job->hwrt->fw_obj, &cmd->cmd_shared.hwrt_data_fw_addr);
        return 0;
}

static u32
convert_compute_flags(u32 in_flags)
{
        u32 out_flags = 0;

        if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP)
                out_flags |= ROGUE_COMPUTE_FLAG_PREVENT_ALL_OVERLAP;
        if (in_flags & DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
                out_flags |= ROGUE_COMPUTE_FLAG_SINGLE_CORE;

        return out_flags;
}

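/**
 * pvr_compute_job_fw_cmd_init() - Validate args and build the FW command for
 * a compute job.
 * @job: Target job.
 * @args: Job args copied from user space.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid flags or wrong context type, or
 *  * Any error returned by pvr_fw_cmd_init().
 */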
static int
pvr_compute_job_fw_cmd_init(struct pvr_job *job,
                            struct drm_pvr_job *args)
{
        struct rogue_fwif_cmd_compute *cmd;
        int err;

        if (args->flags & ~DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK)
                return -EINVAL;

        if (job->ctx->type != DRM_PVR_CTX_TYPE_COMPUTE)
                return -EINVAL;

        job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_CDM;
        err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_compute_stream,
                              args->cmd_stream, args->cmd_stream_len);
        if (err)
                return err;

        cmd = job->cmd;
        cmd->common.frame_num = 0;
        cmd->flags = convert_compute_flags(args->flags);
        return 0;
}

static u32
convert_transfer_flags(u32 in_flags)
{
        u32 out_flags = 0;

        if (in_flags & DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE)
                out_flags |= ROGUE_TRANSFER_FLAGS_SINGLE_CORE;

        return out_flags;
}

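/**
 * pvr_transfer_job_fw_cmd_init() - Validate args and build the FW command
 * for a transfer job.
 * @job: Target job.
 * @args: Job args copied from user space.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on invalid flags or wrong context type, or
 *  * Any error returned by pvr_fw_cmd_init().
 */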
static int
pvr_transfer_job_fw_cmd_init(struct pvr_job *job,
                             struct drm_pvr_job *args)
{
        struct rogue_fwif_cmd_transfer *cmd;
        int err;

        if (args->flags & ~DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK)
                return -EINVAL;

        if (job->ctx->type != DRM_PVR_CTX_TYPE_TRANSFER_FRAG)
                return -EINVAL;

        job->fw_ccb_cmd_type = ROGUE_FWIF_CCB_CMD_TYPE_TQ_3D;
        err = pvr_fw_cmd_init(job->pvr_dev, job, &pvr_cmd_transfer_stream,
                              args->cmd_stream, args->cmd_stream_len);
        if (err)
                return err;

        cmd = job->cmd;
        cmd->common.frame_num = 0;
        cmd->flags = convert_transfer_flags(args->flags);
        return 0;
}

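/**
 * pvr_job_fw_cmd_init() - Dispatch FW command setup based on job type.
 * @job: Target job.
 * @args: Job args copied from user space.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EINVAL on unknown job type, or
 *  * Any error returned by the per-type init helpers above.
 */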
static int
pvr_job_fw_cmd_init(struct pvr_job *job,
                    struct drm_pvr_job *args)
{
        switch (args->type) {
        case DRM_PVR_JOB_TYPE_GEOMETRY:
                return pvr_geom_job_fw_cmd_init(job, args);

        case DRM_PVR_JOB_TYPE_FRAGMENT:
                return pvr_frag_job_fw_cmd_init(job, args);

        case DRM_PVR_JOB_TYPE_COMPUTE:
                return pvr_compute_job_fw_cmd_init(job, args);

        case DRM_PVR_JOB_TYPE_TRANSFER_FRAG:
                return pvr_transfer_job_fw_cmd_init(job, args);

        default:
                return -EINVAL;
        }
}

/**
 * struct pvr_job_data - Helper container for pairing jobs with the
 * sync_ops supplied for them by the user.
 */
struct pvr_job_data {
        /** @job: Pointer to the job. */
        struct pvr_job *job;

        /** @sync_ops: Pointer to the sync_ops associated with @job. */
        struct drm_pvr_sync_op *sync_ops;

        /** @sync_op_count: Number of members of @sync_ops. */
        u32 sync_op_count;
};

/**
 * prepare_job_syncs() - Prepare all sync objects for a single job.
 * @pvr_file: PowerVR file.
 * @job_data: Precreated job and sync_ops array.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by pvr_sync_signal_array_collect_ops(),
 *    pvr_sync_add_deps_to_job(), drm_sched_job_add_resv_dependencies() or
 *    pvr_sync_signal_array_update_fences().
 */
static int
prepare_job_syncs(struct pvr_file *pvr_file,
                  struct pvr_job_data *job_data,
                  struct xarray *signal_array)
{
        struct dma_fence *done_fence;
        int err = pvr_sync_signal_array_collect_ops(signal_array,
                                                    from_pvr_file(pvr_file),
                                                    job_data->sync_op_count,
                                                    job_data->sync_ops);

        if (err)
                return err;

        err = pvr_sync_add_deps_to_job(pvr_file, &job_data->job->base,
                                       job_data->sync_op_count,
                                       job_data->sync_ops, signal_array);
        if (err)
                return err;

        if (job_data->job->hwrt) {
                /* The geometry job writes the HWRT region headers, which are
                 * then read by the fragment job.
                 */
                struct drm_gem_object *obj =
                        gem_from_pvr_gem(job_data->job->hwrt->fw_obj->gem);
                enum dma_resv_usage usage =
                        dma_resv_usage_rw(job_data->job->type ==
                                          DRM_PVR_JOB_TYPE_GEOMETRY);

                dma_resv_lock(obj->resv, NULL);
                err = drm_sched_job_add_resv_dependencies(&job_data->job->base,
                                                          obj->resv, usage);
                dma_resv_unlock(obj->resv);
                if (err)
                        return err;
        }

        /* We need to arm the job to get the job done fence. */
        done_fence = pvr_queue_job_arm(job_data->job);

        err = pvr_sync_signal_array_update_fences(signal_array,
                                                  job_data->sync_op_count,
                                                  job_data->sync_ops,
                                                  done_fence);
        return err;
}

/**
 * prepare_job_syncs_for_each() - Prepare all sync objects for an array of jobs.
 * @pvr_file: PowerVR file.
 * @job_data: Array of precreated jobs and their sync_ops.
 * @job_count: Number of jobs; trimmed on error to the number of jobs whose
 * syncs were successfully prepared.
 * @signal_array: xarray to receive signal sync objects.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by prepare_job_syncs().
 */
static int
prepare_job_syncs_for_each(struct pvr_file *pvr_file,
                           struct pvr_job_data *job_data,
                           u32 *job_count,
                           struct xarray *signal_array)
{
        for (u32 i = 0; i < *job_count; i++) {
                int err = prepare_job_syncs(pvr_file, &job_data[i],
                                            signal_array);

                if (err) {
                        *job_count = i;
                        return err;
                }
        }

        return 0;
}

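/**
 * create_job() - Create and initialise a single job from its ioctl args.
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @args: Job args copied from user space.
 *
 * Allocates the job, assigns it a device-unique ID, resolves the context
 * handle (and the HWRT data handles, if any), builds the FW command and
 * initialises queue state. On failure all partially initialised state is
 * released through pvr_job_put().
 *
 * Returns:
 *  * A valid pointer to a &struct pvr_job, or
 *  * An ERR_PTR()-encoded error.
 */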
static struct pvr_job *
create_job(struct pvr_device *pvr_dev,
           struct pvr_file *pvr_file,
           struct drm_pvr_job *args)
{
        struct pvr_job *job = NULL;
        int err;

        if (!args->cmd_stream || !args->cmd_stream_len)
                return ERR_PTR(-EINVAL);

        if (args->type != DRM_PVR_JOB_TYPE_GEOMETRY &&
            args->type != DRM_PVR_JOB_TYPE_FRAGMENT &&
            (args->hwrt.set_handle || args->hwrt.data_index))
                return ERR_PTR(-EINVAL);

        job = kzalloc(sizeof(*job), GFP_KERNEL);
        if (!job)
                return ERR_PTR(-ENOMEM);

        kref_init(&job->ref_count);
        job->type = args->type;
        job->pvr_dev = pvr_dev;

        err = xa_alloc(&pvr_dev->job_ids, &job->id, job, xa_limit_32b, GFP_KERNEL);
        if (err)
                goto err_put_job;

        job->ctx = pvr_context_lookup(pvr_file, args->context_handle);
        if (!job->ctx) {
                err = -EINVAL;
                goto err_put_job;
        }

        if (args->hwrt.set_handle) {
                job->hwrt = pvr_hwrt_data_lookup(pvr_file, args->hwrt.set_handle,
                                                 args->hwrt.data_index);
                if (!job->hwrt) {
                        err = -EINVAL;
                        goto err_put_job;
                }
        }

        err = pvr_job_fw_cmd_init(job, args);
        if (err)
                goto err_put_job;

        err = pvr_queue_job_init(job);
        if (err)
                goto err_put_job;

        return job;

err_put_job:
        pvr_job_put(job);
        return ERR_PTR(err);
}

/**
 * pvr_job_data_fini() - Cleanup all allocs used to set up job submission.
 * @job_data: Job data array.
 * @job_count: Number of members of @job_data.
 */
static void
pvr_job_data_fini(struct pvr_job_data *job_data, u32 job_count)
{
        for (u32 i = 0; i < job_count; i++) {
                pvr_job_put(job_data[i].job);
                kvfree(job_data[i].sync_ops);
        }
}

/**
 * pvr_job_data_init() - Init an array of created jobs, associating them with
 * the appropriate sync_ops args, which will be copied in.
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @job_args: Job args array copied from user.
 * @job_count: Number of members of @job_args; trimmed on error to the number
 * of successfully initialised entries.
 * @job_data_out: Job data array.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error code returned by create_job() or PVR_UOBJ_GET_ARRAY().
 */
static int pvr_job_data_init(struct pvr_device *pvr_dev,
                             struct pvr_file *pvr_file,
                             struct drm_pvr_job *job_args,
                             u32 *job_count,
                             struct pvr_job_data *job_data_out)
{
        int err = 0, i = 0;

        for (; i < *job_count; i++) {
                job_data_out[i].job =
                        create_job(pvr_dev, pvr_file, &job_args[i]);
                err = PTR_ERR_OR_ZERO(job_data_out[i].job);

                if (err) {
                        *job_count = i;
                        job_data_out[i].job = NULL;
                        goto err_cleanup;
                }

                err = PVR_UOBJ_GET_ARRAY(job_data_out[i].sync_ops,
                                         &job_args[i].sync_ops);
                if (err) {
                        *job_count = i;

                        /* Ensure the job created above is also cleaned up. */
                        i++;
                        goto err_cleanup;
                }

                job_data_out[i].sync_op_count = job_args[i].sync_ops.count;
        }

        return 0;

err_cleanup:
        pvr_job_data_fini(job_data_out, i);

        return err;
}

static void
push_jobs(struct pvr_job_data *job_data, u32 job_count)
{
        for (u32 i = 0; i < job_count; i++)
                pvr_queue_job_push(job_data[i].job);
}

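/*
 * Lock the FW object's reservation and pre-allocate room for one fence, so
 * that the dma_resv_add_fence() call in update_job_resvs() cannot fail later.
 */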
static int
prepare_fw_obj_resv(struct drm_exec *exec, struct pvr_fw_object *fw_obj)
{
        return drm_exec_prepare_obj(exec, gem_from_pvr_gem(fw_obj->gem), 1);
}

static int
jobs_lock_all_objs(struct drm_exec *exec, struct pvr_job_data *job_data,
                   u32 job_count)
{
        for (u32 i = 0; i < job_count; i++) {
                struct pvr_job *job = job_data[i].job;

                /* Grab a lock on the context, to guard against
                 * concurrent submission to the same queue.
                 */
                int err = drm_exec_lock_obj(exec,
                                            gem_from_pvr_gem(job->ctx->fw_obj->gem));

                if (err)
                        return err;

                if (job->hwrt) {
                        err = prepare_fw_obj_resv(exec,
                                                  job->hwrt->fw_obj);
                        if (err)
                                return err;
                }
        }

        return 0;
}

static int
prepare_job_resvs_for_each(struct drm_exec *exec, struct pvr_job_data *job_data,
                           u32 job_count)
{
        drm_exec_until_all_locked(exec) {
                int err = jobs_lock_all_objs(exec, job_data, job_count);

                drm_exec_retry_on_contention(exec);
                if (err)
                        return err;
        }

        return 0;
}

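/*
 * Add the job's finished fence to the HWRT data reservation: geometry jobs
 * write the HWRT region headers, fragment jobs only read them.
 */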
static void
update_job_resvs(struct pvr_job *job)
{
        if (job->hwrt) {
                enum dma_resv_usage usage = job->type == DRM_PVR_JOB_TYPE_GEOMETRY ?
                                            DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ;
                struct drm_gem_object *obj = gem_from_pvr_gem(job->hwrt->fw_obj->gem);

                dma_resv_add_fence(obj->resv, &job->base.s_fence->finished, usage);
        }
}

static void
update_job_resvs_for_each(struct pvr_job_data *job_data, u32 job_count)
{
        for (u32 i = 0; i < job_count; i++)
                update_job_resvs(job_data[i].job);
}

static bool can_combine_jobs(struct pvr_job *a, struct pvr_job *b)
{
        struct pvr_job *geom_job = a, *frag_job = b;
        struct dma_fence *fence;
        unsigned long index;

        /* Geometry and fragment jobs can be combined if they are queued to
         * the same context and target the same HWRT.
         */
        if (a->type != DRM_PVR_JOB_TYPE_GEOMETRY ||
            b->type != DRM_PVR_JOB_TYPE_FRAGMENT ||
            a->ctx != b->ctx ||
            a->hwrt != b->hwrt)
                return false;

        xa_for_each(&frag_job->base.dependencies, index, fence) {
                /* We combine when we see an explicit geom -> frag dep. */
                if (&geom_job->base.s_fence->scheduled == fence)
                        return true;
        }

        return false;
}

static struct dma_fence *
get_last_queued_job_scheduled_fence(struct pvr_queue *queue,
                                    struct pvr_job_data *job_data,
                                    u32 cur_job_pos)
{
        /* We iterate over the current job array in reverse order to grab the
         * last to-be-queued job targeting the same queue.
         */
        for (u32 i = cur_job_pos; i > 0; i--) {
                struct pvr_job *job = job_data[i - 1].job;

                if (job->ctx == queue->ctx && job->type == queue->type)
                        return dma_fence_get(&job->base.s_fence->scheduled);
        }

        /* If we didn't find any, we just return the last queued job scheduled
         * fence attached to the queue.
         */
        return dma_fence_get(queue->last_queued_job_scheduled_fence);
}

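/**
 * pvr_jobs_link_geom_frag() - Pair adjacent geometry and fragment jobs.
 * @job_data: Array of precreated jobs and their sync_ops.
 * @job_count: Number of jobs; trimmed on error to exclude unprocessed jobs.
 *
 * Walks the job array looking for geometry jobs immediately followed by a
 * combinable fragment job (see can_combine_jobs()). A paired fragment job is
 * submitted by the geometry queue, so an extra dependency is added to keep
 * it ordered against fragment jobs queued before it, and its now-redundant
 * KCCB fence is dropped.
 *
 * Returns:
 *  * 0 on success, or
 *  * Any error returned by drm_sched_job_add_dependency().
 */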
static int
pvr_jobs_link_geom_frag(struct pvr_job_data *job_data, u32 *job_count)
{
        for (u32 i = 0; i < *job_count - 1; i++) {
                struct pvr_job *geom_job = job_data[i].job;
                struct pvr_job *frag_job = job_data[i + 1].job;
                struct pvr_queue *frag_queue;
                struct dma_fence *f;

                if (!can_combine_jobs(job_data[i].job, job_data[i + 1].job))
                        continue;

                /* The fragment job will be submitted by the geometry queue. We
                 * need to make sure it comes after all the other fragment jobs
                 * queued before it.
                 */
                frag_queue = pvr_context_get_queue_for_job(frag_job->ctx,
                                                           frag_job->type);
                f = get_last_queued_job_scheduled_fence(frag_queue, job_data,
                                                        i);
                if (f) {
                        int err = drm_sched_job_add_dependency(&geom_job->base,
                                                               f);
                        if (err) {
                                *job_count = i;
                                return err;
                        }
                }

                /* The KCCB slot will be reserved by the geometry job, so we can
                 * drop the KCCB fence on the fragment job.
                 */
                pvr_kccb_fence_put(frag_job->kccb_fence);
                frag_job->kccb_fence = NULL;

                geom_job->paired_job = frag_job;
                frag_job->paired_job = geom_job;

                /* Skip the fragment job we just paired to the geometry job. */
                i++;
        }

        return 0;
}

/**
 * pvr_submit_jobs() - Submit jobs to the GPU
 * @pvr_dev: Target PowerVR device.
 * @pvr_file: Pointer to PowerVR file structure.
 * @args: Ioctl args.
 *
 * Submission is asynchronous: jobs are pushed to their queues once their
 * dependencies are resolved, and completion is signalled through the
 * requested sync operations.
 *
 * Returns:
 *  * 0 on success,
 *  * -%EFAULT if arguments cannot be copied from user space,
 *  * -%EINVAL on invalid arguments, or
 *  * Any other error.
 */
int
pvr_submit_jobs(struct pvr_device *pvr_dev, struct pvr_file *pvr_file,
                struct drm_pvr_ioctl_submit_jobs_args *args)
{
        struct pvr_job_data *job_data = NULL;
        struct drm_pvr_job *job_args;
        struct xarray signal_array;
        u32 jobs_alloced = 0;
        struct drm_exec exec;
        int err;

        if (!args->jobs.count)
                return -EINVAL;

        err = PVR_UOBJ_GET_ARRAY(job_args, &args->jobs);
        if (err)
                return err;

        job_data = kvmalloc_array(args->jobs.count, sizeof(*job_data),
                                  GFP_KERNEL | __GFP_ZERO);
        if (!job_data) {
                err = -ENOMEM;
                goto out_free;
        }

        err = pvr_job_data_init(pvr_dev, pvr_file, job_args, &args->jobs.count,
                                job_data);
        if (err)
                goto out_free;

        jobs_alloced = args->jobs.count;

        /*
         * Flush MMU if needed - this has been deferred until now to avoid
         * overuse of this expensive operation.
         */
        err = pvr_mmu_flush_exec(pvr_dev, false);
        if (err)
                goto out_job_data_cleanup;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES, 0);

        xa_init_flags(&signal_array, XA_FLAGS_ALLOC);

        err = prepare_job_syncs_for_each(pvr_file, job_data, &args->jobs.count,
                                         &signal_array);
        if (err)
                goto out_exec_fini;

        err = prepare_job_resvs_for_each(&exec, job_data, args->jobs.count);
        if (err)
                goto out_exec_fini;

        err = pvr_jobs_link_geom_frag(job_data, &args->jobs.count);
        if (err)
                goto out_exec_fini;

        /* Anything past this point must succeed, because we start exposing
         * job finished fences to the outside world.
         */
        update_job_resvs_for_each(job_data, args->jobs.count);
        push_jobs(job_data, args->jobs.count);
        pvr_sync_signal_array_push_fences(&signal_array);
        err = 0;

out_exec_fini:
        drm_exec_fini(&exec);
        pvr_sync_signal_array_cleanup(&signal_array);

out_job_data_cleanup:
        pvr_job_data_fini(job_data, jobs_alloced);

out_free:
        kvfree(job_data);
        kvfree(job_args);

        return err;
}