// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

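/* Kick the message worker unless scheduler submission is paused */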
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

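/* Re-kick the message worker if any messages are still pending */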
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	xe_sched_msg_unlock(sched);
}

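/* Dequeue the oldest pending message, or return NULL if the list is empty */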
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	xe_sched_msg_lock(sched);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del_init(&msg->link);
	xe_sched_msg_unlock(sched);

	return msg;
}

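/* Worker: process one message, then re-queue itself if more are pending */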
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

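/* Set up the message list and worker, then initialize the base DRM scheduler */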
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
			      hang_limit, timeout, timeout_wq, score, name,
			      dev);
}

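/* Stop submission and tear down the base DRM scheduler */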
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

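/* Unpause the base scheduler workqueue and kick message processing */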
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

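/* Pause the base scheduler workqueue and wait out in-flight message work */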
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

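/* Restart the timeout (TDR) handler with the scheduler's configured timeout */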
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
{
	drm_sched_resume_timeout(&sched->base, sched->base.timeout);
}

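/* Queue a message, taking and releasing the message lock */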
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}

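/* Queue a message with the message lock already held, then kick the worker */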
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg)
{
	lockdep_assert_held(&sched->base.job_list_lock);

	list_add_tail(&msg->link, &sched->msgs);
	xe_sched_process_msg_queue(sched);
}