// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"
8 static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
10 if (!READ_ONCE(sched->base.pause_submit))
11 queue_work(sched->base.submit_wq, &sched->work_process_msg);
14 static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
16 struct xe_sched_msg *msg;
18 xe_sched_msg_lock(sched);
19 msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
21 xe_sched_process_msg_queue(sched);
22 xe_sched_msg_unlock(sched);
25 static struct xe_sched_msg *
26 xe_sched_get_msg(struct xe_gpu_scheduler *sched)
28 struct xe_sched_msg *msg;
30 xe_sched_msg_lock(sched);
31 msg = list_first_entry_or_null(&sched->msgs,
32 struct xe_sched_msg, link);
34 list_del_init(&msg->link);
35 xe_sched_msg_unlock(sched);
40 static void xe_sched_process_msg_work(struct work_struct *w)
42 struct xe_gpu_scheduler *sched =
43 container_of(w, struct xe_gpu_scheduler, work_process_msg);
44 struct xe_sched_msg *msg;
46 if (READ_ONCE(sched->base.pause_submit))
49 msg = xe_sched_get_msg(sched);
51 sched->ops->process_msg(msg);
53 xe_sched_process_msg_queue_if_ready(sched);
57 int xe_sched_init(struct xe_gpu_scheduler *sched,
58 const struct drm_sched_backend_ops *ops,
59 const struct xe_sched_backend_ops *xe_ops,
60 struct workqueue_struct *submit_wq,
61 uint32_t hw_submission, unsigned hang_limit,
62 long timeout, struct workqueue_struct *timeout_wq,
63 atomic_t *score, const char *name,
67 INIT_LIST_HEAD(&sched->msgs);
68 INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);
70 return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
71 hang_limit, timeout, timeout_wq, score, name,
75 void xe_sched_fini(struct xe_gpu_scheduler *sched)
77 xe_sched_submission_stop(sched);
78 drm_sched_fini(&sched->base);
81 void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
83 drm_sched_wqueue_start(&sched->base);
84 queue_work(sched->base.submit_wq, &sched->work_process_msg);
87 void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
89 drm_sched_wqueue_stop(&sched->base);
90 cancel_work_sync(&sched->work_process_msg);
93 void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched)
95 drm_sched_resume_timeout(&sched->base, sched->base.timeout);
/*
 * Enqueue @msg for processing, taking the message lock. Thin locking
 * wrapper around xe_sched_add_msg_locked() for callers that do not
 * already hold the lock.
 */
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	xe_sched_msg_lock(sched);
	xe_sched_add_msg_locked(sched, msg);
	xe_sched_msg_unlock(sched);
}
106 void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
107 struct xe_sched_msg *msg)
109 lockdep_assert_held(&sched->base.job_list_lock);
111 list_add_tail(&msg->link, &sched->msgs);
112 xe_sched_process_msg_queue(sched);