// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "clock.h"

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>

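/*
 * Comparison function for the timer heap: orders timers by expiry time,
 * so the soonest-expiring timer sits at the root.
 */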
static inline bool io_timer_cmp(const void *l, const void *r, void __always_unused *args)
{
        struct io_timer **_l = (struct io_timer **)l;
        struct io_timer **_r = (struct io_timer **)r;

        return (*_l)->expire < (*_r)->expire;
}

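/* Heap callbacks; leaving .swp NULL selects the min_heap default swap. */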
static const struct min_heap_callbacks callbacks = {
        .less = io_timer_cmp,
        .swp = NULL,
};

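/*
 * Add a timer to the clock's heap, unless it's already pending. If the
 * timer has already expired, its callback runs synchronously here (with
 * timer_lock dropped) instead of being queued.
 */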
void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
{
        spin_lock(&clock->timer_lock);

        if (time_after_eq64((u64) atomic64_read(&clock->now), timer->expire)) {
                spin_unlock(&clock->timer_lock);
                timer->fn(timer);
                return;
        }

        for (size_t i = 0; i < clock->timers.nr; i++)
                if (clock->timers.data[i] == timer)
                        goto out;

        BUG_ON(!min_heap_push(&clock->timers, &timer, &callbacks, NULL));
out:
        spin_unlock(&clock->timer_lock);
}

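/* Remove a timer from the heap; harmless if it isn't pending. */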
void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
{
        spin_lock(&clock->timer_lock);

        for (size_t i = 0; i < clock->timers.nr; i++)
                if (clock->timers.data[i] == timer) {
                        min_heap_del(&clock->timers, i, &callbacks, NULL);
                        break;
                }

        spin_unlock(&clock->timer_lock);
}

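/*
 * On-stack wait state shared between an IO clock timer and an optional
 * CPU (jiffies) timer: whichever fires first sets @expired and wakes
 * @task.
 */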
struct io_clock_wait {
        struct io_timer         io_timer;
        struct timer_list       cpu_timer;
        struct task_struct      *task;
        int                     expired;
};

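/* IO clock timer callback: flag the wait as expired and wake the waiter. */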
static void io_clock_wait_fn(struct io_timer *timer)
{
        struct io_clock_wait *wait = container_of(timer,
                                struct io_clock_wait, io_timer);

        wait->expired = 1;
        wake_up_process(wait->task);
}

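/* CPU timer callback: same wakeup path as io_clock_wait_fn(). */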
static void io_clock_cpu_timeout(struct timer_list *timer)
{
        struct io_clock_wait *wait = container_of(timer,
                                struct io_clock_wait, cpu_timer);

        wait->expired = 1;
        wake_up_process(wait->task);
}

void bch2_io_clock_schedule_timeout(struct io_clock *clock, u64 until)
{
        struct io_clock_wait wait = {
                .io_timer.expire        = until,
                .io_timer.fn            = io_clock_wait_fn,
                .io_timer.fn2           = (void *) _RET_IP_,
                .task                   = current,
        };

        bch2_io_timer_add(clock, &wait.io_timer);
        schedule();
        bch2_io_timer_del(clock, &wait.io_timer);
}

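/*
 * Sleep until the IO clock reaches @io_until or @cpu_timeout jiffies
 * elapse, whichever comes first. The loop body runs only once, so any
 * wakeup (timer expiry, a kthread stop request, or a spurious wakeup)
 * ends the wait. If @cpu_timeout is MAX_SCHEDULE_TIMEOUT, no CPU timer
 * is armed.
 */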
void bch2_kthread_io_clock_wait(struct io_clock *clock,
                                u64 io_until, unsigned long cpu_timeout)
{
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        struct io_clock_wait wait = {
                .io_timer.expire        = io_until,
                .io_timer.fn            = io_clock_wait_fn,
                .io_timer.fn2           = (void *) _RET_IP_,
                .task                   = current,
        };

        bch2_io_timer_add(clock, &wait.io_timer);

        timer_setup_on_stack(&wait.cpu_timer, io_clock_cpu_timeout, 0);

        if (cpu_timeout != MAX_SCHEDULE_TIMEOUT)
                mod_timer(&wait.cpu_timer, cpu_timeout + jiffies);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread && kthread_should_stop())
                        break;

                if (wait.expired)
                        break;

                schedule();
                try_to_freeze();
        } while (0);

        __set_current_state(TASK_RUNNING);
        del_timer_sync(&wait.cpu_timer);
        destroy_timer_on_stack(&wait.cpu_timer);
        bch2_io_timer_del(clock, &wait.io_timer);
}

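/*
 * Pop and return the soonest-expiring timer if it has expired as of @now,
 * NULL otherwise. Caller must hold timer_lock.
 */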
static struct io_timer *get_expired_timer(struct io_clock *clock, u64 now)
{
        struct io_timer *ret = NULL;

        if (clock->timers.nr &&
            time_after_eq64(now, clock->timers.data[0]->expire)) {
                ret = *min_heap_peek(&clock->timers);
                min_heap_pop(&clock->timers, &callbacks, NULL);
        }

        return ret;
}

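/*
 * Advance the clock by @sectors and run the callbacks of every timer
 * that has now expired; callbacks are invoked with timer_lock held.
 */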
void __bch2_increment_clock(struct io_clock *clock, u64 sectors)
{
        struct io_timer *timer;
        u64 now = atomic64_add_return(sectors, &clock->now);

        spin_lock(&clock->timer_lock);
        while ((timer = get_expired_timer(clock, now)))
                timer->fn(timer);
        spin_unlock(&clock->timer_lock);
}

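/*
 * Debug helper: print the current clock time and all pending timers.
 * Bumping out->atomic tells the printbuf not to reallocate while we hold
 * the spinlock.
 */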
void bch2_io_timers_to_text(struct printbuf *out, struct io_clock *clock)
{
        out->atomic++;
        spin_lock(&clock->timer_lock);
        u64 now = atomic64_read(&clock->now);

        printbuf_tabstop_push(out, 40);
        prt_printf(out, "current time:\t%llu\n", now);

        for (unsigned i = 0; i < clock->timers.nr; i++)
                prt_printf(out, "%ps %ps:\t%llu\n",
                       clock->timers.data[i]->fn,
                       clock->timers.data[i]->fn2,
                       clock->timers.data[i]->expire);
        spin_unlock(&clock->timer_lock);
        --out->atomic;
}

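/* Tear down: free the timer heap and the per-cpu sector buffer. */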
void bch2_io_clock_exit(struct io_clock *clock)
{
        free_heap(&clock->timers);
        free_percpu(clock->pcpu_buf);
}

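/*
 * Set up an IO clock: max_slop bounds how far the per-cpu sector counts
 * can collectively lag behind clock->now before being flushed into it
 * (presumably via the fast path in clock.h).
 */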
int bch2_io_clock_init(struct io_clock *clock)
{
        atomic64_set(&clock->now, 0);
        spin_lock_init(&clock->timer_lock);

        clock->max_slop = IO_CLOCK_PCPU_SECTORS * num_possible_cpus();

        clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
        if (!clock->pcpu_buf)
                return -BCH_ERR_ENOMEM_io_clock_init;

        if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
                return -BCH_ERR_ENOMEM_io_clock_init;

        return 0;
}