/*
 * mm/pdflush.c - worker threads for writing back filesystem data
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * 09Apr2002    [email protected]
 *              Initial version
 * 29Feb2004    [email protected]
 *              Move worker thread creation to kthread to avoid chewing
 *              up stack space with nested calls to kernel_thread.
 */

#include <linux/sched.h>
#include <linux/list.h>
#include <linux/signal.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>           // Needed by writeback.h
#include <linux/writeback.h>    // Prototypes pdflush_operation()
#include <linux/kthread.h>


/*
 * Minimum and maximum number of pdflush instances
 */
#define MIN_PDFLUSH_THREADS     2
#define MAX_PDFLUSH_THREADS     8

static void start_one_pdflush_thread(void);


/*
 * The pdflush threads are worker threads for writing back dirty data.
 * Ideally, we'd like one thread per active disk spindle.  But the disk
 * topology is very hard to divine at this level.  Instead, we take
 * care in various places to prevent more than one pdflush thread from
 * performing writeback against a single filesystem.  pdflush threads
 * have the PF_FLUSHER flag set in current->flags to aid in this.
 */

/*
 * All the pdflush threads.  Protected by pdflush_lock.
 */
static LIST_HEAD(pdflush_list);
static DEFINE_SPINLOCK(pdflush_lock);

/*
 * The count of currently-running pdflush threads.  Protected
 * by pdflush_lock.
 *
 * Readable by sysctl, but not writable.  Published to userspace at
 * /proc/sys/vm/nr_pdflush_threads.
 */
int nr_pdflush_threads = 0;

/*
 * The time at which the pdflush thread pool last went empty.
 */
static unsigned long last_empty_jifs;

/*
 * The pdflush thread.
 *
 * Thread pool management algorithm:
 *
 * - The minimum and maximum number of pdflush instances are bound
 *   by MIN_PDFLUSH_THREADS and MAX_PDFLUSH_THREADS.
 *
 * - If there have been no idle pdflush instances for 1 second, create
 *   a new one.
 *
 * - If the least-recently-went-to-sleep pdflush thread has been asleep
 *   for more than one second, terminate a thread.
 */

/*
 * A structure for passing work to a pdflush thread.  Also for passing
 * state information between pdflush threads.  Protected by pdflush_lock.
 */
struct pdflush_work {
        struct task_struct *who;        /* The thread */
        void (*fn)(unsigned long);      /* A callback function */
        unsigned long arg0;             /* An argument to the callback */
        struct list_head list;          /* On pdflush_list, when idle */
        unsigned long when_i_went_to_sleep;
};

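/*
 * Each pdflush thread keeps one pdflush_work on its own stack and parks
 * it on pdflush_list while it is idle.  pdflush_operation() takes an entry
 * off the list, fills in ->fn and ->arg0, and wakes the owning thread,
 * which runs the callback and then puts itself back on the list.
 */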
static int __pdflush(struct pdflush_work *my_work)
{
        current->flags |= PF_FLUSHER;
        my_work->fn = NULL;
        my_work->who = current;
        INIT_LIST_HEAD(&my_work->list);

        spin_lock_irq(&pdflush_lock);
        nr_pdflush_threads++;
        for ( ; ; ) {
                struct pdflush_work *pdf;

                set_current_state(TASK_INTERRUPTIBLE);
                list_move(&my_work->list, &pdflush_list);
                my_work->when_i_went_to_sleep = jiffies;
                spin_unlock_irq(&pdflush_lock);

                schedule();
                if (try_to_freeze()) {
                        spin_lock_irq(&pdflush_lock);
                        continue;
                }

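                /*
                 * A real wakeup comes from pdflush_operation(), which takes
                 * us off pdflush_list and sets ->fn before waking us.  If we
                 * are still on the list, or ->fn is still NULL, the wakeup
                 * was spurious and we simply go back to sleep.
                 */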
                spin_lock_irq(&pdflush_lock);
                if (!list_empty(&my_work->list)) {
                        printk("pdflush: bogus wakeup!\n");
                        my_work->fn = NULL;
                        continue;
                }
                if (my_work->fn == NULL) {
                        printk("pdflush: NULL work function\n");
                        continue;
                }
                spin_unlock_irq(&pdflush_lock);

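                /*
                 * The lock is dropped while the callback runs: writeback
                 * can block for a long time.
                 */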
                (*my_work->fn)(my_work->arg0);

                /*
                 * Thread creation: For how long have there been zero
                 * available threads?
                 */
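                /*
                 * last_empty_jifs is updated by pdflush_operation() each time
                 * it hands out the last idle thread, so this only spawns a new
                 * thread when the pool appears to have been empty for a second.
                 */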
                if (jiffies - last_empty_jifs > 1 * HZ) {
                        /* unlocked list_empty() test is OK here */
                        if (list_empty(&pdflush_list)) {
                                /* unlocked test is OK here */
                                if (nr_pdflush_threads < MAX_PDFLUSH_THREADS)
                                        start_one_pdflush_thread();
                        }
                }

                spin_lock_irq(&pdflush_lock);
                my_work->fn = NULL;

                /*
                 * Thread destruction: For how long has the sleepiest
                 * thread slept?
                 */
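                /*
                 * Idle threads add themselves at the head of pdflush_list and
                 * pdflush_operation() hands work out from the head, so the
                 * entry at the tail has been asleep the longest.
                 */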
                if (list_empty(&pdflush_list))
                        continue;
                if (nr_pdflush_threads <= MIN_PDFLUSH_THREADS)
                        continue;
                pdf = list_entry(pdflush_list.prev, struct pdflush_work, list);
                if (jiffies - pdf->when_i_went_to_sleep > 1 * HZ) {
                        /* Limit exit rate */
                        pdf->when_i_went_to_sleep = jiffies;
                        break;                          /* exeunt */
                }
        }
        nr_pdflush_threads--;
        spin_unlock_irq(&pdflush_lock);
        return 0;
}

/*
 * Of course, my_work wants to be just a local in __pdflush().  It is
 * separated out in this manner to hopefully prevent the compiler from
 * performing unfortunate optimisations against the auto variables, because
 * these are visible to other tasks and CPUs.  (No problem has actually
 * been observed.  This is just paranoia.)
 */
static int pdflush(void *dummy)
{
        struct pdflush_work my_work;

        /*
         * pdflush can spend a lot of time doing encryption via dm-crypt.  We
         * don't want to do that at keventd's priority.
         */
        set_user_nice(current, 0);
        return __pdflush(&my_work);
}

/*
 * Attempt to wake up a pdflush thread, and get it to do some work for you.
 * Returns zero if it indeed managed to find a worker thread, and passed your
 * payload to it.
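 * Returns -1 if no idle worker thread was available, in which case the
 * work is simply not done.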
 */
int pdflush_operation(void (*fn)(unsigned long), unsigned long arg0)
{
        unsigned long flags;
        int ret = 0;

        if (fn == NULL)
                BUG();          /* Hard to diagnose if it's deferred */

        spin_lock_irqsave(&pdflush_lock, flags);
        if (list_empty(&pdflush_list)) {
                spin_unlock_irqrestore(&pdflush_lock, flags);
                ret = -1;
        } else {
                struct pdflush_work *pdf;

                pdf = list_entry(pdflush_list.next, struct pdflush_work, list);
                list_del_init(&pdf->list);
                if (list_empty(&pdflush_list))
                        last_empty_jifs = jiffies;
                pdf->fn = fn;
                pdf->arg0 = arg0;
                wake_up_process(pdf->who);
                spin_unlock_irqrestore(&pdflush_lock, flags);
        }
        return ret;
}

static void start_one_pdflush_thread(void)
{
        kthread_run(pdflush, NULL, "pdflush");
}

static int __init pdflush_init(void)
{
        int i;

        for (i = 0; i < MIN_PDFLUSH_THREADS; i++)
                start_one_pdflush_thread();
        return 0;
}

module_init(pdflush_init);