/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <[email protected]>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
        atomic_t                refcnt;
        atomic_t                event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
};

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL       ((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;

static struct kernfs_open_file *kernfs_of(struct file *file)
{
        return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
        if (kn->flags & KERNFS_LOCKDEP)
                lockdep_assert_held(kn);
        return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_stop)
                ops->seq_stop(sf, v);
        kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops;

        /*
         * @of->mutex nests outside active ref and is primarily to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn))
                return ERR_PTR(-ENODEV);

        ops = kernfs_ops(of->kn);
        if (ops->seq_start) {
                void *next = ops->seq_start(sf, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open().  Returns
                 * !NULL if pos is at the beginning; otherwise, NULL.
                 */
                return NULL + !*ppos;
        }
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_next) {
                void *next = ops->seq_next(sf, v, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open(), always
                 * terminate after the initial read.
                 */
                ++*ppos;
                return NULL;
        }
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        if (v != ERR_PTR(-ENODEV))
                kernfs_seq_stop_active(sf, v);
        mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        of->event = atomic_read(&of->kn->attr.open->event);

        return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
        .start = kernfs_seq_start,
        .next = kernfs_seq_next,
        .stop = kernfs_seq_stop,
        .show = kernfs_seq_show,
};

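/*
 * Illustrative sketch, not part of the original file: the simplest user
 * of the seq_file path above is a kernfs_ops which only implements
 * ->seq_show.  "example_seq_show" and "example_ops" are hypothetical
 * names used for illustration only.
 */
static int example_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        /* @of->kn->priv is whatever was passed to __kernfs_create_file() */
        seq_printf(sf, "%s\n", (const char *)of->kn->priv);
        return 0;
}

static const struct kernfs_ops example_ops = {
        .seq_show = example_seq_show,
};
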
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in the read(2) call should be passed to the read callback,
 * making it difficult to use seq_file.  Implement simplistic custom
 * buffering for bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
                                       char __user *user_buf, size_t count,
                                       loff_t *ppos)
{
        ssize_t len = min_t(size_t, count, PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;

        buf = of->prealloc_buf;
        if (buf)
                mutex_lock(&of->prealloc_mutex);
        else
                buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is used to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                len = -ENODEV;
                mutex_unlock(&of->mutex);
                goto out_free;
        }

        of->event = atomic_read(&of->kn->attr.open->event);
        ops = kernfs_ops(of->kn);
        if (ops->read)
                len = ops->read(of, buf, len, *ppos);
        else
                len = -EINVAL;

        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);

        if (len < 0)
                goto out_free;

        if (copy_to_user(user_buf, buf, len)) {
                len = -EFAULT;
                goto out_free;
        }

        *ppos += len;

out_free:
        if (buf == of->prealloc_buf)
                mutex_unlock(&of->prealloc_mutex);
        else
                kfree(buf);
        return len;
}

/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to copy the read data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);

        if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
                return seq_read(file, user_buf, count, ppos);
        else
                return kernfs_file_direct_read(of, user_buf, count, ppos);
}

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back.
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        ssize_t len;
        char *buf;

        if (of->atomic_write_len) {
                len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
                len = min_t(size_t, count, PAGE_SIZE);
        }

        buf = of->prealloc_buf;
        if (buf)
                mutex_lock(&of->prealloc_mutex);
        else
                buf = kmalloc(len + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, user_buf, len)) {
                len = -EFAULT;
                goto out_free;
        }
        buf[len] = '\0';        /* guarantee string termination */

        /*
         * @of->mutex nests outside active ref and is used to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                mutex_unlock(&of->mutex);
                len = -ENODEV;
                goto out_free;
        }

        ops = kernfs_ops(of->kn);
        if (ops->write)
                len = ops->write(of, buf, len, *ppos);
        else
                len = -EINVAL;

        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);

        if (len > 0)
                *ppos += len;

out_free:
        if (buf == of->prealloc_buf)
                mutex_unlock(&of->prealloc_mutex);
        else
                kfree(buf);
        return len;
}

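/*
 * Illustrative sketch, not part of the original file: a minimal
 * kernfs_ops->write handler.  @buf arrives already copied from userland
 * and NUL-terminated by kernfs_fop_write() above, so string parsing
 * helpers are safe.  "example_store" is a hypothetical name.
 */
static ssize_t example_store(struct kernfs_open_file *of, char *buf,
                             size_t bytes, loff_t off)
{
        unsigned long val;
        int ret;

        ret = kstrtoul(buf, 0, &val);
        if (ret)
                return ret;

        /* ... apply @val to the object behind of->kn->priv ... */

        return bytes;   /* consumed the whole buffer */
}
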
static void kernfs_vma_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);

        if (!of->vm_ops)
                return;

        if (!kernfs_get_active(of->kn))
                return;

        if (of->vm_ops->open)
                of->vm_ops->open(vma);

        kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = VM_FAULT_SIGBUS;
        if (of->vm_ops->fault)
                ret = of->vm_ops->fault(vma, vmf);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
                                   struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = 0;
        if (of->vm_ops->page_mkwrite)
                ret = of->vm_ops->page_mkwrite(vma, vmf);
        else
                file_update_time(file);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, int len, int write)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return -EINVAL;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = -EINVAL;
        if (of->vm_ops->access)
                ret = of->vm_ops->access(vma, addr, buf, len, write);

        kernfs_put_active(of->kn);
        return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
                                 struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return 0;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = 0;
        if (of->vm_ops->set_policy)
                ret = of->vm_ops->set_policy(vma, new);

        kernfs_put_active(of->kn);
        return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
                                               unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        struct mempolicy *pol;

        if (!of->vm_ops)
                return vma->vm_policy;

        if (!kernfs_get_active(of->kn))
                return vma->vm_policy;

        pol = vma->vm_policy;
        if (of->vm_ops->get_policy)
                pol = of->vm_ops->get_policy(vma, addr);

        kernfs_put_active(of->kn);
        return pol;
}

#endif

static const struct vm_operations_struct kernfs_vm_ops = {
        .open           = kernfs_vma_open,
        .fault          = kernfs_vma_fault,
        .page_mkwrite   = kernfs_vma_page_mkwrite,
        .access         = kernfs_vma_access,
#ifdef CONFIG_NUMA
        .set_policy     = kernfs_vma_set_policy,
        .get_policy     = kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        int rc;

        /*
         * mmap path and of->mutex are prone to triggering spurious lockdep
         * warnings and we don't want to add spurious locking dependency
         * between the two.  Check whether mmap is actually implemented
         * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
         * comment in kernfs_fop_open() for more details.
         */
        if (!(of->kn->flags & KERNFS_HAS_MMAP))
                return -ENODEV;

        mutex_lock(&of->mutex);

        rc = -ENODEV;
        if (!kernfs_get_active(of->kn))
                goto out_unlock;

        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
        if (rc)
                goto out_put;

        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
         * to satisfy versions of X which crash if the mmap fails: that
         * substitutes a new vm_file, and we don't then want bin_vm_ops.
         */
        if (vma->vm_file != file)
                goto out_put;

        rc = -EINVAL;
        if (of->mmapped && of->vm_ops != vma->vm_ops)
                goto out_put;

        /*
         * It is not possible to successfully wrap close.
         * So error if someone is trying to use close.
         */
        rc = -EINVAL;
        if (vma->vm_ops && vma->vm_ops->close)
                goto out_put;

        rc = 0;
        of->mmapped = 1;
        of->vm_ops = vma->vm_ops;
        vma->vm_ops = &kernfs_vm_ops;
out_put:
        kernfs_put_active(of->kn);
out_unlock:
        mutex_unlock(&of->mutex);

        return rc;
}

/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
                                struct kernfs_open_file *of)
{
        struct kernfs_open_node *on, *new_on = NULL;

retry:
        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irq(&kernfs_open_node_lock);

        if (!kn->attr.open && new_on) {
                kn->attr.open = new_on;
                new_on = NULL;
        }

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->refcnt);
                list_add_tail(&of->list, &on->files);
        }

        spin_unlock_irq(&kernfs_open_node_lock);
        mutex_unlock(&kernfs_open_file_mutex);

        if (on) {
                kfree(new_on);
                return 0;
        }

        /* not there, initialize a new one and retry */
        new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
        if (!new_on)
                return -ENOMEM;

        atomic_set(&new_on->refcnt, 0);
        atomic_set(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
        goto retry;
}

/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
{
        struct kernfs_open_node *on = kn->attr.open;
        unsigned long flags;

        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irqsave(&kernfs_open_node_lock, flags);

        if (of)
                list_del(&of->list);

        if (atomic_dec_and_test(&on->refcnt))
                kn->attr.open = NULL;
        else
                on = NULL;

        spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
        mutex_unlock(&kernfs_open_file_mutex);

        kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
        struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
        int error = -EACCES;

        if (!kernfs_get_active(kn))
                return -ENODEV;

        ops = kernfs_ops(kn);

        has_read = ops->seq_show || ops->read || ops->mmap;
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;

        /* see the flag definition for details */
        if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
                if ((file->f_mode & FMODE_WRITE) &&
                    (!(inode->i_mode & S_IWUGO) || !has_write))
                        goto err_out;

                if ((file->f_mode & FMODE_READ) &&
                    (!(inode->i_mode & S_IRUGO) || !has_read))
                        goto err_out;
        }

        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
        of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
        if (!of)
                goto err_out;

        /*
         * The following is done to give a different lockdep key to
         * @of->mutex for files which implement mmap.  This is a rather
         * crude way to avoid false positive lockdep warning around
         * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
         * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
         * which mm->mmap_sem nests, while holding @of->mutex.  As each
         * open file has a separate mutex, it's okay as long as those don't
         * happen on the same file.  At this point, we can't easily give
         * each file a separate locking class.  Let's differentiate on
         * whether the file has mmap or not for now.
         *
         * Both paths of the branch look the same.  They're supposed to
         * look that way and give @of->mutex different static lockdep keys.
         */
        if (has_mmap)
                mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);

        of->kn = kn;
        of->file = file;

        /*
         * Write path needs to know atomic_write_len outside active
         * reference.  Cache it in open_file.  See kernfs_fop_write() for
         * details.
         */
        of->atomic_write_len = ops->atomic_write_len;

        error = -EINVAL;
        /*
         * ->seq_show is incompatible with ->prealloc,
         * as seq_read does its own allocation.
         * ->read must be used instead.
         */
        if (ops->prealloc && ops->seq_show)
                goto err_free;
        if (ops->prealloc) {
                int len = of->atomic_write_len ?: PAGE_SIZE;
                of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
                error = -ENOMEM;
                if (!of->prealloc_buf)
                        goto err_free;
                mutex_init(&of->prealloc_mutex);
        }

        /*
         * Always instantiate seq_file even if read access doesn't use
         * seq_file or is not requested.  This unifies private data access
         * and readable regular files are the vast majority anyway.
         */
        if (ops->seq_show)
                error = seq_open(file, &kernfs_seq_ops);
        else
                error = seq_open(file, NULL);
        if (error)
                goto err_free;

        ((struct seq_file *)file->private_data)->private = of;

        /* seq_file clears PWRITE unconditionally, restore it if WRITE */
        if (file->f_mode & FMODE_WRITE)
                file->f_mode |= FMODE_PWRITE;

        /* make sure we have open node struct */
        error = kernfs_get_open_node(kn, of);
        if (error)
                goto err_close;

        /* open succeeded, put active references */
        kernfs_put_active(kn);
        return 0;

err_close:
        seq_release(inode, file);
err_free:
        kfree(of->prealloc_buf);
        kfree(of);
err_out:
        kernfs_put_active(kn);
        return error;
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_file *of = kernfs_of(filp);

        kernfs_put_open_node(kn, of);
        seq_release(inode, filp);
        kfree(of->prealloc_buf);
        kfree(of);

        return 0;
}

void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
        struct kernfs_open_node *on;
        struct kernfs_open_file *of;

        if (!(kn->flags & KERNFS_HAS_MMAP))
                return;

        spin_lock_irq(&kernfs_open_node_lock);
        on = kn->attr.open;
        if (on)
                atomic_inc(&on->refcnt);
        spin_unlock_irq(&kernfs_open_node_lock);
        if (!on)
                return;

        mutex_lock(&kernfs_open_file_mutex);
        list_for_each_entry(of, &on->files, list) {
                struct inode *inode = file_inode(of->file);
                unmap_mapping_range(inode->i_mapping, 0, 0, 1);
        }
        mutex_unlock(&kernfs_open_file_mutex);

        kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' returns
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
        struct kernfs_open_file *of = kernfs_of(filp);
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_node *on = kn->attr.open;

        if (!kernfs_get_active(kn))
                goto trigger;

        poll_wait(filp, &on->poll, wait);

        kernfs_put_active(kn);

        if (of->event != atomic_read(&on->event))
                goto trigger;

        return DEFAULT_POLLMASK;

trigger:
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}

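/*
 * Illustrative userspace sketch (not part of the original file) of the
 * protocol described above, assuming @fd is an open kernfs attribute:
 *
 *	char buf[4096];
 *	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
 *
 *	pread(fd, buf, sizeof(buf), 0);		/- prime of->event -/
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLERR | POLLPRI))
 *			pread(fd, buf, sizeof(buf), 0);	/- re-read value -/
 *	}
 *
 * (inner comments written as /- -/ to keep this block a single comment)
 */
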
static void kernfs_notify_workfn(struct work_struct *work)
{
        struct kernfs_node *kn;
        struct kernfs_open_node *on;
        struct kernfs_super_info *info;
repeat:
        /* pop one off the notify_list */
        spin_lock_irq(&kernfs_notify_lock);
        kn = kernfs_notify_list;
        if (kn == KERNFS_NOTIFY_EOL) {
                spin_unlock_irq(&kernfs_notify_lock);
                return;
        }
        kernfs_notify_list = kn->attr.notify_next;
        kn->attr.notify_next = NULL;
        spin_unlock_irq(&kernfs_notify_lock);

        /* kick poll */
        spin_lock_irq(&kernfs_open_node_lock);

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->event);
                wake_up_interruptible(&on->poll);
        }

        spin_unlock_irq(&kernfs_open_node_lock);

        /* kick fsnotify */
        mutex_lock(&kernfs_mutex);

        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
                struct kernfs_node *parent;
                struct inode *inode;

                /*
                 * We want fsnotify_modify() on @kn but as the
                 * modifications aren't originating from userland don't
                 * have the matching @file available.  Look up the inodes
                 * and generate the events manually.
                 */
                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;

                parent = kernfs_get_parent(kn);
                if (parent) {
                        struct inode *p_inode;

                        p_inode = ilookup(info->sb, parent->ino);
                        if (p_inode) {
                                fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD,
                                         inode, FSNOTIFY_EVENT_INODE, kn->name, 0);
                                iput(p_inode);
                        }

                        kernfs_put(parent);
                }

                fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
                         kn->name, 0);
                iput(inode);
        }

        mutex_unlock(&kernfs_mutex);
        kernfs_put(kn);
        goto repeat;
}

/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
        static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
        unsigned long flags;

        if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
                return;

        spin_lock_irqsave(&kernfs_notify_lock, flags);
        if (!kn->attr.notify_next) {
                kernfs_get(kn);
                kn->attr.notify_next = kernfs_notify_list;
                kernfs_notify_list = kn;
                schedule_work(&kernfs_notify_work);
        }
        spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);

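/*
 * Illustrative note, not part of the original file: the producer side of
 * the poll protocol above.  A driver which has just updated the state
 * behind a kernfs file wakes the pollers with:
 *
 *	kernfs_notify(kn);
 *
 * which bumps the event counter and, via kernfs_notify_workfn(), also
 * generates fsnotify events for inotify-style watchers.
 */
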
const struct file_operations kernfs_file_fops = {
        .read           = kernfs_fop_read,
        .write          = kernfs_fop_write,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
        .fsync          = noop_fsync,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
                                         const char *name,
                                         umode_t mode, loff_t size,
                                         const struct kernfs_ops *ops,
                                         void *priv, const void *ns,
                                         struct lock_class_key *key)
{
        struct kernfs_node *kn;
        unsigned flags;
        int rc;

        flags = KERNFS_FILE;

        kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
        if (!kn)
                return ERR_PTR(-ENOMEM);

        kn->attr.ops = ops;
        kn->attr.size = size;
        kn->ns = ns;
        kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (key) {
                lockdep_init_map(&kn->dep_map, "s_active", key, 0);
                kn->flags |= KERNFS_LOCKDEP;
        }
#endif

        /*
         * kn->attr.ops is accessible only while holding active ref.  We
         * need to know whether some ops are implemented outside active
         * ref.  Cache their existence in flags.
         */
        if (ops->seq_show)
                kn->flags |= KERNFS_HAS_SEQ_SHOW;
        if (ops->mmap)
                kn->flags |= KERNFS_HAS_MMAP;

        rc = kernfs_add_one(kn);
        if (rc) {
                kernfs_put(kn);
                return ERR_PTR(rc);
        }
        return kn;
}
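
/*
 * Illustrative sketch, not part of the original file: tying the pieces
 * together.  A caller (sysfs, cgroup, etc. do this internally) could
 * create a read-only file backed by the hypothetical example_ops defined
 * earlier in this file.  "example_create" is a hypothetical name.
 */
static struct kernfs_node *example_create(struct kernfs_node *parent)
{
        /* static: lockdep keys must not live on the stack */
        static struct lock_class_key key;

        return __kernfs_create_file(parent, "example", 0444, PAGE_SIZE,
                                    &example_ops, (void *)"hello", NULL,
                                    &key);
}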