// SPDX-License-Identifier: GPL-2.0
/*
 * (C) 2001 Clemson University and The University of Chicago
 * Copyright 2018 Omnibond Systems, L.L.C.
 *
 * See COPYING in top-level directory.
 */

/*
 * Linux VFS file operations.
 */

#include "protocol.h"
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
#include <linux/fs.h>
#include <linux/pagemap.h>

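/*
 * Ask the userspace client-core to drop any readahead cache data it
 * holds for this file (ORANGEFS_VFS_OP_RA_FLUSH upcall).
 */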
static int flush_racache(struct inode *inode)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_kernel_op_s *new_op;
	int ret;

	gossip_debug(GOSSIP_UTILS_DEBUG,
		     "%s: %pU: Handle is %pU | fs_id %d\n", __func__,
		     get_khandle_from_ino(inode), &orangefs_inode->refn.khandle,
		     orangefs_inode->refn.fs_id);

	new_op = op_alloc(ORANGEFS_VFS_OP_RA_FLUSH);
	if (!new_op)
		return -ENOMEM;
	new_op->upcall.req.ra_cache_flush.refn = orangefs_inode->refn;

	ret = service_operation(new_op, "orangefs_flush_racache",
				get_interruptible_flag(inode));

	gossip_debug(GOSSIP_UTILS_DEBUG, "%s: got return value of %d\n",
		     __func__, ret);

	op_release(new_op);
	return ret;
}

/*
 * Post and wait for the I/O upcall to finish
 */
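/*
 * The transfer happens in stages: for a write, the caller's data is
 * first copied into a shared-memory buffer (Stage 1); the client-core
 * then services the I/O upcall (Stage 2); for a read, the result is
 * copied back out of the shared buffer into the caller's iovec
 * (Stage 3).
 */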
ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
	loff_t *offset, struct iov_iter *iter, size_t total_size,
	loff_t readahead_size, struct orangefs_write_range *wr,
	int *index_return, struct file *file)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
	struct orangefs_kernel_op_s *new_op = NULL;
	int buffer_index;
	ssize_t ret;
	size_t copy_amount;
	int open_for_read;
	int open_for_write;

	new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
	if (!new_op)
		return -ENOMEM;

	/* synchronous I/O */
	new_op->upcall.req.io.readahead_size = readahead_size;
	new_op->upcall.req.io.io_type = type;
	new_op->upcall.req.io.refn = orangefs_inode->refn;

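	/*
	 * We come back here if service_operation() reports that the
	 * client-core restarted (-EAGAIN on a purged op), since a restart
	 * wipes the shared memory area and a fresh buffer must be set up.
	 */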
populate_shared_memory:
	/* get a shared buffer index */
	buffer_index = orangefs_bufmap_get();
	if (buffer_index < 0) {
		ret = buffer_index;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s: orangefs_bufmap_get failure (%zd)\n",
			     __func__, ret);
		goto out;
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): GET op %p -> buffer_index %d\n",
		     __func__,
		     handle,
		     new_op,
		     buffer_index);

	new_op->uses_shared_memory = 1;
	new_op->upcall.req.io.buf_index = buffer_index;
	new_op->upcall.req.io.count = total_size;
	new_op->upcall.req.io.offset = *offset;
	if (type == ORANGEFS_IO_WRITE && wr) {
		new_op->upcall.uid = from_kuid(&init_user_ns, wr->uid);
		new_op->upcall.gid = from_kgid(&init_user_ns, wr->gid);
	}
	/*
	 * Orangefs has no open, and orangefs checks file permissions
	 * on each file access. POSIX requires that file permissions
	 * be checked on open and nowhere else. Orangefs-through-the-kernel
	 * needs to seem POSIX compliant.
	 *
	 * The VFS opens files, even if the filesystem provides no
	 * method. We can see if a file was successfully opened for
	 * read and/or for write by looking at file->f_mode.
	 *
	 * When writes are flowing from the page cache, file is no
	 * longer available. We can trust the VFS to have checked
	 * file->f_mode before writing to the page cache.
	 *
	 * The mode of a file might change between when it is opened
	 * and IO commences, or it might be created with an arbitrary mode.
	 *
	 * We'll make sure we don't hit EACCES during the IO stage by
	 * using UID 0. Some of the time we have access without changing
	 * to UID 0 - how to check?
	 */
	if (file) {
		open_for_write = file->f_mode & FMODE_WRITE;
		open_for_read = file->f_mode & FMODE_READ;
	} else {
		open_for_write = 1;
		open_for_read = 0; /* not relevant? */
	}
	if ((type == ORANGEFS_IO_WRITE) && open_for_write)
		new_op->upcall.uid = 0;
	if ((type == ORANGEFS_IO_READ) && open_for_read)
		new_op->upcall.uid = 0;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): offset: %llu total_size: %zd\n",
		     __func__,
		     handle,
		     llu(*offset),
		     total_size);
	/*
	 * Stage 1: copy the buffers into client-core's address space
	 */
	if (type == ORANGEFS_IO_WRITE && total_size) {
		ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index,
						      total_size);
		if (ret < 0) {
			gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
				   __func__, (long)ret);
			goto out;
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Calling post_io_request with tag (%llu)\n",
		     __func__,
		     handle,
		     llu(new_op->tag));

	/* Stage 2: Service the I/O operation */
	ret = service_operation(new_op,
				type == ORANGEFS_IO_WRITE ?
					"file_write" :
					"file_read",
				get_interruptible_flag(inode));

	/*
	 * If service_operation() returns -EAGAIN #and# the operation was
	 * purged from orangefs_request_list or htable_ops_in_progress, then
	 * we know that the client was restarted, causing the shared memory
	 * area to be wiped clean. To restart a write operation in this
	 * case, we must re-copy the data from the user's iovec to a NEW
	 * shared memory location. To restart a read operation, we must get
	 * a new shared memory location.
	 */
	if (ret == -EAGAIN && op_state_purged(new_op)) {
		orangefs_bufmap_put(buffer_index);
		if (type == ORANGEFS_IO_WRITE)
			iov_iter_revert(iter, total_size);
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "%s:going to repopulate_shared_memory.\n",
			     __func__);
		goto populate_shared_memory;
	}

	if (ret < 0) {
		if (ret == -EINTR) {
			/*
			 * We can't return EINTR if any data was written,
			 * it's not POSIX. It is minimally acceptable
			 * to give a partial write, the way NFS does.
			 *
			 * It would be optimal to return all or nothing,
			 * but if a userspace write is bigger than
			 * an IO buffer, and the interrupt occurs
			 * between buffer writes, that would not be
			 * possible.
			 */
			switch (new_op->op_state - OP_VFS_STATE_GIVEN_UP) {
			/*
			 * If the op was waiting when the interrupt
			 * occurred, then the client-core did not
			 * trigger the write.
			 */
			case OP_VFS_STATE_WAITING:
				if (*offset == 0)
					ret = -EINTR;
				else
					ret = 0;
				break;
			/*
			 * If the op was in progress when the interrupt
			 * occurred, then the client-core was able to
			 * trigger the write.
			 */
			case OP_VFS_STATE_INPROGR:
				if (type == ORANGEFS_IO_READ)
					ret = -EINTR;
				else
					ret = total_size;
				break;
			default:
				gossip_err("%s: unexpected op state :%d:.\n",
					   __func__,
					   new_op->op_state);
				ret = 0;
				break;
			}
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: got EINTR, state:%d: %p\n",
				     __func__,
				     new_op->op_state,
				     new_op);
		} else {
			gossip_err("%s: error in %s handle %pU, returning %zd\n",
				   __func__,
				   type == ORANGEFS_IO_READ ?
					"read from" : "write to",
				   handle, ret);
		}
		if (orangefs_cancel_op_in_progress(new_op))
			return ret;

		goto out;
	}

	/*
	 * Stage 3: Post copy buffers from client-core's address space
	 */
	if (type == ORANGEFS_IO_READ && new_op->downcall.resp.io.amt_complete) {
		/*
		 * NOTE: the iovector can contain either addresses (which
		 * can further be kernel-space or user-space addresses)
		 * or pointers to struct page.
		 */

		/*
		 * When reading, readahead_size will only be zero when
		 * we're doing O_DIRECT, otherwise we got here from
		 * orangefs_readpage.
		 *
		 * If we got here from orangefs_readpage we want to
		 * copy either a page or the whole file into the io
		 * vector, whichever is smaller.
		 */
		if (readahead_size)
			copy_amount =
				min(new_op->downcall.resp.io.amt_complete,
				    (__s64)PAGE_SIZE);
		else
			copy_amount = new_op->downcall.resp.io.amt_complete;

		ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index,
						    copy_amount);
		if (ret < 0) {
			gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
				   __func__, (long)ret);
			goto out;
		}
	}
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "%s(%pU): Amount %s, returned by the sys-io call:%d\n",
		     __func__,
		     handle,
		     type == ORANGEFS_IO_READ ? "read" : "written",
		     (int)new_op->downcall.resp.io.amt_complete);

	ret = new_op->downcall.resp.io.amt_complete;

out:
	if (buffer_index >= 0) {
		if ((readahead_size) && (type == ORANGEFS_IO_READ)) {
			/* readpage */
			*index_return = buffer_index;
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s: hold on to buffer_index :%d:\n",
				     __func__, buffer_index);
		} else {
			/* O_DIRECT */
			orangefs_bufmap_put(buffer_index);
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s(%pU): PUT buffer_index %d\n",
				     __func__, handle, buffer_index);
		}
	}
	op_release(new_op);
	return ret;
}

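/*
 * Write back and invalidate the inode's page cache once the attribute
 * cache timeout (orangefs_cache_timeout_msecs) has expired, so cached
 * data is refreshed from the server; bit 1 of the inode's bitlock
 * serializes concurrent revalidations.
 */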
int orangefs_revalidate_mapping(struct inode *inode)
{
	struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long *bitlock = &orangefs_inode->bitlock;
	int ret;

	while (1) {
		ret = wait_on_bit(bitlock, 1, TASK_KILLABLE);
		if (ret)
			return ret;
		spin_lock(&inode->i_lock);
		if (test_bit(1, bitlock)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!time_before(jiffies, orangefs_inode->mapping_time))
			break;
		spin_unlock(&inode->i_lock);
		return 0;
	}

	set_bit(1, bitlock);
	smp_wmb();
	spin_unlock(&inode->i_lock);

	unmap_mapping_range(mapping, 0, 0, 0);
	ret = filemap_write_and_wait(mapping);
	if (!ret)
		ret = invalidate_inode_pages2(mapping);

	orangefs_inode->mapping_time = jiffies +
	    orangefs_cache_timeout_msecs*HZ/1000;

	clear_bit(1, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, 1);

	return ret;
}

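/*
 * Buffered read: revalidate the page cache, then fall through to
 * generic_file_read_iter(), all under the inode's i_rwsem held for
 * read.
 */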
static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
    struct iov_iter *iter)
{
	int ret;
	orangefs_stats.reads++;

	down_read(&file_inode(iocb->ki_filp)->i_rwsem);
	ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
	if (ret)
		goto out;

	ret = generic_file_read_iter(iocb, iter);
out:
	up_read(&file_inode(iocb->ki_filp)->i_rwsem);
	return ret;
}

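/*
 * Buffered write: if the write begins beyond the locally cached
 * i_size, revalidate the mapping first so the size is current, then
 * fall through to generic_file_write_iter().
 */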
static ssize_t orangefs_file_write_iter(struct kiocb *iocb,
    struct iov_iter *iter)
{
	int ret;
	orangefs_stats.writes++;

	if (iocb->ki_pos > i_size_read(file_inode(iocb->ki_filp))) {
		ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
		if (ret)
			return ret;
	}

	ret = generic_file_write_iter(iocb, iter);
	return ret;
}

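/*
 * The flag bits reported by FS_IOC_GETFLAGS are stored in the
 * "user.pvfs2.meta_hint" extended attribute; a missing attribute
 * (-ENODATA) simply means no flags are set.
 */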
static int orangefs_getflags(struct inode *inode, unsigned long *uval)
{
	__u64 val = 0;
	int ret;

	ret = orangefs_inode_getxattr(inode,
				      "user.pvfs2.meta_hint",
				      &val, sizeof(val));
	if (ret < 0 && ret != -ENODATA)
		return ret;
	else if (ret == -ENODATA)
		val = 0;
	*uval = val;
	return 0;
}

/*
 * Perform a miscellaneous operation on a file.
 */
static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	int ret = -ENOTTY;
	__u64 val = 0;
	unsigned long uval;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_ioctl: called with cmd %d\n",
		     cmd);

	/*
	 * we understand some general ioctls on files, such as the immutable
	 * and append flags
	 */
	if (cmd == FS_IOC_GETFLAGS) {
		ret = orangefs_getflags(inode, &uval);
		if (ret)
			return ret;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
			     (unsigned long long)uval);
		return put_user(uval, (int __user *)arg);
	} else if (cmd == FS_IOC_SETFLAGS) {
		unsigned long old_uval;

		ret = 0;
		if (get_user(uval, (int __user *)arg))
			return -EFAULT;
		/*
		 * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
		 * is turned on for a file. The user is not allowed to turn
		 * on this bit, but the bit is present if the user first gets
		 * the flags and then updates the flags with some new
		 * settings. So, we ignore it in the following edit. bligon.
		 */
		if ((uval & ~ORANGEFS_MIRROR_FL) &
		    (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
			gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
			return -EINVAL;
		}
		ret = orangefs_getflags(inode, &old_uval);
		if (ret)
			return ret;
		ret = vfs_ioc_setflags_prepare(inode, old_uval, uval);
		if (ret)
			return ret;
		val = uval;
		gossip_debug(GOSSIP_FILE_DEBUG,
			     "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
			     (unsigned long long)val);
		ret = orangefs_inode_setxattr(inode,
					      "user.pvfs2.meta_hint",
					      &val, sizeof(val), 0);
	}

	return ret;
}

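/*
 * Page-fault handler: refresh the remote file size before handing the
 * fault to filemap_fault(); a failed getattr is reported as SIGBUS.
 */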
static vm_fault_t orangefs_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	int ret;

	ret = orangefs_inode_getattr(file->f_mapping->host,
	    ORANGEFS_GETATTR_SIZE);
	if (ret == -ESTALE)
		ret = -EIO;
	if (ret) {
		gossip_err("%s: orangefs_inode_getattr failed, ret:%d:.\n",
			   __func__, ret);
		return VM_FAULT_SIGBUS;
	}
	return filemap_fault(vmf);
}

static const struct vm_operations_struct orangefs_file_vm_ops = {
	.fault = orangefs_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = orangefs_page_mkwrite,
};

/*
 * Memory map a region of a file.
 */
static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	ret = orangefs_revalidate_mapping(file_inode(file));
	if (ret)
		return ret;

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_mmap: called on %s\n",
		     (file ?
			(char *)file->f_path.dentry->d_name.name :
			(char *)"Unknown"));

	/* set the sequential readahead hint */
	vma->vm_flags |= VM_SEQ_READ;
	vma->vm_flags &= ~VM_RAND_READ;

	file_accessed(file);
	vma->vm_ops = &orangefs_file_vm_ops;
	return 0;
}

#define mapping_nrpages(idata) ((idata)->nrpages)

/*
 * Called to notify the module that there are no more references to
 * this file (i.e. no processes have it open).
 *
 * \note Not called when each file is closed.
 */
static int orangefs_file_release(struct inode *inode, struct file *file)
{
	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_release: called on %pD\n",
		     file);

	/*
	 * remove all associated inode pages from the page cache and
	 * readahead cache (if any); this forces an expensive refresh of
	 * data for the next caller of mmap (or 'get_block' accesses)
	 */
	if (file_inode(file) &&
	    file_inode(file)->i_mapping &&
	    mapping_nrpages(&file_inode(file)->i_data)) {
		if (orangefs_features & ORANGEFS_FEATURE_READAHEAD) {
			gossip_debug(GOSSIP_INODE_DEBUG,
				     "calling flush_racache on %pU\n",
				     get_khandle_from_ino(inode));
			flush_racache(inode);
			gossip_debug(GOSSIP_INODE_DEBUG,
				     "flush_racache finished\n");
		}
	}
	return 0;
}

/*
 * Push all data for a specific file onto permanent storage.
 */
static int orangefs_fsync(struct file *file,
			  loff_t start,
			  loff_t end,
			  int datasync)
{
	int ret;
	struct orangefs_inode_s *orangefs_inode =
		ORANGEFS_I(file_inode(file));
	struct orangefs_kernel_op_s *new_op = NULL;

	ret = filemap_write_and_wait_range(file_inode(file)->i_mapping,
	    start, end);
	if (ret < 0)
		return ret;

	new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
	if (!new_op)
		return -ENOMEM;
	new_op->upcall.req.fsync.refn = orangefs_inode->refn;

	ret = service_operation(new_op,
				"orangefs_fsync",
				get_interruptible_flag(file_inode(file)));

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_fsync got return value of %d\n",
		     ret);

	op_release(new_op);
	return ret;
}

/*
 * Change the file pointer position for an instance of an open file.
 *
 * \note If .llseek is overridden, we must acquire lock as described in
 *       Documentation/filesystems/locking.rst.
 *
 * Future upgrade could support SEEK_DATA and SEEK_HOLE but would
 * require many changes to the FS.
 */
static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
{
	int ret = -EINVAL;
	struct inode *inode = file_inode(file);

	if (origin == SEEK_END) {
		/*
		 * revalidate the inode's file size.
		 * NOTE: We are only interested in file size here,
		 * so we set mask accordingly.
		 */
		ret = orangefs_inode_getattr(file->f_mapping->host,
		    ORANGEFS_GETATTR_SIZE);
		if (ret == -ESTALE)
			ret = -EIO;
		if (ret) {
			gossip_debug(GOSSIP_FILE_DEBUG,
				     "%s:%s:%d calling make bad inode\n",
				     __FILE__,
				     __func__,
				     __LINE__);
			return ret;
		}
	}

	gossip_debug(GOSSIP_FILE_DEBUG,
		     "orangefs_file_llseek: offset is %ld | origin is %d"
		     " | inode size is %lu\n",
		     (long)offset,
		     origin,
		     (unsigned long)i_size_read(inode));

	return generic_file_llseek(file, offset, origin);
}

/*
 * Support local locks (locks that only this kernel knows about)
 * if Orangefs was mounted -o local_lock.
 */
static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	int rc = -EINVAL;

	if (ORANGEFS_SB(file_inode(filp)->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
		if (cmd == F_GETLK) {
			rc = 0;
			posix_test_lock(filp, fl);
		} else {
			rc = posix_lock_file(filp, fl, NULL);
		}
	}

	return rc;
}

static int orangefs_flush(struct file *file, fl_owner_t id)
{
	/*
	 * This is vfs_fsync_range(file, 0, LLONG_MAX, 0) without the
	 * service_operation in orangefs_fsync.
	 *
	 * Do not send fsync to OrangeFS server on a close. Do send fsync
	 * on an explicit fsync call. This duplicates historical OrangeFS
	 * behavior.
	 */
	int r;

	r = filemap_write_and_wait_range(file->f_mapping, 0, LLONG_MAX);
	if (r > 0)
		return 0;
	else
		return r;
}

/** ORANGEFS implementation of VFS file operations */
const struct file_operations orangefs_file_operations = {
	.llseek = orangefs_file_llseek,
	.read_iter = orangefs_file_read_iter,
	.write_iter = orangefs_file_write_iter,
	.lock = orangefs_lock,
	.unlocked_ioctl = orangefs_ioctl,
	.mmap = orangefs_file_mmap,
	.open = generic_file_open,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.flush = orangefs_flush,
	.release = orangefs_file_release,
	.fsync = orangefs_fsync,
};