// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
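/*
 * For illustration (not in the original source): the ceph_sys2wire()
 * macro above relies on token pasting, so each invocation expands to a
 * flag test plus a wire-flag OR.  ceph_sys2wire(O_CREAT), for example,
 * expands to:
 *
 *	if (flags & O_CREAT) {
 *		wire_flags |= CEPH_O_CREAT;
 *		flags &= ~O_CREAT;
 *	}
 *
 * Clearing each handled bit is what lets the "unused open flags"
 * dout() above report only the flags that had no wire equivalent.
 */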
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    acknowledgement from the OSD(s).
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
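/*
 * Illustrative sketch (not part of the original file): the choice
 * between the buffered and synchronous paths is driven by the caps the
 * MDS granted plus per-file flags, roughly as ceph_read_iter() does
 * below:
 *
 *	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
 *	    (iocb->ki_flags & IOCB_DIRECT) ||
 *	    (fi->flags & CEPH_F_SYNC))
 *		... use the sync/direct path ...
 *	else
 *		... use generic_file_read_iter() and the page cache ...
 */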
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}
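/*
 * Worked example (added for illustration): if iov_iter_get_pages()
 * returns bytes = 10000 with start = 512 (and PAGE_SIZE = 4096), the
 * loop above emits three bio_vecs: 3584 bytes at offset 512 in the
 * first page, then 4096 and 2320 bytes at offset 0 in the next two
 * pages, since start is reset to 0 after the first page.
 */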
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
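/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * pairs iter_get_bvecs_alloc() with put_bvecs(), dirtying the pinned
 * pages only when they were the destination of a read, as
 * ceph_direct_read_write() does below:
 *
 *	struct bio_vec *bvecs;
 *	int num_bvecs;
 *	ssize_t len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_bvecs);
 *	if (len >= 0) {
 *		... attach bvecs to an OSD request and wait for it ...
 *		put_bvecs(bvecs, num_bvecs, should_dirty);
 *	}
 */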
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}

		file->private_data = fi;
	}

	fi->fmode = fmode;
	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);

	return 0;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		if (ret)
			return ret;
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n",
		     inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode);
		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF = 2,
	READ_INLINE = 3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
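/*
 * Worked example (added for illustration): suppose off = 0, a 4096-byte
 * read returns ret = 1024 from the OSD, and i_size = 8192.  Since
 * off + ret < i_size, the gap is a hole rather than EOF, so the code
 * below zero-fills zlen = min(len - ret, i_size - off - ret) = 3072
 * bytes and returns a full 4096-byte read instead of a short one.
 */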
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off, off + len);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		u64 i_size;
		bool more;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		if (unlikely(iov_iter_is_pipe(to))) {
			ret = iov_iter_get_pages_alloc(to, &pages, len,
						       &page_off);
			if (ret <= 0) {
				ceph_osdc_put_request(req);
				ret = -ENOMEM;
				break;
			}
			num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);
			if (ret < len) {
				len = ret;
				osd_req_op_extent_update(req, 0, len);
				more = false;
			}
		} else {
			num_pages = calc_pages_for(off, len);
			page_off = off & ~PAGE_MASK;
			pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
			if (IS_ERR(pages)) {
				ceph_osdc_put_request(req);
				ret = PTR_ERR(pages);
				break;
			}
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);
		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		if (unlikely(iov_iter_is_pipe(to))) {
			if (ret > 0) {
				iov_iter_advance(to, ret);
				off += ret;
			} else {
				iov_iter_advance(to, 0);
			}
			ceph_put_page_vector(pages, num_pages, false);
		} else {
			int idx = 0;
			size_t left = ret > 0 ? ret : 0;
			while (left > 0) {
				size_t len, copied;
				page_off = off & ~PAGE_MASK;
				len = min_t(size_t, left, PAGE_SIZE - page_off);
				copied = copy_page_to_iter(pages[idx++],
							   page_off, len, to);
				off += copied;
				left -= copied;
				if (copied < len) {
					ret = -EFAULT;
					break;
				}
			}
			ceph_release_page_vector(pages, num_pages);
		}

		if (ret <= 0 || off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (ret >= 0 &&
		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
			*retry_op = CHECK_EOF;
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
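/*
 * Note (added for illustration): cmpxchg(&aio_req->error, 0, rc) above
 * records only the first failure across concurrently completing OSD
 * requests; later errors lose the race and are dropped, so the value
 * reported to ki_complete() is the error of whichever sub-request
 * failed first.
 */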
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	ssize_t ret;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
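/*
 * Illustrative note (not in the original source): the object-boundary
 * splitting is done by ceph_osdc_new_request(), which takes &len in/out
 * and trims it to the end of the current RADOS object.  With the
 * default 4M object size, a 10M write at pos 0 therefore becomes three
 * OSD writes, roughly:
 *
 *	pos = 0M,  len in = 10M, len out = 4M
 *	pos = 4M,  len in = 6M,  len out = 4M
 *	pos = 8M,  len in = 2M,  len out = 2M
 *
 * which is why the loop below re-reads iov_iter_count() on each pass.
 */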
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
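/*
 * Sketch (illustrative, not part of the original file): the EOLDSNAPC
 * retry described above is a loop around the whole write path:
 *
 *	retry_snap:
 *		... take inode_lock, get Fw caps, pick snapc, write ...
 *		ceph_put_cap_refs(ci, got);
 *		if (written == -EOLDSNAPC)
 *			goto retry_snap;
 *
 * Dropping the cap refs first lets the pending snap flush complete, so
 * the retry picks up a fresh snap context.
 */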
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * can not run at the same time
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, CHECK_CAPS_NODELAY, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}
static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
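/*
 * Worked example (added for illustration): with the default layout of
 * stripe_unit = object_size = 4M and stripe_count = 1, object_set_size
 * is 4M and a punch of 0..12M is three whole-object operations.  With
 * stripe_count = 3 and 4M objects, object_set_size = 12M: a punch from
 * 2M to 26M first zeroes 2M..12M up to the period boundary, then drops
 * one full 12M period (three objects, stepping pos by stripe_unit), and
 * finally zeroes the 2M tail.
 */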
static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps, and an error is
 * returned if this fails; zero is returned on success.
 */
static int get_rd_wr_caps(struct ceph_inode_info *src_ci,
			  loff_t src_endoff, int *src_got,
			  struct ceph_inode_info *dst_ci,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got, NULL);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(src_ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(dst_ci, *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_ci, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, src_endoff,
				    src_got, NULL);
		if (ret < 0)
			return ret;
		/*... drop src_ci caps too, and retry */
		ceph_put_cap_refs(src_ci, *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}
static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}
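/*
 * Usage sketch (illustrative, not part of the original file): callers
 * bracket the remote-copy work with these helpers so both sets of cap
 * references are always dropped on every exit path, as
 * ceph_copy_file_range() does below:
 *
 *	err = get_rd_wr_caps(src_ci, src_off + len, &src_got,
 *			     dst_ci, dst_off + len, &dst_got);
 *	if (err < 0)
 *		return err;
 *	... do the object copies ...
 *	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
 */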
/*
 * This function does several size-related checks, returning an error if:
 *  - source file is smaller than off+len
 *  - destination file size is not OK (inode_newsize_ok())
 *  - max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	loff_t endoff = 0, size;
	ssize_t ret;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen, object_size;
	int src_got = 0, dst_got = 0, err, dirty;
	bool do_final_copy = false;

	if (src_inode == dst_inode)
		return -EINVAL;
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(ceph_inode_to_client(src_inode), NOCOPYFROM))
		return -EOPNOTSUPP;

	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != dst_ci->i_layout.stripe_count) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size))
		return -EOPNOTSUPP;

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_ci, (src_off + len), &src_got,
			     dst_ci, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	size = i_size_read(dst_inode);
	endoff = dst_off + len;

	/* Drop dst file cached pages */
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    endoff >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);

	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at the src_off
	 */
	if (src_objoff) {
		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		if (ret < 0) {
			dout("do_splice_direct returned %zd\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_ci, (src_off + len),
				     &src_got, dst_ci,
				     (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}
	object_size = src_ci->i_layout.object_size;
	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
		/* Do an object remote copy */
		err = ceph_osdc_copy_from(
			&ceph_inode_to_client(src_inode)->client->osdc,
			src_ci->i_vino.snap, 0,
			&src_oid, &src_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
			&dst_oid, &dst_oloc,
			CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
			CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
		if (err) {
			dout("ceph_osdc_copy_from returned %d\n", err);
			if (!ret)
				ret = err;
			goto out_caps;
		}
		len -= object_size;
		src_off += object_size;
		dst_off += object_size;
		ret += object_size;
	}

	if (len)
		/* We still need one final local copy */
		do_final_copy = true;

	file_update_time(dst_file);
	if (endoff > size) {
		int caps_flags = 0;

		/* Let the MDS know about dst file size change */
		if (ceph_quota_is_max_bytes_approaching(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_NODELAY;
		if (ceph_inode_set_size(dst_inode, endoff))
			caps_flags |= CHECK_CAPS_AUTHONLY;
		if (caps_flags)
			ceph_check_caps(dst_ci, caps_flags, NULL);
	}

	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dst_ci->i_inline_version = CEPH_INLINE_NONE;
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	if (do_final_copy) {
		err = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, len, flags);
		if (err < 0) {
			dout("do_splice_direct returned %d\n", err);
			goto out;
		}
		len = 0;
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}
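/*
 * Worked example (added for illustration): with 4M objects, a
 * copy_file_range() of len = 9M at src_off = dst_off = 1M has
 * src_objoff = 1M, so do_splice_direct() first copies the 3M up to the
 * object boundary (src_objlen).  The remaining 6M allows one 4M
 * object-to-object copy_from, and the final 2M tail is handled by the
 * do_final_copy splice.
 */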
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};