// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
        u32 wire_flags = 0;

        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                wire_flags |= CEPH_O_RDONLY;
                break;
        case O_WRONLY:
                wire_flags |= CEPH_O_WRONLY;
                break;
        case O_RDWR:
                wire_flags |= CEPH_O_RDWR;
                break;
        }

        flags &= ~O_ACCMODE;

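        /*
         * Map open flags that have identically-named CEPH_* wire
         * equivalents; any leftover bits are only logged below.
         */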
#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

        ceph_sys2wire(O_CREAT);
        ceph_sys2wire(O_EXCL);
        ceph_sys2wire(O_TRUNC);
        ceph_sys2wire(O_DIRECTORY);
        ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

        if (flags)
                dout("unused open flags: %x\n", flags);

        return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES    64

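/*
 * Pin up to @maxsize bytes worth of pages from @iter and record them
 * in @bvecs.  Return the number of bytes covered, or an error if no
 * pages could be pinned.
 */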
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
                                struct bio_vec *bvecs)
{
        size_t size = 0;
        int bvec_idx = 0;

        if (maxsize > iov_iter_count(iter))
                maxsize = iov_iter_count(iter);

        while (size < maxsize) {
                struct page *pages[ITER_GET_BVECS_PAGES];
                ssize_t bytes;
                size_t start;
                int idx = 0;

                bytes = iov_iter_get_pages(iter, pages, maxsize - size,
                                           ITER_GET_BVECS_PAGES, &start);
                if (bytes < 0)
                        return size ?: bytes;

                iov_iter_advance(iter, bytes);
                size += bytes;
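                /*
                 * Carve the pinned pages into page-sized bio_vec
                 * entries; only the first can start at a nonzero
                 * offset.
                 */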
                for ( ; bytes; idx++, bvec_idx++) {
                        struct bio_vec bv = {
                                .bv_page = pages[idx],
                                .bv_len = min_t(int, bytes, PAGE_SIZE - start),
                                .bv_offset = start,
                        };

                        bvecs[bvec_idx] = bv;
                        bytes -= bv.bv_len;
                        start = 0;
                }
        }

        return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
                                    struct bio_vec **bvecs, int *num_bvecs)
{
        struct bio_vec *bv;
        size_t orig_count = iov_iter_count(iter);
        ssize_t bytes;
        int npages;

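        /*
         * Temporarily truncate the iterator so iov_iter_npages()
         * counts only the bytes we may actually pin, then restore
         * the original count.
         */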
        iov_iter_truncate(iter, maxsize);
        npages = iov_iter_npages(iter, INT_MAX);
        iov_iter_reexpand(iter, orig_count);

        /*
         * __iter_get_bvecs() may populate only part of the array -- zero it
         * out.
         */
        bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
        if (!bv)
                return -ENOMEM;

        bytes = __iter_get_bvecs(iter, maxsize, bv);
        if (bytes < 0) {
                /*
                 * No pages were pinned -- just free the array.
                 */
                kvfree(bv);
                return bytes;
        }

        *bvecs = bv;
        *num_bvecs = npages;
        return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
        int i;

        for (i = 0; i < num_bvecs; i++) {
                if (bvecs[i].bv_page) {
                        if (should_dirty)
                                set_page_dirty_lock(bvecs[i].bv_page);
                        put_page(bvecs[i].bv_page);
                }
        }
        kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
        struct ceph_mds_request *req;
        int want_auth = USE_ANY_MDS;
        int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

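        /* Opens that may modify the inode must go to the auth MDS. */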
        if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
                want_auth = USE_AUTH_MDS;

        req = ceph_mdsc_create_request(mdsc, op, want_auth);
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
        req->r_args.open.flags = ceph_flags_sys2wire(flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
out:
        return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
                                        int fmode, bool isdir)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi;

        dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
                        inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);

        if (isdir) {
                struct ceph_dir_file_info *dfi =
                        kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
                if (!dfi)
                        return -ENOMEM;

                file->private_data = dfi;
                fi = &dfi->file_info;
                dfi->next_offset = 2;
                dfi->readdir_cache_idx = -1;
        } else {
                fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
                if (!fi)
                        return -ENOMEM;

                file->private_data = fi;
        }

        ceph_get_fmode(ci, fmode, 1);
        fi->fmode = fmode;

        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
        fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

        return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
        int ret = 0;

        switch (inode->i_mode & S_IFMT) {
        case S_IFREG:
                ceph_fscache_register_inode_cookie(inode);
                ceph_fscache_file_set_cookie(inode, file);
                fallthrough;
        case S_IFDIR:
                ret = ceph_init_file_info(inode, file, fmode,
                                                S_ISDIR(inode->i_mode));
                break;

        case S_IFLNK:
                dout("init_file %p %p 0%o (symlink)\n", inode, file,
                     inode->i_mode);
                break;

        default:
                dout("init_file %p %p 0%o (special)\n", inode, file,
                     inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
                 */
                BUG_ON(inode->i_fop->release == ceph_release);

                /* call the proper open fop */
                ret = inode->i_fop->open(inode, file);
        }
        return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;

        spin_lock(&ci->i_ceph_lock);
        __ceph_touch_fmode(ci, mdsc, fmode);
        wanted = __ceph_caps_file_wanted(ci);
        if (__ceph_is_any_real_caps(ci) &&
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
                dout("renew caps %p want %s issued %s updating mds_wanted\n",
                     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
                ceph_check_caps(ci, 0, NULL);
                return 0;
        }
        spin_unlock(&ci->i_ceph_lock);

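        /* Reconstruct open flags matching the caps we want renewed. */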
        flags = 0;
        if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
                flags = O_RDWR;
        else if (wanted & CEPH_CAP_FILE_RD)
                flags = O_RDONLY;
        else if (wanted & CEPH_CAP_FILE_WR)
                flags = O_WRONLY;
#ifdef O_LAZY
        if (wanted & CEPH_CAP_FILE_LAZYIO)
                flags |= O_LAZY;
#endif

        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
out:
        dout("renew caps %p open result=%d\n", inode, err);
        return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
        int err;
        int flags, fmode, wanted;

        if (fi) {
                dout("open file %p is already opened\n", file);
                return 0;
        }

        /* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
        flags = file->f_flags & ~(O_CREAT|O_EXCL);
        if (S_ISDIR(inode->i_mode))
                flags = O_DIRECTORY;  /* mds likes to know */

        dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);

        /* snapped files are read-only */
        if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
                return -EROFS;

        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
                return ceph_init_file(inode, file, fmode);
        }

        /*
         * No need to block if we have caps on the auth MDS (for
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
        spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);

                dout("open %p fmode %d want %s issued %s using existing\n",
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);

                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
                    (mds_wanted & wanted) != wanted &&
                    ceph_snap(inode) != CEPH_SNAPDIR)
                        ceph_check_caps(ci, 0, NULL);

                return ceph_init_file(inode, file, fmode);
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }

        spin_unlock(&ci->i_ceph_lock);

        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_inode = inode;
        ihold(inode);

        req->r_num_caps = 1;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
        dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
        return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
        struct ceph_inode_info *cdst = ceph_inode(dst);
        struct ceph_inode_info *csrc = ceph_inode(src);

        spin_lock(&cdst->i_ceph_lock);
        if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
            !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
                memcpy(&cdst->i_cached_layout, &csrc->i_layout,
                        sizeof(cdst->i_cached_layout));
                rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
                                   ceph_try_get_string(csrc->i_layout.pool_ns));
        }
        spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
                                 struct ceph_file_layout *lo, u64 *pino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
        u64 ino;

        spin_lock(&ci->i_ceph_lock);
        /* No auth cap means no chance for Dc caps */
        if (!ci->i_auth_cap)
                goto no_async;

        /* Any delegated inos? */
        if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
                goto no_async;

        if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
                goto no_async;

        if ((__ceph_caps_issued(ci, NULL) & want) != want)
                goto no_async;

        if (d_in_lookup(dentry)) {
                if (!__ceph_dir_is_complete(ci))
                        goto no_async;
                spin_lock(&dentry->d_lock);
                di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
                spin_unlock(&dentry->d_lock);
        } else if (atomic_read(&ci->i_shared_gen) !=
                   READ_ONCE(di->lease_shared_gen)) {
                goto no_async;
        }

        ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
        if (!ino)
                goto no_async;

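        /* All criteria met: take cap refs and copy the cached layout. */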
        *pino = ino;
        ceph_take_cap_refs(ci, want, false);
        memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
        rcu_assign_pointer(lo->pool_ns,
                           ceph_try_get_string(ci->i_cached_layout.pool_ns));
        got = want;
no_async:
        spin_unlock(&ci->i_ceph_lock);
        return got;
}

static void restore_deleg_ino(struct inode *dir, u64 ino)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_mds_session *s = NULL;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_auth_cap)
                s = ceph_get_mds_session(ci->i_auth_cap->session);
        spin_unlock(&ci->i_ceph_lock);
        if (s) {
                int err = ceph_restore_deleg_ino(s, ino);
                if (err)
                        pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
                                ino, err);
                ceph_put_mds_session(s);
        }
}

static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
        int result = req->r_err ? req->r_err :
                        le32_to_cpu(req->r_reply_info.head->result);

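        /*
         * -EJUKEBOX means the MDS refused the async create; the open
         * path retries synchronously, so don't treat it as an error.
         */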
        if (result == -EJUKEBOX)
                goto out;

        mapping_set_error(req->r_parent->i_mapping, result);

        if (result) {
                struct dentry *dentry = req->r_dentry;
                struct inode *inode = d_inode(dentry);
                int pathlen = 0;
                u64 base = 0;
                char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
                                                  &base, 0);

                ceph_dir_clear_complete(req->r_parent);
                if (!d_unhashed(dentry))
                        d_drop(dentry);

                ceph_inode_shutdown(inode);

                pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
                        base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);
        }

        if (req->r_target_inode) {
                struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
                u64 ino = ceph_vino(req->r_target_inode).ino;

                if (req->r_deleg_ino != ino)
                        pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
                                __func__, req->r_err, req->r_deleg_ino, ino);
                mapping_set_error(req->r_target_inode->i_mapping, result);

                spin_lock(&ci->i_ceph_lock);
                if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
                        ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
                        wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
                }
                ceph_kick_flushing_inode_caps(req->r_session, ci);
                spin_unlock(&ci->i_ceph_lock);
        } else if (!result) {
                pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
                        req->r_deleg_ino);
        }
out:
        ceph_mdsc_release_dir_caps(req);
}

static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
                                    struct file *file, umode_t mode,
                                    struct ceph_mds_request *req,
                                    struct ceph_acl_sec_ctx *as_ctx,
                                    struct ceph_file_layout *lo)
{
        int ret;
        char xattr_buf[4];
        struct ceph_mds_reply_inode in = { };
        struct ceph_mds_reply_info_in iinfo = { .in = &in };
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct inode *inode;
        struct timespec64 now;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };

        ktime_get_real_ts64(&now);

        inode = ceph_get_inode(dentry->d_sb, vino);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

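        /*
         * Fabricate a minimal MDS reply so ceph_fill_inode() can set
         * up the new inode before the real create reply arrives.
         */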
        iinfo.inline_version = CEPH_INLINE_NONE;
        iinfo.change_attr = 1;
        ceph_encode_timespec64(&iinfo.btime, &now);

        iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
        iinfo.xattr_data = xattr_buf;
        memset(iinfo.xattr_data, 0, iinfo.xattr_len);

        in.ino = cpu_to_le64(vino.ino);
        in.snapid = cpu_to_le64(CEPH_NOSNAP);
        in.version = cpu_to_le64(1);    // ???
        in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
        in.cap.cap_id = cpu_to_le64(1);
        in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
        in.cap.flags = CEPH_CAP_FLAG_AUTH;
        in.ctime = in.mtime = in.atime = iinfo.btime;
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
        in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
        if (dir->i_mode & S_ISGID) {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

                /* Directories always inherit the setgid bit. */
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
                else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
                         !in_group_p(dir->i_gid) &&
                         !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
                        mode &= ~S_ISGID;
        } else {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
        }
        in.mode = cpu_to_le32((u32)mode);

        in.nlink = cpu_to_le32(1);
        in.max_size = cpu_to_le64(lo->stripe_unit);

        ceph_file_layout_to_legacy(lo, &in.layout);

        down_read(&mdsc->snap_rwsem);
        ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
                              req->r_fmode, NULL);
        up_read(&mdsc->snap_rwsem);
        if (ret) {
                dout("%s failed to fill inode: %d\n", __func__, ret);
                ceph_dir_clear_complete(dir);
                if (!d_unhashed(dentry))
                        d_drop(dentry);
                if (inode->i_state & I_NEW)
                        discard_new_inode(inode);
        } else {
                struct dentry *dn;

                dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
                        vino.ino, ceph_ino(dir), dentry->d_name.name);
                ceph_dir_clear_ordered(dir);
                ceph_init_inode_acls(inode, as_ctx);
                if (inode->i_state & I_NEW) {
                        /*
                         * If it's not I_NEW, then someone created this before
                         * we got here. Assume the server is aware of it at
                         * that point and don't worry about setting
                         * CEPH_I_ASYNC_CREATE.
                         */
                        ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
                        unlock_new_inode(inode);
                }
                if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
                        if (!d_unhashed(dentry))
                                d_drop(dentry);
                        dn = d_splice_alias(inode, dentry);
                        WARN_ON_ONCE(dn && dn != dentry);
                }
                file->f_mode |= FMODE_CREATED;
                ret = finish_open(file, dentry, ceph_open);
        }
        return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct dentry *dn;
        struct ceph_acl_sec_ctx as_ctx = {};
        bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
        int mask;
        int err;

        dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
             dir, dentry, dentry,
             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;

        if (flags & O_CREAT) {
                if (ceph_quota_is_max_files_exceeded(dir))
                        return -EDQUOT;
                err = ceph_pre_init_acls(dir, &mode, &as_ctx);
                if (err < 0)
                        return err;
                err = ceph_security_init_secctx(dentry, mode, &as_ctx);
                if (err < 0)
                        goto out_ctx;
        } else if (!d_in_lookup(dentry)) {
                /* If it's not being looked up, it's negative */
                return -ENOENT;
        }
retry:
        /* do the open */
        req = prepare_open_request(dir->i_sb, flags, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out_ctx;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
        if (ceph_security_xattr_wanted(dir))
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);
        req->r_parent = dir;
        ihold(dir);

        if (flags & O_CREAT) {
                struct ceph_file_layout lo;

                req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
                req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
                if (as_ctx.pagelist) {
                        req->r_pagelist = as_ctx.pagelist;
                        as_ctx.pagelist = NULL;
                }
                if (try_async &&
                    (req->r_dir_caps =
                      try_prep_async_create(dir, dentry, &lo,
                                            &req->r_deleg_ino))) {
                        set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
                        req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
                        req->r_callback = ceph_async_create_cb;
                        err = ceph_mdsc_submit_request(mdsc, dir, req);
                        if (!err) {
                                err = ceph_finish_async_create(dir, dentry,
                                                        file, mode, req,
                                                        &as_ctx, &lo);
                        } else if (err == -EJUKEBOX) {
                                restore_deleg_ino(dir, req->r_deleg_ino);
                                ceph_mdsc_put_request(req);
                                try_async = false;
                                goto retry;
                        }
                        goto out_req;
                }
        }

        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
        if (err == -ENOENT) {
                dentry = ceph_handle_snapdir(req, dentry);
                if (IS_ERR(dentry)) {
                        err = PTR_ERR(dentry);
                        goto out_req;
                }
                err = 0;
        }

        if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);

        if (d_in_lookup(dentry)) {
                dn = ceph_finish_lookup(req, dentry, err);
                if (IS_ERR(dn))
                        err = PTR_ERR(dn);
        } else {
                /* we were given a hashed negative dentry */
                dn = NULL;
        }
        if (err)
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                dout("atomic_open finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        struct inode *newino = d_inode(dentry);

                        cache_file_layout(dir, newino);
                        ceph_init_inode_acls(newino, &as_ctx);
                        file->f_mode |= FMODE_CREATED;
                }
                err = finish_open(file, dentry, ceph_open);
        }
out_req:
        ceph_mdsc_put_request(req);
out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
        dout("atomic_open result=%d\n", err);
        return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
        struct ceph_inode_info *ci = ceph_inode(inode);

        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
                dout("release inode %p dir file %p\n", inode, file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

                ceph_put_fmode(ci, dfi->file_info.fmode, 1);

                if (dfi->last_readdir)
                        ceph_mdsc_put_request(dfi->last_readdir);
                kfree(dfi->last_name);
                kfree(dfi->dir_info);
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
                dout("release inode %p regular file %p\n", inode, file);
                WARN_ON(!list_empty(&fi->rw_contexts));

                ceph_put_fmode(ci, fi->fmode, 1);

                kmem_cache_free(ceph_file_cachep, fi);
        }

        /* wake up anyone waiting for caps on this inode */
        wake_up_all(&ci->i_cap_wq);
        return 0;
}

enum {
        HAVE_RETRIED = 1,
        CHECK_EOF =    2,
        READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
                              int *retry_op)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = iocb->ki_pos;
        u64 len = iov_iter_count(to);
        u64 i_size = i_size_read(inode);

        dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

        if (!len)
                return 0;
        /*
         * flush any page cache pages in this range.  this
         * will make concurrent normal and sync io slow,
         * but it will at least behave sensibly when they are
         * in sequence.
         */
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           off, off + len - 1);
        if (ret < 0)
                return ret;

        ret = 0;
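        /*
         * Issue one OSD request at a time; ceph_osdc_new_request()
         * trims len to the object boundary, so a read spanning
         * objects takes multiple trips around this loop.
         */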
        while ((len = iov_iter_count(to)) > 0) {
                struct ceph_osd_request *req;
                struct page **pages;
                int num_pages;
                size_t page_off;
                bool more;
                int idx;
                size_t left;

                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, off, &len, 0, 1,
                                        CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                        NULL, ci->i_truncate_seq,
                                        ci->i_truncate_size, false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        break;
                }

                more = len < iov_iter_count(to);

                num_pages = calc_pages_for(off, len);
                page_off = off & ~PAGE_MASK;
                pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
                        break;
                }

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
                                                 false, false);
                ret = ceph_osdc_start_request(osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(osdc, req);

                ceph_update_read_metrics(&fsc->mdsc->metric,
                                         req->r_start_latency,
                                         req->r_end_latency,
                                         len, ret);

                ceph_osdc_put_request(req);

                i_size = i_size_read(inode);
                dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
                     off, len, ret, i_size, (more ? " MORE" : ""));

                if (ret == -ENOENT)
                        ret = 0;
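                /*
                 * Short read within i_size: the object may be sparse
                 * or short, so zero-fill the gap up to i_size.
                 */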
                if (ret >= 0 && ret < len && (off + ret < i_size)) {
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
                        dout("sync_read zero gap %llu~%llu\n",
                             off + ret, off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }

                idx = 0;
                left = ret > 0 ? ret : 0;
                while (left > 0) {
                        size_t len, copied;
                        page_off = off & ~PAGE_MASK;
                        len = min_t(size_t, left, PAGE_SIZE - page_off);
                        SetPageUptodate(pages[idx]);
                        copied = copy_page_to_iter(pages[idx++],
                                                   page_off, len, to);
                        off += copied;
                        left -= copied;
                        if (copied < len) {
                                ret = -EFAULT;
                                break;
                        }
                }
                ceph_release_page_vector(pages, num_pages);

                if (ret < 0) {
                        if (ret == -EBLOCKLISTED)
                                fsc->blocklisted = true;
                        break;
                }

                if (off >= i_size || !more)
                        break;
        }

        if (off > iocb->ki_pos) {
                if (off >= i_size) {
                        *retry_op = CHECK_EOF;
                        ret = i_size - iocb->ki_pos;
                        iocb->ki_pos = i_size;
                } else {
                        ret = off - iocb->ki_pos;
                        iocb->ki_pos = off;
                }
        }

        dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
        return ret;
}

struct ceph_aio_request {
        struct kiocb *iocb;
        size_t total_len;
        bool write;
        bool should_dirty;
        int error;
        struct list_head osd_reqs;
        unsigned num_reqs;
        atomic_t pending_reqs;
        struct timespec64 mtime;
        struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
        struct work_struct work;
        struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;

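        /* Only the last sub-request to complete finishes the iocb. */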
        if (!atomic_dec_and_test(&aio_req->pending_reqs))
                return;

        if (aio_req->iocb->ki_flags & IOCB_DIRECT)
                inode_dio_end(inode);

        ret = aio_req->error;
        if (!ret)
                ret = aio_req->total_len;

        dout("ceph_aio_complete %p rc %d\n", inode, ret);

        if (ret >= 0 && aio_req->write) {
                int dirty;

                loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
                if (endoff > i_size_read(inode)) {
                        if (ceph_inode_set_size(inode, endoff))
                                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
                }

                spin_lock(&ci->i_ceph_lock);
                ci->i_inline_version = CEPH_INLINE_NONE;
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
                                               &aio_req->prealloc_cf);
                spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);

        }

        ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
                                                CEPH_CAP_FILE_RD));

        aio_req->iocb->ki_complete(aio_req->iocb, ret);

        ceph_free_cap_flush(aio_req->prealloc_cf);
        kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
        int rc = req->r_result;
        struct inode *inode = req->r_inode;
        struct ceph_aio_request *aio_req = req->r_priv;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
        unsigned int len = osd_data->bvec_pos.iter.bi_size;

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);

        dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

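        /*
         * -EOLDSNAPC: the write raced with a snapshot.  We can't block
         * here, so punt to a workqueue to resubmit the request with
         * the newer snap context.
         */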
        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
                BUG_ON(!aio_req->write);

                aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
                        queue_work(ceph_inode_to_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
                rc = -ENOMEM;
        } else if (!aio_req->write) {
                if (rc == -ENOENT)
                        rc = 0;
                if (rc >= 0 && len > rc) {
                        struct iov_iter i;
                        int zlen = len - rc;

                        /*
                         * If read is satisfied by single OSD request,
                         * it can pass EOF. Otherwise read is within
                         * i_size.
                         */
                        if (aio_req->num_reqs == 1) {
                                loff_t i_size = i_size_read(inode);
                                loff_t endoff = aio_req->iocb->ki_pos + rc;
                                if (endoff < i_size)
                                        zlen = min_t(size_t, zlen,
                                                     i_size - endoff);
                                aio_req->total_len = rc + zlen;
                        }

                        iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
                                      osd_data->num_bvecs, len);
                        iov_iter_advance(&i, rc);
                        iov_iter_zero(zlen, &i);
                }
        }

        /* r_start_latency == 0 means the request was not submitted */
        if (req->r_start_latency) {
                if (aio_req->write)
                        ceph_update_write_metrics(metric, req->r_start_latency,
                                                  req->r_end_latency, len, rc);
                else
                        ceph_update_read_metrics(metric, req->r_start_latency,
                                                 req->r_end_latency, len, rc);
        }

        put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
                  aio_req->should_dirty);
        ceph_osdc_put_request(req);

        if (rc < 0)
                cmpxchg(&aio_req->error, 0, rc);

        ceph_aio_complete(inode, aio_req);
        return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
        struct ceph_aio_work *aio_work =
                container_of(work, struct ceph_aio_work, work);
        struct ceph_osd_request *orig_req = aio_work->req;
        struct ceph_aio_request *aio_req = orig_req->r_priv;
        struct inode *inode = orig_req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_osd_request *req;
        int ret;

        spin_lock(&ci->i_ceph_lock);
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                        list_last_entry(&ci->i_cap_snaps,
                                        struct ceph_cap_snap,
                                        ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);

        req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
                        false, GFP_NOFS);
        if (!req) {
                ret = -ENOMEM;
                req = orig_req;
                goto out;
        }

        req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
        ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

        req->r_ops[0] = orig_req->r_ops[0];

        req->r_mtime = aio_req->mtime;
        req->r_data_offset = req->r_ops[0].extent.offset;

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                req = orig_req;
                goto out;
        }

        ceph_osdc_put_request(orig_req);

        req->r_callback = ceph_aio_complete_req;
        req->r_inode = inode;
        req->r_priv = aio_req;

        ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
        if (ret < 0) {
                req->r_result = ret;
                ceph_aio_complete_req(req);
        }

        ceph_put_snap_context(snapc);
        kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                       struct ceph_snap_context *snapc,
                       struct ceph_cap_flush **pcf)
{
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_client_metric *metric = &fsc->mdsc->metric;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct bio_vec *bvecs;
        struct ceph_aio_request *aio_req = NULL;
        int num_pages = 0;
        int flags;
        int ret = 0;
        struct timespec64 mtime = current_time(inode);
        size_t count = iov_iter_count(iter);
        loff_t pos = iocb->ki_pos;
        bool write = iov_iter_rw(iter) == WRITE;
        bool should_dirty = !write && iter_is_iovec(iter);

        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;

        dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
             (write ? "write" : "read"), file, pos, (unsigned)count,
             snapc, snapc ? snapc->seq : 0);

        if (write) {
                int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
                        dout("invalidate_inode_pages2_range returned %d\n", ret2);

                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
                flags = CEPH_OSD_FLAG_READ;
        }

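        /*
         * Issue an OSD request per object's worth of data: either
         * queue each one on the aio request, or submit and wait for
         * it synchronously.
         */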
1232         while (iov_iter_count(iter) > 0) {
1233                 u64 size = iov_iter_count(iter);
1234                 ssize_t len;
1235
1236                 if (write)
1237                         size = min_t(u64, size, fsc->mount_options->wsize);
1238                 else
1239                         size = min_t(u64, size, fsc->mount_options->rsize);
1240
1241                 vino = ceph_vino(inode);
1242                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1243                                             vino, pos, &size, 0,
1244                                             1,
1245                                             write ? CEPH_OSD_OP_WRITE :
1246                                                     CEPH_OSD_OP_READ,
1247                                             flags, snapc,
1248                                             ci->i_truncate_seq,
1249                                             ci->i_truncate_size,
1250                                             false);
1251                 if (IS_ERR(req)) {
1252                         ret = PTR_ERR(req);
1253                         break;
1254                 }
1255
1256                 len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1257                 if (len < 0) {
1258                         ceph_osdc_put_request(req);
1259                         ret = len;
1260                         break;
1261                 }
1262                 if (len != size)
1263                         osd_req_op_extent_update(req, 0, len);
1264
1265                 /*
1266                  * To simplify error handling, allow AIO when IO within i_size
1267                  * or IO can be satisfied by single OSD request.
1268                  */
1269                 if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1270                     (len == count || pos + count <= i_size_read(inode))) {
1271                         aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1272                         if (aio_req) {
1273                                 aio_req->iocb = iocb;
1274                                 aio_req->write = write;
1275                                 aio_req->should_dirty = should_dirty;
1276                                 INIT_LIST_HEAD(&aio_req->osd_reqs);
1277                                 if (write) {
1278                                         aio_req->mtime = mtime;
1279                                         swap(aio_req->prealloc_cf, *pcf);
1280                                 }
1281                         }
1282                         /* ignore error */
1283                 }
1284
1285                 if (write) {
1286                         /*
1287                          * throw out any page cache pages in this range. this
1288                          * may block.
1289                          */
1290                         truncate_inode_pages_range(inode->i_mapping, pos,
1291                                                    PAGE_ALIGN(pos + len) - 1);
1292
1293                         req->r_mtime = mtime;
1294                 }
1295
1296                 osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1297
1298                 if (aio_req) {
1299                         aio_req->total_len += len;
1300                         aio_req->num_reqs++;
1301                         atomic_inc(&aio_req->pending_reqs);
1302
1303                         req->r_callback = ceph_aio_complete_req;
1304                         req->r_inode = inode;
1305                         req->r_priv = aio_req;
1306                         list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1307
1308                         pos += len;
1309                         continue;
1310                 }
1311
1312                 ret = ceph_osdc_start_request(req->r_osdc, req, false);
1313                 if (!ret)
1314                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1315
1316                 if (write)
1317                         ceph_update_write_metrics(metric, req->r_start_latency,
1318                                                   req->r_end_latency, len, ret);
1319                 else
1320                         ceph_update_read_metrics(metric, req->r_start_latency,
1321                                                  req->r_end_latency, len, ret);
1322
1323                 size = i_size_read(inode);
1324                 if (!write) {
1325                         if (ret == -ENOENT)
1326                                 ret = 0;
1327                         if (ret >= 0 && ret < len && pos + ret < size) {
1328                                 struct iov_iter i;
1329                                 int zlen = min_t(size_t, len - ret,
1330                                                  size - pos - ret);
1331
1332                                 iov_iter_bvec(&i, READ, bvecs, num_pages, len);
1333                                 iov_iter_advance(&i, ret);
1334                                 iov_iter_zero(zlen, &i);
1335                                 ret += zlen;
1336                         }
1337                         if (ret >= 0)
1338                                 len = ret;
1339                 }
1340
1341                 put_bvecs(bvecs, num_pages, should_dirty);
1342                 ceph_osdc_put_request(req);
1343                 if (ret < 0)
1344                         break;
1345
1346                 pos += len;
1347                 if (!write && pos >= size)
1348                         break;
1349
1350                 if (write && pos > size) {
1351                         if (ceph_inode_set_size(inode, pos))
1352                                 ceph_check_caps(ceph_inode(inode),
1353                                                 CHECK_CAPS_AUTHONLY,
1354                                                 NULL);
1355                 }
1356         }
1357
1358         if (aio_req) {
1359                 LIST_HEAD(osd_reqs);
1360
1361                 if (aio_req->num_reqs == 0) {
1362                         kfree(aio_req);
1363                         return ret;
1364                 }
1365
1366                 ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1367                                               CEPH_CAP_FILE_RD);
1368
1369                 list_splice(&aio_req->osd_reqs, &osd_reqs);
1370                 inode_dio_begin(inode);
1371                 while (!list_empty(&osd_reqs)) {
1372                         req = list_first_entry(&osd_reqs,
1373                                                struct ceph_osd_request,
1374                                                r_private_item);
1375                         list_del_init(&req->r_private_item);
1376                         if (ret >= 0)
1377                                 ret = ceph_osdc_start_request(req->r_osdc,
1378                                                               req, false);
1379                         if (ret < 0) {
1380                                 req->r_result = ret;
1381                                 ceph_aio_complete_req(req);
1382                         }
1383                 }
1384                 return -EIOCBQUEUED;
1385         }
1386
1387         if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1388                 ret = pos - iocb->ki_pos;
1389                 iocb->ki_pos = pos;
1390         }
1391         return ret;
1392 }
1393
1394 /*
1395  * Synchronous write, straight from __user pointer or user pages.
1396  *
1397  * If write spans object boundary, just do multiple writes.  (For a
1398  * correct atomic write, we should e.g. take write locks on all
1399  * objects, rollback on failure, etc.)
1400  */
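/*
 * Illustrative example (assuming the default 4 MB object size and a
 * non-striped layout): an 8 MB sync write starting at offset 2 MB is
 * issued as three OSD requests of 2 MB, 4 MB and 2 MB, because
 * ceph_osdc_new_request() clamps each request's length to the end of
 * the containing object.
 */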
1401 static ssize_t
1402 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1403                 struct ceph_snap_context *snapc)
1404 {
1405         struct file *file = iocb->ki_filp;
1406         struct inode *inode = file_inode(file);
1407         struct ceph_inode_info *ci = ceph_inode(inode);
1408         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1409         struct ceph_vino vino;
1410         struct ceph_osd_request *req;
1411         struct page **pages;
1412         u64 len;
1413         int num_pages;
1414         int written = 0;
1415         int flags;
1416         int ret;
1417         bool check_caps = false;
1418         struct timespec64 mtime = current_time(inode);
1419         size_t count = iov_iter_count(from);
1420
1421         if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1422                 return -EROFS;
1423
1424         dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1425              file, pos, (unsigned)count, snapc, snapc->seq);
1426
1427         ret = filemap_write_and_wait_range(inode->i_mapping,
1428                                            pos, pos + count - 1);
1429         if (ret < 0)
1430                 return ret;
1431
1432         ret = invalidate_inode_pages2_range(inode->i_mapping,
1433                                             pos >> PAGE_SHIFT,
1434                                             (pos + count - 1) >> PAGE_SHIFT);
1435         if (ret < 0)
1436                 dout("invalidate_inode_pages2_range returned %d\n", ret);
1437
1438         flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1439
1440         while ((len = iov_iter_count(from)) > 0) {
1441                 size_t left;
1442                 int n;
1443
1444                 vino = ceph_vino(inode);
1445                 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1446                                             vino, pos, &len, 0, 1,
1447                                             CEPH_OSD_OP_WRITE, flags, snapc,
1448                                             ci->i_truncate_seq,
1449                                             ci->i_truncate_size,
1450                                             false);
1451                 if (IS_ERR(req)) {
1452                         ret = PTR_ERR(req);
1453                         break;
1454                 }
1455
1456                 /*
1457                  * write from beginning of first page,
1458                  * regardless of io alignment
1459                  */
1460                 num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1461
1462                 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1463                 if (IS_ERR(pages)) {
1464                         ret = PTR_ERR(pages);
1465                         goto out;
1466                 }
1467
1468                 left = len;
1469                 for (n = 0; n < num_pages; n++) {
1470                         size_t plen = min_t(size_t, left, PAGE_SIZE);
1471                         ret = copy_page_from_iter(pages[n], 0, plen, from);
1472                         if (ret != plen) {
1473                                 ret = -EFAULT;
1474                                 break;
1475                         }
1476                         left -= ret;
1477                 }
1478
1479                 if (ret < 0) {
1480                         ceph_release_page_vector(pages, num_pages);
1481                         goto out;
1482                 }
1483
1484                 req->r_inode = inode;
1485
1486                 osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1487                                                 false, true);
1488
1489                 req->r_mtime = mtime;
1490                 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1491                 if (!ret)
1492                         ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1493
1494                 ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1495                                           req->r_end_latency, len, ret);
1496 out:
1497                 ceph_osdc_put_request(req);
1498                 if (ret != 0) {
1499                         ceph_set_error_write(ci);
1500                         break;
1501                 }
1502
1503                 ceph_clear_error_write(ci);
1504                 pos += len;
1505                 written += len;
1506                 if (pos > i_size_read(inode)) {
1507                         check_caps = ceph_inode_set_size(inode, pos);
1508                         if (check_caps)
1509                                 ceph_check_caps(ceph_inode(inode),
1510                                                 CHECK_CAPS_AUTHONLY,
1511                                                 NULL);
1512                 }
1513
1514         }
1515
1516         if (ret != -EOLDSNAPC && written > 0) {
1517                 ret = written;
1518                 iocb->ki_pos = pos;
1519         }
1520         return ret;
1521 }
1522
1523 /*
1524  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1525  * Atomically grab references, so that those bits are not released
1526  * back to the MDS mid-read.
1527  *
1528  * Hmm, the sync read case isn't actually async... should it be?
1529  */
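/*
 * Rough decision table for the read path below: holding Fc
 * (FILE_CACHE) or LAZYIO caps and not using O_DIRECT or CEPH_F_SYNC
 * means the read is served from the page cache via
 * generic_file_read_iter(); otherwise the page cache is bypassed with
 * ceph_direct_read_write() (O_DIRECT) or ceph_sync_read().
 */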
1530 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1531 {
1532         struct file *filp = iocb->ki_filp;
1533         struct ceph_file_info *fi = filp->private_data;
1534         size_t len = iov_iter_count(to);
1535         struct inode *inode = file_inode(filp);
1536         struct ceph_inode_info *ci = ceph_inode(inode);
1537         bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1538         ssize_t ret;
1539         int want, got = 0;
1540         int retry_op = 0, read = 0;
1541
1542 again:
1543         dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1544              inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1545
1546         if (ceph_inode_is_shutdown(inode))
1547                 return -ESTALE;
1548
1549         if (direct_lock)
1550                 ceph_start_io_direct(inode);
1551         else
1552                 ceph_start_io_read(inode);
1553
1554         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1555                 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1556         else
1557                 want = CEPH_CAP_FILE_CACHE;
1558         ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1559         if (ret < 0) {
1560                 if (direct_lock)
1561                         ceph_end_io_direct(inode);
1562                 else
1563                         ceph_end_io_read(inode);
1564                 return ret;
1565         }
1566
1567         if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1568             (iocb->ki_flags & IOCB_DIRECT) ||
1569             (fi->flags & CEPH_F_SYNC)) {
1570
1571                 dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1572                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1573                      ceph_cap_string(got));
1574
1575                 if (ci->i_inline_version == CEPH_INLINE_NONE) {
1576                         if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1577                                 ret = ceph_direct_read_write(iocb, to,
1578                                                              NULL, NULL);
1579                                 if (ret >= 0 && ret < len)
1580                                         retry_op = CHECK_EOF;
1581                         } else {
1582                                 ret = ceph_sync_read(iocb, to, &retry_op);
1583                         }
1584                 } else {
1585                         retry_op = READ_INLINE;
1586                 }
1587         } else {
1588                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1589                 dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1590                      inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1591                      ceph_cap_string(got));
1592                 ceph_add_rw_context(fi, &rw_ctx);
1593                 ret = generic_file_read_iter(iocb, to);
1594                 ceph_del_rw_context(fi, &rw_ctx);
1595         }
1596
1597         dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1598              inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1599         ceph_put_cap_refs(ci, got);
1600
1601         if (direct_lock)
1602                 ceph_end_io_direct(inode);
1603         else
1604                 ceph_end_io_read(inode);
1605
1606         if (retry_op > HAVE_RETRIED && ret >= 0) {
1607                 int statret;
1608                 struct page *page = NULL;
1609                 loff_t i_size;
1610                 if (retry_op == READ_INLINE) {
1611                         page = __page_cache_alloc(GFP_KERNEL);
1612                         if (!page)
1613                                 return -ENOMEM;
1614                 }
1615
1616                 statret = __ceph_do_getattr(inode, page,
1617                                             CEPH_STAT_CAP_INLINE_DATA, !!page);
1618                 if (statret < 0) {
1619                         if (page)
1620                                 __free_page(page);
1621                         if (statret == -ENODATA) {
1622                                 BUG_ON(retry_op != READ_INLINE);
1623                                 goto again;
1624                         }
1625                         return statret;
1626                 }
1627
1628                 i_size = i_size_read(inode);
1629                 if (retry_op == READ_INLINE) {
1630                         BUG_ON(ret > 0 || read > 0);
1631                         if (iocb->ki_pos < i_size &&
1632                             iocb->ki_pos < PAGE_SIZE) {
1633                                 loff_t end = min_t(loff_t, i_size,
1634                                                    iocb->ki_pos + len);
1635                                 end = min_t(loff_t, end, PAGE_SIZE);
1636                                 if (statret < end)
1637                                         zero_user_segment(page, statret, end);
1638                                 ret = copy_page_to_iter(page,
1639                                                 iocb->ki_pos & ~PAGE_MASK,
1640                                                 end - iocb->ki_pos, to);
1641                                 iocb->ki_pos += ret;
1642                                 read += ret;
1643                         }
1644                         if (iocb->ki_pos < i_size && read < len) {
1645                                 size_t zlen = min_t(size_t, len - read,
1646                                                     i_size - iocb->ki_pos);
1647                                 ret = iov_iter_zero(zlen, to);
1648                                 iocb->ki_pos += ret;
1649                                 read += ret;
1650                         }
1651                         __free_pages(page, 0);
1652                         return read;
1653                 }
1654
1655                 /* hit EOF or hole? */
1656                 if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1657                     ret < len) {
1658                         dout("sync_read hit hole, ppos %lld < size %lld"
1659                              ", reading more\n", iocb->ki_pos, i_size);
1660
1661                         read += ret;
1662                         len -= ret;
1663                         retry_op = HAVE_RETRIED;
1664                         goto again;
1665                 }
1666         }
1667
1668         if (ret >= 0)
1669                 ret += read;
1670
1671         return ret;
1672 }
1673
1674 /*
1675  * Take cap references to avoid releasing caps to MDS mid-write.
1676  *
1677  * If we are synchronous, and write with an old snap context, the OSD
1678  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1679  * dropping our cap refs and allowing the pending snap to logically
1680  * complete _before_ this write occurs.
1681  *
1682  * If we are near ENOSPC, write synchronously.
1683  */
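/*
 * Sketch of the EOLDSNAPC retry flow (matching the code below): the
 * sync/direct write fails with -EOLDSNAPC, cap refs are dropped, and
 * "goto retry_snap" re-takes the I/O lock and re-fetches the (now
 * newer) snap context before re-issuing the write.
 */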
1684 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1685 {
1686         struct file *file = iocb->ki_filp;
1687         struct ceph_file_info *fi = file->private_data;
1688         struct inode *inode = file_inode(file);
1689         struct ceph_inode_info *ci = ceph_inode(inode);
1690         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1691         struct ceph_osd_client *osdc = &fsc->client->osdc;
1692         struct ceph_cap_flush *prealloc_cf;
1693         ssize_t count, written = 0;
1694         int err, want, got;
1695         bool direct_lock = false;
1696         u32 map_flags;
1697         u64 pool_flags;
1698         loff_t pos;
1699         loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1700
1701         if (ceph_inode_is_shutdown(inode))
1702                 return -ESTALE;
1703
1704         if (ceph_snap(inode) != CEPH_NOSNAP)
1705                 return -EROFS;
1706
1707         prealloc_cf = ceph_alloc_cap_flush();
1708         if (!prealloc_cf)
1709                 return -ENOMEM;
1710
1711         if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1712                 direct_lock = true;
1713
1714 retry_snap:
1715         if (direct_lock)
1716                 ceph_start_io_direct(inode);
1717         else
1718                 ceph_start_io_write(inode);
1719
1720         /* We can write back this queue in page reclaim */
1721         current->backing_dev_info = inode_to_bdi(inode);
1722
1723         if (iocb->ki_flags & IOCB_APPEND) {
1724                 err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1725                 if (err < 0)
1726                         goto out;
1727         }
1728
1729         err = generic_write_checks(iocb, from);
1730         if (err <= 0)
1731                 goto out;
1732
1733         pos = iocb->ki_pos;
1734         if (unlikely(pos >= limit)) {
1735                 err = -EFBIG;
1736                 goto out;
1737         } else {
1738                 iov_iter_truncate(from, limit - pos);
1739         }
1740
1741         count = iov_iter_count(from);
1742         if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1743                 err = -EDQUOT;
1744                 goto out;
1745         }
1746
1747         down_read(&osdc->lock);
1748         map_flags = osdc->osdmap->flags;
1749         pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1750         up_read(&osdc->lock);
1751         if ((map_flags & CEPH_OSDMAP_FULL) ||
1752             (pool_flags & CEPH_POOL_FLAG_FULL)) {
1753                 err = -ENOSPC;
1754                 goto out;
1755         }
1756
1757         err = file_remove_privs(file);
1758         if (err)
1759                 goto out;
1760
1761         if (ci->i_inline_version != CEPH_INLINE_NONE) {
1762                 err = ceph_uninline_data(file, NULL);
1763                 if (err < 0)
1764                         goto out;
1765         }
1766
1767         dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1768              inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1769         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1770                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1771         else
1772                 want = CEPH_CAP_FILE_BUFFER;
1773         got = 0;
1774         err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1775         if (err < 0)
1776                 goto out;
1777
1778         err = file_update_time(file);
1779         if (err)
1780                 goto out_caps;
1781
1782         inode_inc_iversion_raw(inode);
1783
1784         dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1785              inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1786
1787         if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1788             (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1789             (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1790                 struct ceph_snap_context *snapc;
1791                 struct iov_iter data;
1792
1793                 spin_lock(&ci->i_ceph_lock);
1794                 if (__ceph_have_pending_cap_snap(ci)) {
1795                         struct ceph_cap_snap *capsnap =
1796                                         list_last_entry(&ci->i_cap_snaps,
1797                                                         struct ceph_cap_snap,
1798                                                         ci_item);
1799                         snapc = ceph_get_snap_context(capsnap->context);
1800                 } else {
1801                         BUG_ON(!ci->i_head_snapc);
1802                         snapc = ceph_get_snap_context(ci->i_head_snapc);
1803                 }
1804                 spin_unlock(&ci->i_ceph_lock);
1805
1806                 /* we might need to revert back to that point */
1807                 data = *from;
1808                 if (iocb->ki_flags & IOCB_DIRECT)
1809                         written = ceph_direct_read_write(iocb, &data, snapc,
1810                                                          &prealloc_cf);
1811                 else
1812                         written = ceph_sync_write(iocb, &data, pos, snapc);
1813                 if (direct_lock)
1814                         ceph_end_io_direct(inode);
1815                 else
1816                         ceph_end_io_write(inode);
1817                 if (written > 0)
1818                         iov_iter_advance(from, written);
1819                 ceph_put_snap_context(snapc);
1820         } else {
1821                 /*
1822                  * No need to acquire the i_truncate_mutex: the MDS
1823                  * revokes Fwb caps before sending a truncate message
1824                  * to us, and we can't get the Fwb cap while there is
1825                  * a pending vmtruncate.  So write and vmtruncate
1826                  * cannot run at the same time.
1827                  */
1828                 written = generic_perform_write(file, from, pos);
1829                 if (likely(written >= 0))
1830                         iocb->ki_pos = pos + written;
1831                 ceph_end_io_write(inode);
1832         }
1833
1834         if (written >= 0) {
1835                 int dirty;
1836
1837                 spin_lock(&ci->i_ceph_lock);
1838                 ci->i_inline_version = CEPH_INLINE_NONE;
1839                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1840                                                &prealloc_cf);
1841                 spin_unlock(&ci->i_ceph_lock);
1842                 if (dirty)
1843                         __mark_inode_dirty(inode, dirty);
1844                 if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1845                         ceph_check_caps(ci, 0, NULL);
1846         }
1847
1848         dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
1849              inode, ceph_vinop(inode), pos, (unsigned)count,
1850              ceph_cap_string(got));
1851         ceph_put_cap_refs(ci, got);
1852
1853         if (written == -EOLDSNAPC) {
1854                 dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1855                      inode, ceph_vinop(inode), pos, (unsigned)count);
1856                 goto retry_snap;
1857         }
1858
1859         if (written >= 0) {
1860                 if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1861                     (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1862                         iocb->ki_flags |= IOCB_DSYNC;
1863                 written = generic_write_sync(iocb, written);
1864         }
1865
1866         goto out_unlocked;
1867 out_caps:
1868         ceph_put_cap_refs(ci, got);
1869 out:
1870         if (direct_lock)
1871                 ceph_end_io_direct(inode);
1872         else
1873                 ceph_end_io_write(inode);
1874 out_unlocked:
1875         ceph_free_cap_flush(prealloc_cf);
1876         current->backing_dev_info = NULL;
1877         return written ? written : err;
1878 }
1879
1880 /*
1881  * llseek.  be sure to verify file size on SEEK_END.
1882  */
1883 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1884 {
1885         struct inode *inode = file->f_mapping->host;
1886         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1887         loff_t i_size;
1888         loff_t ret;
1889
1890         inode_lock(inode);
1891
1892         if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1893                 ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1894                 if (ret < 0)
1895                         goto out;
1896         }
1897
1898         i_size = i_size_read(inode);
1899         switch (whence) {
1900         case SEEK_END:
1901                 offset += i_size;
1902                 break;
1903         case SEEK_CUR:
1904                 /*
1905                  * Here we special-case the lseek(fd, 0, SEEK_CUR)
1906                  * position-querying operation.  Avoid rewriting the "same"
1907                  * f_pos value back to the file because a concurrent read(),
1908                  * write() or lseek() might have altered it
1909                  */
1910                 if (offset == 0) {
1911                         ret = file->f_pos;
1912                         goto out;
1913                 }
1914                 offset += file->f_pos;
1915                 break;
1916         case SEEK_DATA:
1917                 if (offset < 0 || offset >= i_size) {
1918                         ret = -ENXIO;
1919                         goto out;
1920                 }
1921                 break;
1922         case SEEK_HOLE:
1923                 if (offset < 0 || offset >= i_size) {
1924                         ret = -ENXIO;
1925                         goto out;
1926                 }
1927                 offset = i_size;
1928                 break;
1929         }
1930
1931         ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1932
1933 out:
1934         inode_unlock(inode);
1935         return ret;
1936 }
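/*
 * Note on SEEK_DATA/SEEK_HOLE above: the file is treated as a single
 * data extent, so any offset below i_size is "data" and the only
 * "hole" begins at i_size; actual object-store holes are not probed.
 */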
1937
1938 static inline void ceph_zero_partial_page(
1939         struct inode *inode, loff_t offset, unsigned size)
1940 {
1941         struct page *page;
1942         pgoff_t index = offset >> PAGE_SHIFT;
1943
1944         page = find_lock_page(inode->i_mapping, index);
1945         if (page) {
1946                 wait_on_page_writeback(page);
1947                 zero_user(page, offset & (PAGE_SIZE - 1), size);
1948                 unlock_page(page);
1949                 put_page(page);
1950         }
1951 }
1952
1953 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1954                                       loff_t length)
1955 {
1956         loff_t nearly = round_up(offset, PAGE_SIZE);
1957         if (offset < nearly) {
1958                 loff_t size = nearly - offset;
1959                 if (length < size)
1960                         size = length;
1961                 ceph_zero_partial_page(inode, offset, size);
1962                 offset += size;
1963                 length -= size;
1964         }
1965         if (length >= PAGE_SIZE) {
1966                 loff_t size = round_down(length, PAGE_SIZE);
1967                 truncate_pagecache_range(inode, offset, offset + size - 1);
1968                 offset += size;
1969                 length -= size;
1970         }
1971         if (length)
1972                 ceph_zero_partial_page(inode, offset, length);
1973 }
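/*
 * Worked example (illustrative, 4 KB pages): zeroing offset 1000 with
 * length 10000 zeroes bytes 1000..4095 of the first page, truncates
 * the single fully-covered page (4096..8191) out of the page cache,
 * and finally zeroes the first 2808 bytes of the last page.
 */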
1974
1975 static int ceph_zero_partial_object(struct inode *inode,
1976                                     loff_t offset, loff_t *length)
1977 {
1978         struct ceph_inode_info *ci = ceph_inode(inode);
1979         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1980         struct ceph_osd_request *req;
1981         int ret = 0;
1982         loff_t zero = 0;
1983         int op;
1984
1985         if (!length) {
1986                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1987                 length = &zero;
1988         } else {
1989                 op = CEPH_OSD_OP_ZERO;
1990         }
1991
1992         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1993                                         ceph_vino(inode),
1994                                         offset, length,
1995                                         0, 1, op,
1996                                         CEPH_OSD_FLAG_WRITE,
1997                                         NULL, 0, 0, false);
1998         if (IS_ERR(req)) {
1999                 ret = PTR_ERR(req);
2000                 goto out;
2001         }
2002
2003         req->r_mtime = inode->i_mtime;
2004         ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
2005         if (!ret) {
2006                 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2007                 if (ret == -ENOENT)
2008                         ret = 0;
2009         }
2010         ceph_osdc_put_request(req);
2011
2012 out:
2013         return ret;
2014 }
2015
2016 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2017 {
2018         int ret = 0;
2019         struct ceph_inode_info *ci = ceph_inode(inode);
2020         s32 stripe_unit = ci->i_layout.stripe_unit;
2021         s32 stripe_count = ci->i_layout.stripe_count;
2022         s32 object_size = ci->i_layout.object_size;
2023         u64 object_set_size = (u64)object_size * stripe_count; /* avoid s32 overflow */
2024         u64 nearly, t;
2025
2026         /* round offset up to next period boundary */
2027         nearly = offset + object_set_size - 1;
2028         t = nearly;
2029         nearly -= do_div(t, object_set_size);
2030
2031         while (length && offset < nearly) {
2032                 loff_t size = length;
2033                 ret = ceph_zero_partial_object(inode, offset, &size);
2034                 if (ret < 0)
2035                         return ret;
2036                 offset += size;
2037                 length -= size;
2038         }
2039         while (length >= object_set_size) {
2040                 int i;
2041                 loff_t pos = offset;
2042                 for (i = 0; i < stripe_count; ++i) {
2043                         ret = ceph_zero_partial_object(inode, pos, NULL);
2044                         if (ret < 0)
2045                                 return ret;
2046                         pos += stripe_unit;
2047                 }
2048                 offset += object_set_size;
2049                 length -= object_set_size;
2050         }
2051         while (length) {
2052                 loff_t size = length;
2053                 ret = ceph_zero_partial_object(inode, offset, &size);
2054                 if (ret < 0)
2055                         return ret;
2056                 offset += size;
2057                 length -= size;
2058         }
2059         return ret;
2060 }
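/*
 * Worked example (illustrative): with object_size = 4 MB and
 * stripe_count = 2, object_set_size is 8 MB.  Zeroing 20 MB at offset
 * 6 MB first zeroes the 2 MB up to the 8 MB period boundary with
 * partial-object ops, then issues whole-object-set deletes/truncates
 * for the next 16 MB, and partial zeroing for the trailing 2 MB.
 */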
2061
2062 static long ceph_fallocate(struct file *file, int mode,
2063                                 loff_t offset, loff_t length)
2064 {
2065         struct ceph_file_info *fi = file->private_data;
2066         struct inode *inode = file_inode(file);
2067         struct ceph_inode_info *ci = ceph_inode(inode);
2068         struct ceph_cap_flush *prealloc_cf;
2069         int want, got = 0;
2070         int dirty;
2071         int ret = 0;
2072         loff_t endoff = 0;
2073         loff_t size;
2074
2075         if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2076                 return -EOPNOTSUPP;
2077
2078         if (!S_ISREG(inode->i_mode))
2079                 return -EOPNOTSUPP;
2080
2081         prealloc_cf = ceph_alloc_cap_flush();
2082         if (!prealloc_cf)
2083                 return -ENOMEM;
2084
2085         inode_lock(inode);
2086
2087         if (ceph_snap(inode) != CEPH_NOSNAP) {
2088                 ret = -EROFS;
2089                 goto unlock;
2090         }
2091
2092         if (ci->i_inline_version != CEPH_INLINE_NONE) {
2093                 ret = ceph_uninline_data(file, NULL);
2094                 if (ret < 0)
2095                         goto unlock;
2096         }
2097
2098         size = i_size_read(inode);
2099
2100         /* Are we punching a hole beyond EOF? */
2101         if (offset >= size)
2102                 goto unlock;
2103         if ((offset + length) > size)
2104                 length = size - offset;
2105
2106         if (fi->fmode & CEPH_FILE_MODE_LAZY)
2107                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2108         else
2109                 want = CEPH_CAP_FILE_BUFFER;
2110
2111         ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2112         if (ret < 0)
2113                 goto unlock;
2114
2115         filemap_invalidate_lock(inode->i_mapping);
2116         ceph_zero_pagecache_range(inode, offset, length);
2117         ret = ceph_zero_objects(inode, offset, length);
2118
2119         if (!ret) {
2120                 spin_lock(&ci->i_ceph_lock);
2121                 ci->i_inline_version = CEPH_INLINE_NONE;
2122                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2123                                                &prealloc_cf);
2124                 spin_unlock(&ci->i_ceph_lock);
2125                 if (dirty)
2126                         __mark_inode_dirty(inode, dirty);
2127         }
2128         filemap_invalidate_unlock(inode->i_mapping);
2129
2130         ceph_put_cap_refs(ci, got);
2131 unlock:
2132         inode_unlock(inode);
2133         ceph_free_cap_flush(prealloc_cf);
2134         return ret;
2135 }
2136
2137 /*
2138  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2139  * src_ci.  Two attempts are made to obtain both caps, and an error is returned if
2140  * this fails; zero is returned on success.
2141  */
2142 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2143                           struct file *dst_filp,
2144                           loff_t dst_endoff, int *dst_got)
2145 {
2146         int ret = 0;
2147         bool retrying = false;
2148
2149 retry_caps:
2150         ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2151                             dst_endoff, dst_got);
2152         if (ret < 0)
2153                 return ret;
2154
2155         /*
2156          * Since we're already holding the FILE_WR capability for the dst file,
2157          * we would risk a deadlock by using ceph_get_caps.  Thus, we do a
2158          * retry dance instead to try to get both capabilities.
2159          */
2160         ret = ceph_try_get_caps(file_inode(src_filp),
2161                                 CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2162                                 false, src_got);
2163         if (ret <= 0) {
2164                 /* Start by dropping dst_ci caps and getting src_ci caps */
2165                 ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2166                 if (retrying) {
2167                         if (!ret)
2168                                 /* ceph_try_get_caps masks EAGAIN */
2169                                 ret = -EAGAIN;
2170                         return ret;
2171                 }
2172                 ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2173                                     CEPH_CAP_FILE_SHARED, -1, src_got);
2174                 if (ret < 0)
2175                         return ret;
2176                 /* ... drop src_ci caps too, and retry */
2177                 ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2178                 retrying = true;
2179                 goto retry_caps;
2180         }
2181         return ret;
2182 }
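/*
 * Ordering sketch for get_rd_wr_caps(): take dst Fwb (blocking), then
 * try src Frd without waiting for the MDS; on failure drop the dst
 * caps, get src Frd the blocking way, drop it again, and retry the
 * whole dance once, so we never block on one inode's caps while
 * holding the other's.
 */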
2183
2184 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2185                            struct ceph_inode_info *dst_ci, int dst_got)
2186 {
2187         ceph_put_cap_refs(src_ci, src_got);
2188         ceph_put_cap_refs(dst_ci, dst_got);
2189 }
2190
2191 /*
2192  * This function does several size-related checks, returning an error if:
2193  *  - source file is smaller than off+len
2194  *  - destination file size is not OK (inode_newsize_ok())
2195  *  - the max bytes quota is exceeded
2196  */
2197 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2198                            loff_t src_off, loff_t dst_off, size_t len)
2199 {
2200         loff_t size, endoff;
2201
2202         size = i_size_read(src_inode);
2203         /*
2204          * Don't copy beyond source file EOF.  Instead of simply setting length
2205          * to (size - src_off), just drop to VFS default implementation, as the
2206          * local i_size may be stale due to other clients writing to the source
2207          * inode.
2208          */
2209         if (src_off + len > size) {
2210                 dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2211                      src_off, len, size);
2212                 return -EOPNOTSUPP;
2213         }
2214         size = i_size_read(dst_inode);
2215
2216         endoff = dst_off + len;
2217         if (inode_newsize_ok(dst_inode, endoff))
2218                 return -EOPNOTSUPP;
2219
2220         if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2221                 return -EDQUOT;
2222
2223         return 0;
2224 }
2225
2226 static struct ceph_osd_request *
2227 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2228                             u64 src_snapid,
2229                             struct ceph_object_id *src_oid,
2230                             struct ceph_object_locator *src_oloc,
2231                             struct ceph_object_id *dst_oid,
2232                             struct ceph_object_locator *dst_oloc,
2233                             u32 truncate_seq, u64 truncate_size)
2234 {
2235         struct ceph_osd_request *req;
2236         int ret;
2237         u32 src_fadvise_flags =
2238                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2239                 CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2240         u32 dst_fadvise_flags =
2241                 CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2242                 CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2243
2244         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2245         if (!req)
2246                 return ERR_PTR(-ENOMEM);
2247
2248         req->r_flags = CEPH_OSD_FLAG_WRITE;
2249
2250         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2251         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2252
2253         ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2254                                         src_oid, src_oloc,
2255                                         src_fadvise_flags,
2256                                         dst_fadvise_flags,
2257                                         truncate_seq,
2258                                         truncate_size,
2259                                         CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2260         if (ret)
2261                 goto out;
2262
2263         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2264         if (ret)
2265                 goto out;
2266
2267         return req;
2268
2269 out:
2270         ceph_osdc_put_request(req);
2271         return ERR_PTR(ret);
2272 }
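/*
 * The fadvise hints above (an interpretation, not spelled out in the
 * code): SEQUENTIAL + NOCACHE on the source and SEQUENTIAL + DONTNEED
 * on the destination tell the OSDs the copied data is streamed once
 * and is unlikely to be re-read soon from either object.
 */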
2273
2274 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2275                                     struct ceph_inode_info *dst_ci, u64 *dst_off,
2276                                     struct ceph_fs_client *fsc,
2277                                     size_t len, unsigned int flags)
2278 {
2279         struct ceph_object_locator src_oloc, dst_oloc;
2280         struct ceph_object_id src_oid, dst_oid;
2281         struct ceph_osd_client *osdc;
2282         struct ceph_osd_request *req;
2283         size_t bytes = 0;
2284         u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2285         u32 src_objlen, dst_objlen;
2286         u32 object_size = src_ci->i_layout.object_size;
2287         int ret;
2288
2289         src_oloc.pool = src_ci->i_layout.pool_id;
2290         src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2291         dst_oloc.pool = dst_ci->i_layout.pool_id;
2292         dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2293         osdc = &fsc->client->osdc;
2294
2295         while (len >= object_size) {
2296                 ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2297                                               object_size, &src_objnum,
2298                                               &src_objoff, &src_objlen);
2299                 ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2300                                               object_size, &dst_objnum,
2301                                               &dst_objoff, &dst_objlen);
2302                 ceph_oid_init(&src_oid);
2303                 ceph_oid_printf(&src_oid, "%llx.%08llx",
2304                                 src_ci->i_vino.ino, src_objnum);
2305                 ceph_oid_init(&dst_oid);
2306                 ceph_oid_printf(&dst_oid, "%llx.%08llx",
2307                                 dst_ci->i_vino.ino, dst_objnum);
2308                 /* Do an object remote copy */
2309                 req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2310                                                   &src_oid, &src_oloc,
2311                                                   &dst_oid, &dst_oloc,
2312                                                   dst_ci->i_truncate_seq,
2313                                                   dst_ci->i_truncate_size);
2314                 if (IS_ERR(req))
2315                         ret = PTR_ERR(req);
2316                 else {
2317                         ceph_osdc_start_request(osdc, req, false);
2318                         ret = ceph_osdc_wait_request(osdc, req);
2319                         ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2320                                                      req->r_start_latency,
2321                                                      req->r_end_latency,
2322                                                      object_size, ret);
2323                         ceph_osdc_put_request(req);
2324                 }
2325                 if (ret) {
2326                         if (ret == -EOPNOTSUPP) {
2327                                 fsc->have_copy_from2 = false;
2328                                 pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2329                         }
2330                         dout("ceph_osdc_copy_from returned %d\n", ret);
2331                         if (!bytes)
2332                                 bytes = ret;
2333                         goto out;
2334                 }
2335                 len -= object_size;
2336                 bytes += object_size;
2337                 *src_off += object_size;
2338                 *dst_off += object_size;
2339         }
2340
2341 out:
2342         ceph_oloc_destroy(&src_oloc);
2343         ceph_oloc_destroy(&dst_oloc);
2344         return bytes;
2345 }
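/*
 * Object naming example (illustrative): for source inode number
 * 0x10000000000 and object index 2, ceph_oid_printf() above yields
 * the RADOS object name "10000000000.00000002".
 */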
2346
2347 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2348                                       struct file *dst_file, loff_t dst_off,
2349                                       size_t len, unsigned int flags)
2350 {
2351         struct inode *src_inode = file_inode(src_file);
2352         struct inode *dst_inode = file_inode(dst_file);
2353         struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2354         struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2355         struct ceph_cap_flush *prealloc_cf;
2356         struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2357         loff_t size;
2358         ssize_t ret = -EIO, bytes;
2359         u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2360         u32 src_objlen, dst_objlen;
2361         int src_got = 0, dst_got = 0, err, dirty;
2362
2363         if (src_inode->i_sb != dst_inode->i_sb) {
2364                 struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2365
2366                 if (ceph_fsid_compare(&src_fsc->client->fsid,
2367                                       &dst_fsc->client->fsid)) {
2368                         dout("Copying files across clusters: src: %pU dst: %pU\n",
2369                              &src_fsc->client->fsid, &dst_fsc->client->fsid);
2370                         return -EXDEV;
2371                 }
2372         }
2373         if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2374                 return -EROFS;
2375
2376         /*
2377          * Some of the checks below will return -EOPNOTSUPP, which will force a
2378          * fallback to the default VFS copy_file_range implementation.  This is
2379          * desirable in several cases (for ex, the 'len' is smaller than the
2380          * size of the objects, or in cases where that would be more
2381          * efficient).
2382          */
2383
2384         if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2385                 return -EOPNOTSUPP;
2386
2387         if (!src_fsc->have_copy_from2)
2388                 return -EOPNOTSUPP;
2389
2390         /*
2391          * Striped file layouts require that we copy partial objects, but the
2392          * OSD copy-from operation only supports full-object copies.  Limit
2393          * this to non-striped file layouts for now.
2394          */
2395         if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2396             (src_ci->i_layout.stripe_count != 1) ||
2397             (dst_ci->i_layout.stripe_count != 1) ||
2398             (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2399                 dout("Invalid src/dst files layout\n");
2400                 return -EOPNOTSUPP;
2401         }
2402
2403         if (len < src_ci->i_layout.object_size)
2404                 return -EOPNOTSUPP; /* no remote copy will be done */
2405
2406         prealloc_cf = ceph_alloc_cap_flush();
2407         if (!prealloc_cf)
2408                 return -ENOMEM;
2409
2410         /* Start by sync'ing the source and destination files */
2411         ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2412         if (ret < 0) {
2413                 dout("failed to write src file (%zd)\n", ret);
2414                 goto out;
2415         }
2416         ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2417         if (ret < 0) {
2418                 dout("failed to write dst file (%zd)\n", ret);
2419                 goto out;
2420         }
2421
2422         /*
2423          * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2424          * clients may have dirty data in their caches.  And OSDs know nothing
2425          * about caps, so they can't safely do the remote object copies.
2426          */
2427         err = get_rd_wr_caps(src_file, &src_got,
2428                              dst_file, (dst_off + len), &dst_got);
2429         if (err < 0) {
2430                 dout("get_rd_wr_caps returned %d\n", err);
2431                 ret = -EOPNOTSUPP;
2432                 goto out;
2433         }
2434
2435         ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2436         if (ret < 0)
2437                 goto out_caps;
2438
2439         /* Drop dst file cached pages */
2440         ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2441                                             dst_off >> PAGE_SHIFT,
2442                                             (dst_off + len) >> PAGE_SHIFT);
2443         if (ret < 0) {
2444                 dout("Failed to invalidate inode pages (%zd)\n", ret);
2445                 ret = 0; /* XXX */
2446         }
2447         ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2448                                       src_ci->i_layout.object_size,
2449                                       &src_objnum, &src_objoff, &src_objlen);
2450         ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2451                                       dst_ci->i_layout.object_size,
2452                                       &dst_objnum, &dst_objoff, &dst_objlen);
2453         /* object-level offsets need to be the same */
2454         if (src_objoff != dst_objoff) {
2455                 ret = -EOPNOTSUPP;
2456                 goto out_caps;
2457         }
2458
2459         /*
2460          * Do a manual copy if the object offset isn't object aligned.
2461          * 'src_objlen' contains the bytes left until the end of the object,
2462          * starting at src_off.
2463          */
2464         if (src_objoff) {
2465                 dout("Initial partial copy of %u bytes\n", src_objlen);
2466
2467                 /*
2468                  * we need to temporarily drop all caps as we'll be calling
2469                  * {read,write}_iter, which will get caps again.
2470                  */
2471                 put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2472                 ret = do_splice_direct(src_file, &src_off, dst_file,
2473                                        &dst_off, src_objlen, flags);
2474                 /* Abort on short copies or on error */
2475                 if (ret < src_objlen) {
2476                         dout("Failed partial copy (%zd)\n", ret);
2477                         goto out;
2478                 }
2479                 len -= ret;
2480                 err = get_rd_wr_caps(src_file, &src_got,
2481                                      dst_file, (dst_off + len), &dst_got);
2482                 if (err < 0)
2483                         goto out;
2484                 err = is_file_size_ok(src_inode, dst_inode,
2485                                       src_off, dst_off, len);
2486                 if (err < 0)
2487                         goto out_caps;
2488         }
2489
2490         size = i_size_read(dst_inode);
2491         bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2492                                      src_fsc, len, flags);
2493         if (bytes <= 0) {
2494                 if (!ret)
2495                         ret = bytes;
2496                 goto out_caps;
2497         }
2498         dout("Copied %zu bytes out of %zu\n", bytes, len);
2499         len -= bytes;
2500         ret += bytes;
2501
2502         file_update_time(dst_file);
2503         inode_inc_iversion_raw(dst_inode);
2504
2505         if (dst_off > size) {
2506                 /* Let the MDS know about dst file size change */
2507                 if (ceph_inode_set_size(dst_inode, dst_off) ||
2508                     ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2509                         ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2510         }
2511         /* Mark Fw dirty */
2512         spin_lock(&dst_ci->i_ceph_lock);
2513         dst_ci->i_inline_version = CEPH_INLINE_NONE;
2514         dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2515         spin_unlock(&dst_ci->i_ceph_lock);
2516         if (dirty)
2517                 __mark_inode_dirty(dst_inode, dirty);
2518
2519 out_caps:
2520         put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2521
2522         /*
2523          * Do the final manual copy if we still have some bytes left, unless
2524          * there were errors in remote object copies (len >= object_size).
2525          */
2526         if (len && (len < src_ci->i_layout.object_size)) {
2527                 dout("Final partial copy of %zu bytes\n", len);
2528                 bytes = do_splice_direct(src_file, &src_off, dst_file,
2529                                          &dst_off, len, flags);
2530                 if (bytes > 0)
2531                         ret += bytes;
2532                 else
2533                         dout("Failed partial copy (%zd)\n", bytes);
2534         }
2535
2536 out:
2537         ceph_free_cap_flush(prealloc_cf);
2538
2539         return ret;
2540 }
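/*
 * Overall shape of __ceph_copy_file_range() (informal): an optional
 * head handled by do_splice_direct() until src_off is object-aligned,
 * then full-object remote copies via copy-from2, then an optional
 * tail of less than one object, again via do_splice_direct().
 */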
2541
2542 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2543                                     struct file *dst_file, loff_t dst_off,
2544                                     size_t len, unsigned int flags)
2545 {
2546         ssize_t ret;
2547
2548         ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2549                                      len, flags);
2550
2551         if (ret == -EOPNOTSUPP || ret == -EXDEV)
2552                 ret = generic_copy_file_range(src_file, src_off, dst_file,
2553                                               dst_off, len, flags);
2554         return ret;
2555 }
2556
2557 const struct file_operations ceph_file_fops = {
2558         .open = ceph_open,
2559         .release = ceph_release,
2560         .llseek = ceph_llseek,
2561         .read_iter = ceph_read_iter,
2562         .write_iter = ceph_write_iter,
2563         .mmap = ceph_mmap,
2564         .fsync = ceph_fsync,
2565         .lock = ceph_lock,
2566         .setlease = simple_nosetlease,
2567         .flock = ceph_flock,
2568         .splice_read = generic_file_splice_read,
2569         .splice_write = iter_file_splice_write,
2570         .unlocked_ioctl = ceph_ioctl,
2571         .compat_ioctl = compat_ptr_ioctl,
2572         .fallocate      = ceph_fallocate,
2573         .copy_file_range = ceph_copy_file_range,
2574 };