1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
4 #include <linux/module.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
19 #include "mds_client.h"
21 #include <linux/ceph/decode.h>
24 * Ceph inode operations
26 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
27 * setattr, etc.), xattr helpers, and helpers for assimilating
28 * metadata returned by the MDS into our cache.
30 * Also define helpers for doing asynchronous writeback, invalidation,
31 * and truncation for the benefit of those who can't afford to block
32 * (typically because they are in the message handler path).
35 static const struct inode_operations ceph_symlink_iops;
37 static void ceph_inode_work(struct work_struct *work);
40 * find or create an inode, given the ceph ino number
42 static int ceph_set_ino_cb(struct inode *inode, void *data)
44 struct ceph_inode_info *ci = ceph_inode(inode);
45 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
47 ci->i_vino = *(struct ceph_vino *)data;
48 inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
49 inode_set_iversion_raw(inode, 0);
50 percpu_counter_inc(&mdsc->metric.total_inodes);
55 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
59 inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
60 ceph_set_ino_cb, &vino);
62 return ERR_PTR(-ENOMEM);
64 dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
65 ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
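/*
 * Minimal usage sketch (hypothetical caller): build a ceph_vino for the
 * head revision and check for allocation failure:
 *
 *	struct ceph_vino vino = { .ino = ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * If I_NEW is set, the caller must fill the inode and then call
 * unlock_new_inode() (or discard_new_inode() on error).
 */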
70 * get/construct snapdir inode for a given directory
72 struct inode *ceph_get_snapdir(struct inode *parent)
74 struct ceph_vino vino = {
75 .ino = ceph_ino(parent),
78 struct inode *inode = ceph_get_inode(parent->i_sb, vino);
79 struct ceph_inode_info *ci = ceph_inode(inode);
81 BUG_ON(!S_ISDIR(parent->i_mode));
84 inode->i_mode = parent->i_mode;
85 inode->i_uid = parent->i_uid;
86 inode->i_gid = parent->i_gid;
87 inode->i_mtime = parent->i_mtime;
88 inode->i_ctime = parent->i_ctime;
89 inode->i_atime = parent->i_atime;
90 inode->i_op = &ceph_snapdir_iops;
91 inode->i_fop = &ceph_snapdir_fops;
92 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
94 ci->i_btime = ceph_inode(parent)->i_btime;
96 if (inode->i_state & I_NEW)
97 unlock_new_inode(inode);
102 const struct inode_operations ceph_file_iops = {
103 .permission = ceph_permission,
104 .setattr = ceph_setattr,
105 .getattr = ceph_getattr,
106 .listxattr = ceph_listxattr,
107 .get_acl = ceph_get_acl,
108 .set_acl = ceph_set_acl,
113 * We use a 'frag tree' to keep track of the MDS's directory fragments
114 * for a given inode (usually there is just a single fragment). We
115 * need to know when a child frag is delegated to a new MDS, or when
116 * it is flagged as replicated, so we can direct our requests accordingly.
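/*
 * Encoding sketch (assuming the standard ceph_frag helpers): a frag
 * packs a significant-bit count and a 24-bit value into one u32, so a
 * one-way split of the root yields two children:
 *
 *	u32 root  = ceph_frag_make(0, 0);               covers everything
 *	u32 left  = ceph_frag_make_child(root, 1, 0);   value 0x000000
 *	u32 right = ceph_frag_make_child(root, 1, 1);   value 0x800000
 *
 * ceph_frag_contains_value(left, v) then holds for any hash whose top
 * bit is clear.
 */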
121 * find/create a frag in the tree
123 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
127 struct rb_node *parent = NULL;
128 struct ceph_inode_frag *frag;
131 p = &ci->i_fragtree.rb_node;
134 frag = rb_entry(parent, struct ceph_inode_frag, node);
135 c = ceph_frag_compare(f, frag->frag);
144 frag = kmalloc(sizeof(*frag), GFP_NOFS);
146 return ERR_PTR(-ENOMEM);
153 rb_link_node(&frag->node, parent, p);
154 rb_insert_color(&frag->node, &ci->i_fragtree);
156 dout("get_or_create_frag added %llx.%llx frag %x\n",
157 ceph_vinop(&ci->vfs_inode), f);
162 * find a specific frag @f
164 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
166 struct rb_node *n = ci->i_fragtree.rb_node;
169 struct ceph_inode_frag *frag =
170 rb_entry(n, struct ceph_inode_frag, node);
171 int c = ceph_frag_compare(f, frag->frag);
183 * Choose frag containing the given value @v. If @pfrag is
184 * specified, copy the frag delegation info to the caller if it is present.
187 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
188 struct ceph_inode_frag *pfrag, int *found)
190 u32 t = ceph_frag_make(0, 0);
191 struct ceph_inode_frag *frag;
199 WARN_ON(!ceph_frag_contains_value(t, v));
200 frag = __ceph_find_frag(ci, t);
202 break; /* t is a leaf */
203 if (frag->split_by == 0) {
205 memcpy(pfrag, frag, sizeof(*pfrag));
212 nway = 1 << frag->split_by;
213 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
214 frag->split_by, nway);
215 for (i = 0; i < nway; i++) {
216 n = ceph_frag_make_child(t, frag->split_by, i);
217 if (ceph_frag_contains_value(n, v)) {
224 dout("choose_frag(%x) = %x\n", v, t);
229 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
230 struct ceph_inode_frag *pfrag, int *found)
233 mutex_lock(&ci->i_fragtree_mutex);
234 ret = __ceph_choose_frag(ci, v, pfrag, found);
235 mutex_unlock(&ci->i_fragtree_mutex);
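/*
 * Usage sketch (hypothetical caller): map a dentry name hash to the
 * frag that owns it, optionally copying out delegation info:
 *
 *	struct ceph_inode_frag frag;
 *	int found;
 *	u32 fg = ceph_choose_frag(ci, hash, &frag, &found);
 *
 * @found reports whether @frag was filled in from the tree.
 */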
240 * Process dirfrag (delegation) info from the mds. Include leaf
241 * fragment in tree ONLY if ndist > 0. Otherwise, only
242 * branches/splits are included in i_fragtree.
244 static int ceph_fill_dirfrag(struct inode *inode,
245 struct ceph_mds_reply_dirfrag *dirinfo)
247 struct ceph_inode_info *ci = ceph_inode(inode);
248 struct ceph_inode_frag *frag;
249 u32 id = le32_to_cpu(dirinfo->frag);
250 int mds = le32_to_cpu(dirinfo->auth);
251 int ndist = le32_to_cpu(dirinfo->ndist);
256 spin_lock(&ci->i_ceph_lock);
258 diri_auth = ci->i_auth_cap->mds;
259 spin_unlock(&ci->i_ceph_lock);
261 if (mds == -1) /* CDIR_AUTH_PARENT */
264 mutex_lock(&ci->i_fragtree_mutex);
265 if (ndist == 0 && mds == diri_auth) {
266 /* no delegation info needed. */
267 frag = __ceph_find_frag(ci, id);
270 if (frag->split_by == 0) {
271 /* tree leaf, remove */
272 dout("fill_dirfrag removed %llx.%llx frag %x"
273 " (no ref)\n", ceph_vinop(inode), id);
274 rb_erase(&frag->node, &ci->i_fragtree);
277 /* tree branch, keep and clear */
278 dout("fill_dirfrag cleared %llx.%llx frag %x"
279 " referral\n", ceph_vinop(inode), id);
287 /* find/add this frag to store mds delegation info */
288 frag = __get_or_create_frag(ci, id);
290 /* this is not the end of the world; we can continue
291 with bad/inaccurate delegation info */
292 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
293 ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
299 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
300 for (i = 0; i < frag->ndist; i++)
301 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
302 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
303 ceph_vinop(inode), frag->frag, frag->ndist);
306 mutex_unlock(&ci->i_fragtree_mutex);
310 static int frag_tree_split_cmp(const void *l, const void *r)
312 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
313 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
314 return ceph_frag_compare(le32_to_cpu(ls->frag),
315 le32_to_cpu(rs->frag));
318 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
321 return f == ceph_frag_make(0, 0);
322 if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
324 return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
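/*
 * Worked example (assuming the frag encoding above): for a root frag
 * with split_by == 1, f = ceph_frag_make(1, 0x800000) passes both
 * checks and is a child; f = ceph_frag_make(2, 0x800000) fails the
 * bit-count check because 2 != 0 + 1.
 */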
327 static int ceph_fill_fragtree(struct inode *inode,
328 struct ceph_frag_tree_head *fragtree,
329 struct ceph_mds_reply_dirfrag *dirinfo)
331 struct ceph_inode_info *ci = ceph_inode(inode);
332 struct ceph_inode_frag *frag, *prev_frag = NULL;
333 struct rb_node *rb_node;
334 unsigned i, split_by, nsplits;
338 mutex_lock(&ci->i_fragtree_mutex);
339 nsplits = le32_to_cpu(fragtree->nsplits);
340 if (nsplits != ci->i_fragtree_nsplits) {
342 } else if (nsplits) {
343 i = prandom_u32() % nsplits;
344 id = le32_to_cpu(fragtree->splits[i].frag);
345 if (!__ceph_find_frag(ci, id))
347 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
348 rb_node = rb_first(&ci->i_fragtree);
349 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
350 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
353 if (!update && dirinfo) {
354 id = le32_to_cpu(dirinfo->frag);
355 if (id != __ceph_choose_frag(ci, id, NULL, NULL))
362 sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
363 frag_tree_split_cmp, NULL);
366 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
367 rb_node = rb_first(&ci->i_fragtree);
368 for (i = 0; i < nsplits; i++) {
369 id = le32_to_cpu(fragtree->splits[i].frag);
370 split_by = le32_to_cpu(fragtree->splits[i].by);
371 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
372 pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
373 "frag %x split by %d\n", ceph_vinop(inode),
374 i, nsplits, id, split_by);
379 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
380 if (ceph_frag_compare(frag->frag, id) >= 0) {
381 if (frag->frag != id)
384 rb_node = rb_next(rb_node);
387 rb_node = rb_next(rb_node);
388 /* delete stale split/leaf node */
389 if (frag->split_by > 0 ||
390 !is_frag_child(frag->frag, prev_frag)) {
391 rb_erase(&frag->node, &ci->i_fragtree);
392 if (frag->split_by > 0)
393 ci->i_fragtree_nsplits--;
399 frag = __get_or_create_frag(ci, id);
403 if (frag->split_by == 0)
404 ci->i_fragtree_nsplits++;
405 frag->split_by = split_by;
406 dout(" frag %x split by %d\n", frag->frag, frag->split_by);
410 frag = rb_entry(rb_node, struct ceph_inode_frag, node);
411 rb_node = rb_next(rb_node);
412 /* delete stale split/leaf node */
413 if (frag->split_by > 0 ||
414 !is_frag_child(frag->frag, prev_frag)) {
415 rb_erase(&frag->node, &ci->i_fragtree);
416 if (frag->split_by > 0)
417 ci->i_fragtree_nsplits--;
422 mutex_unlock(&ci->i_fragtree_mutex);
427 * initialize a newly allocated inode.
429 struct inode *ceph_alloc_inode(struct super_block *sb)
431 struct ceph_inode_info *ci;
434 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
438 dout("alloc_inode %p\n", &ci->vfs_inode);
440 spin_lock_init(&ci->i_ceph_lock);
443 ci->i_inline_version = 0;
444 ci->i_time_warp_seq = 0;
445 ci->i_ceph_flags = 0;
446 atomic64_set(&ci->i_ordered_count, 1);
447 atomic64_set(&ci->i_release_count, 1);
448 atomic64_set(&ci->i_complete_seq[0], 0);
449 atomic64_set(&ci->i_complete_seq[1], 0);
450 ci->i_symlink = NULL;
455 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
456 memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
457 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
459 ci->i_fragtree = RB_ROOT;
460 mutex_init(&ci->i_fragtree_mutex);
462 ci->i_xattrs.blob = NULL;
463 ci->i_xattrs.prealloc_blob = NULL;
464 ci->i_xattrs.dirty = false;
465 ci->i_xattrs.index = RB_ROOT;
466 ci->i_xattrs.count = 0;
467 ci->i_xattrs.names_size = 0;
468 ci->i_xattrs.vals_size = 0;
469 ci->i_xattrs.version = 0;
470 ci->i_xattrs.index_version = 0;
472 ci->i_caps = RB_ROOT;
473 ci->i_auth_cap = NULL;
474 ci->i_dirty_caps = 0;
475 ci->i_flushing_caps = 0;
476 INIT_LIST_HEAD(&ci->i_dirty_item);
477 INIT_LIST_HEAD(&ci->i_flushing_item);
478 ci->i_prealloc_cap_flush = NULL;
479 INIT_LIST_HEAD(&ci->i_cap_flush_list);
480 init_waitqueue_head(&ci->i_cap_wq);
481 ci->i_hold_caps_max = 0;
482 INIT_LIST_HEAD(&ci->i_cap_delay_list);
483 INIT_LIST_HEAD(&ci->i_cap_snaps);
484 ci->i_head_snapc = NULL;
487 ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
488 for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
489 ci->i_nr_by_mode[i] = 0;
491 mutex_init(&ci->i_truncate_mutex);
492 ci->i_truncate_seq = 0;
493 ci->i_truncate_size = 0;
494 ci->i_truncate_pending = 0;
497 ci->i_reported_size = 0;
498 ci->i_wanted_max_size = 0;
499 ci->i_requested_max_size = 0;
503 ci->i_rdcache_ref = 0;
507 ci->i_wrbuffer_ref = 0;
508 ci->i_wrbuffer_ref_head = 0;
509 atomic_set(&ci->i_filelock_ref, 0);
510 atomic_set(&ci->i_shared_gen, 1);
511 ci->i_rdcache_gen = 0;
512 ci->i_rdcache_revoking = 0;
514 INIT_LIST_HEAD(&ci->i_unsafe_dirops);
515 INIT_LIST_HEAD(&ci->i_unsafe_iops);
516 spin_lock_init(&ci->i_unsafe_lock);
518 ci->i_snap_realm = NULL;
519 INIT_LIST_HEAD(&ci->i_snap_realm_item);
520 INIT_LIST_HEAD(&ci->i_snap_flush_item);
522 INIT_WORK(&ci->i_work, ceph_inode_work);
524 memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
526 ceph_fscache_inode_init(ci);
530 return &ci->vfs_inode;
533 void ceph_free_inode(struct inode *inode)
535 struct ceph_inode_info *ci = ceph_inode(inode);
537 kfree(ci->i_symlink);
538 kmem_cache_free(ceph_inode_cachep, ci);
541 void ceph_evict_inode(struct inode *inode)
543 struct ceph_inode_info *ci = ceph_inode(inode);
544 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
545 struct ceph_inode_frag *frag;
548 dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
550 percpu_counter_dec(&mdsc->metric.total_inodes);
552 truncate_inode_pages_final(&inode->i_data);
555 ceph_fscache_unregister_inode_cookie(ci);
557 __ceph_remove_caps(ci);
559 if (__ceph_has_any_quota(ci))
560 ceph_adjust_quota_realms_count(inode, false);
563 * we may still have a snap_realm reference if there are stray
564 * caps in i_snap_caps.
566 if (ci->i_snap_realm) {
567 if (ceph_snap(inode) == CEPH_NOSNAP) {
568 struct ceph_snap_realm *realm = ci->i_snap_realm;
569 dout(" dropping residual ref to snap realm %p\n",
571 spin_lock(&realm->inodes_with_caps_lock);
572 list_del_init(&ci->i_snap_realm_item);
573 ci->i_snap_realm = NULL;
574 if (realm->ino == ci->i_vino.ino)
576 spin_unlock(&realm->inodes_with_caps_lock);
577 ceph_put_snap_realm(mdsc, realm);
579 ceph_put_snapid_map(mdsc, ci->i_snapid_map);
580 ci->i_snap_realm = NULL;
584 while ((n = rb_first(&ci->i_fragtree)) != NULL) {
585 frag = rb_entry(n, struct ceph_inode_frag, node);
586 rb_erase(n, &ci->i_fragtree);
589 ci->i_fragtree_nsplits = 0;
591 __ceph_destroy_xattrs(ci);
592 if (ci->i_xattrs.blob)
593 ceph_buffer_put(ci->i_xattrs.blob);
594 if (ci->i_xattrs.prealloc_blob)
595 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
597 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
598 ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
601 static inline blkcnt_t calc_inode_blocks(u64 size)
603 return (size + (1<<9) - 1) >> 9;
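/*
 * Worked examples: blocks are 512-byte sectors rounded up, so
 * calc_inode_blocks(0) == 0, calc_inode_blocks(1) == 1 and
 * calc_inode_blocks(4096) == 8.
 */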
607 * Helpers to fill in size, ctime, mtime, and atime. We have to be
608 * careful because either the client or MDS may have more up to date
609 * info, depending on which capabilities are held, and whether
610 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
611 * and size are monotonically increasing, except when utimes() or
612 * truncate() increments the corresponding _seq values.)
614 int ceph_fill_file_size(struct inode *inode, int issued,
615 u32 truncate_seq, u64 truncate_size, u64 size)
617 struct ceph_inode_info *ci = ceph_inode(inode);
620 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
621 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
622 dout("size %lld -> %llu\n", inode->i_size, size);
623 if (size > 0 && S_ISDIR(inode->i_mode)) {
624 pr_err("fill_file_size non-zero size for directory\n");
627 i_size_write(inode, size);
628 inode->i_blocks = calc_inode_blocks(size);
629 ci->i_reported_size = size;
630 if (truncate_seq != ci->i_truncate_seq) {
631 dout("truncate_seq %u -> %u\n",
632 ci->i_truncate_seq, truncate_seq);
633 ci->i_truncate_seq = truncate_seq;
635 /* the MDS should have revoked these caps */
636 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
639 CEPH_CAP_FILE_LAZYIO));
641 * If we hold relevant caps, or in the case where we're
642 * not the only client referencing this file and we
643 * don't hold those caps, then we need to check whether
644 * the file is either opened or mmapped
646 if ((issued & (CEPH_CAP_FILE_CACHE|
647 CEPH_CAP_FILE_BUFFER)) ||
648 mapping_mapped(inode->i_mapping) ||
649 __ceph_is_file_opened(ci)) {
650 ci->i_truncate_pending++;
655 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
656 ci->i_truncate_size != truncate_size) {
657 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
659 ci->i_truncate_size = truncate_size;
663 ceph_fscache_invalidate(inode);
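/*
 * Worked example (illustrative): with local i_size 100 and
 * i_truncate_seq 2, an MDS report of size 50 at truncate_seq 2 is
 * ignored (within one seq, size only grows), while size 50 at
 * truncate_seq 3 is applied and may set queue_trunc so stale pagecache
 * gets dropped.
 */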
668 void ceph_fill_file_time(struct inode *inode, int issued,
669 u64 time_warp_seq, struct timespec64 *ctime,
670 struct timespec64 *mtime, struct timespec64 *atime)
672 struct ceph_inode_info *ci = ceph_inode(inode);
675 if (issued & (CEPH_CAP_FILE_EXCL|
677 CEPH_CAP_FILE_BUFFER|
679 CEPH_CAP_XATTR_EXCL)) {
680 if (ci->i_version == 0 ||
681 timespec64_compare(ctime, &inode->i_ctime) > 0) {
682 dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
683 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
684 ctime->tv_sec, ctime->tv_nsec);
685 inode->i_ctime = *ctime;
687 if (ci->i_version == 0 ||
688 ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
689 /* the MDS did a utimes() */
690 dout("mtime %lld.%09ld -> %lld.%09ld "
692 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
693 mtime->tv_sec, mtime->tv_nsec,
694 ci->i_time_warp_seq, (int)time_warp_seq);
696 inode->i_mtime = *mtime;
697 inode->i_atime = *atime;
698 ci->i_time_warp_seq = time_warp_seq;
699 } else if (time_warp_seq == ci->i_time_warp_seq) {
700 /* nobody did utimes(); take the max */
701 if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
702 dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
703 inode->i_mtime.tv_sec,
704 inode->i_mtime.tv_nsec,
705 mtime->tv_sec, mtime->tv_nsec);
706 inode->i_mtime = *mtime;
708 if (timespec64_compare(atime, &inode->i_atime) > 0) {
709 dout("atime %lld.%09ld -> %lld.%09ld inc\n",
710 inode->i_atime.tv_sec,
711 inode->i_atime.tv_nsec,
712 atime->tv_sec, atime->tv_nsec);
713 inode->i_atime = *atime;
715 } else if (issued & CEPH_CAP_FILE_EXCL) {
716 /* we did a utimes(); ignore mds values */
721 /* we have no write|excl caps; whatever the MDS says is true */
722 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
723 inode->i_ctime = *ctime;
724 inode->i_mtime = *mtime;
725 inode->i_atime = *atime;
726 ci->i_time_warp_seq = time_warp_seq;
731 if (warn) /* time_warp_seq shouldn't go backwards */
732 dout("%p mds time_warp_seq %llu < %u\n",
733 inode, time_warp_seq, ci->i_time_warp_seq);
737 * Populate an inode based on info from mds. May be called on new or existing inodes.
740 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
741 struct ceph_mds_reply_info_in *iinfo,
742 struct ceph_mds_reply_dirfrag *dirinfo,
743 struct ceph_mds_session *session, int cap_fmode,
744 struct ceph_cap_reservation *caps_reservation)
746 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
747 struct ceph_mds_reply_inode *info = iinfo->in;
748 struct ceph_inode_info *ci = ceph_inode(inode);
749 int issued, new_issued, info_caps;
750 struct timespec64 mtime, atime, ctime;
751 struct ceph_buffer *xattr_blob = NULL;
752 struct ceph_buffer *old_blob = NULL;
753 struct ceph_string *pool_ns = NULL;
754 struct ceph_cap *new_cap = NULL;
757 bool queue_trunc = false;
758 bool new_version = false;
759 bool fill_inline = false;
761 dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
762 inode, ceph_vinop(inode), le64_to_cpu(info->version),
765 info_caps = le32_to_cpu(info->cap.caps);
767 /* prealloc new cap struct */
768 if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
769 new_cap = ceph_get_cap(mdsc, caps_reservation);
775 * prealloc xattr data, if it looks like we'll need it. only
776 * if len > 4 (meaning there are actually xattrs; the first 4
777 * bytes are the xattr count).
779 if (iinfo->xattr_len > 4) {
780 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
782 pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
786 if (iinfo->pool_ns_len > 0)
787 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
790 if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
791 ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
793 spin_lock(&ci->i_ceph_lock);
796 * provided version will be odd if inode value is projected,
797 * even if stable. skip the update if we have newer stable
798 * info (ours>=theirs, e.g. due to racing mds replies), unless
799 * we are getting projected (unstable) info (in which case the
800 * version is odd, and we want ours>theirs).
806 if (ci->i_version == 0 ||
807 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
808 le64_to_cpu(info->version) > (ci->i_version & ~1)))
811 /* Update change_attribute */
812 inode_set_max_iversion_raw(inode, iinfo->change_attr);
814 __ceph_caps_issued(ci, &issued);
815 issued |= __ceph_caps_dirty(ci);
816 new_issued = ~issued & info_caps;
819 inode->i_rdev = le32_to_cpu(info->rdev);
820 /* directories have fl_stripe_unit set to zero */
821 if (le32_to_cpu(info->layout.fl_stripe_unit))
823 fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
825 inode->i_blkbits = CEPH_BLOCK_SHIFT;
827 __ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
829 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
830 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
831 inode->i_mode = le32_to_cpu(info->mode);
832 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
833 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
834 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
835 from_kuid(&init_user_ns, inode->i_uid),
836 from_kgid(&init_user_ns, inode->i_gid));
837 ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
838 ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
841 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
842 (issued & CEPH_CAP_LINK_EXCL) == 0)
843 set_nlink(inode, le32_to_cpu(info->nlink));
845 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
846 /* be careful with mtime, atime, size */
847 ceph_decode_timespec64(&atime, &info->atime);
848 ceph_decode_timespec64(&mtime, &info->mtime);
849 ceph_decode_timespec64(&ctime, &info->ctime);
850 ceph_fill_file_time(inode, issued,
851 le32_to_cpu(info->time_warp_seq),
852 &ctime, &mtime, &atime);
855 if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
856 ci->i_files = le64_to_cpu(info->files);
857 ci->i_subdirs = le64_to_cpu(info->subdirs);
861 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
862 s64 old_pool = ci->i_layout.pool_id;
863 struct ceph_string *old_ns;
865 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
866 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
867 lockdep_is_held(&ci->i_ceph_lock));
868 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
870 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
871 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
875 queue_trunc = ceph_fill_file_size(inode, issued,
876 le32_to_cpu(info->truncate_seq),
877 le64_to_cpu(info->truncate_size),
878 le64_to_cpu(info->size));
879 /* only update max_size on auth cap */
880 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
881 ci->i_max_size != le64_to_cpu(info->max_size)) {
882 dout("max_size %lld -> %llu\n", ci->i_max_size,
883 le64_to_cpu(info->max_size));
884 ci->i_max_size = le64_to_cpu(info->max_size);
888 /* layout and rstat are not tracked by capabilities; update them
889 * if the inode info is from the auth mds */
890 if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
891 if (S_ISDIR(inode->i_mode)) {
892 ci->i_dir_layout = iinfo->dir_layout;
893 ci->i_rbytes = le64_to_cpu(info->rbytes);
894 ci->i_rfiles = le64_to_cpu(info->rfiles);
895 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
896 ci->i_dir_pin = iinfo->dir_pin;
897 ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
902 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
903 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
904 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
905 if (ci->i_xattrs.blob)
906 old_blob = ci->i_xattrs.blob;
907 ci->i_xattrs.blob = xattr_blob;
909 memcpy(ci->i_xattrs.blob->vec.iov_base,
910 iinfo->xattr_data, iinfo->xattr_len);
911 ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
912 ceph_forget_all_cached_acls(inode);
913 ceph_security_invalidate_secctx(inode);
917 /* finally update i_version */
918 if (le64_to_cpu(info->version) > ci->i_version)
919 ci->i_version = le64_to_cpu(info->version);
921 inode->i_mapping->a_ops = &ceph_aops;
923 switch (inode->i_mode & S_IFMT) {
928 inode->i_blkbits = PAGE_SHIFT;
929 init_special_inode(inode, inode->i_mode, inode->i_rdev);
930 inode->i_op = &ceph_file_iops;
933 inode->i_op = &ceph_file_iops;
934 inode->i_fop = &ceph_file_fops;
937 inode->i_op = &ceph_symlink_iops;
938 if (!ci->i_symlink) {
939 u32 symlen = iinfo->symlink_len;
942 spin_unlock(&ci->i_ceph_lock);
944 if (symlen != i_size_read(inode)) {
945 pr_err("%s %llx.%llx BAD symlink "
946 "size %lld\n", __func__,
949 i_size_write(inode, symlen);
950 inode->i_blocks = calc_inode_blocks(symlen);
954 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
958 spin_lock(&ci->i_ceph_lock);
962 kfree(sym); /* lost a race */
964 inode->i_link = ci->i_symlink;
967 inode->i_op = &ceph_dir_iops;
968 inode->i_fop = &ceph_dir_fops;
971 pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
972 ceph_vinop(inode), inode->i_mode);
975 /* were we issued a capability? */
977 if (ceph_snap(inode) == CEPH_NOSNAP) {
978 ceph_add_cap(inode, session,
979 le64_to_cpu(info->cap.cap_id),
981 le32_to_cpu(info->cap.wanted),
982 le32_to_cpu(info->cap.seq),
983 le32_to_cpu(info->cap.mseq),
984 le64_to_cpu(info->cap.realm),
985 info->cap.flags, &new_cap);
987 /* set dir completion flag? */
988 if (S_ISDIR(inode->i_mode) &&
989 ci->i_files == 0 && ci->i_subdirs == 0 &&
990 (info_caps & CEPH_CAP_FILE_SHARED) &&
991 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
992 !__ceph_dir_is_complete(ci)) {
993 dout(" marking %p complete (empty)\n", inode);
994 i_size_write(inode, 0);
995 __ceph_dir_set_complete(ci,
996 atomic64_read(&ci->i_release_count),
997 atomic64_read(&ci->i_ordered_count));
1002 dout(" %p got snap_caps %s\n", inode,
1003 ceph_cap_string(info_caps));
1004 ci->i_snap_caps |= info_caps;
1008 if (iinfo->inline_version > 0 &&
1009 iinfo->inline_version >= ci->i_inline_version) {
1010 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1011 ci->i_inline_version = iinfo->inline_version;
1012 if (ci->i_inline_version != CEPH_INLINE_NONE &&
1013 (locked_page || (info_caps & cache_caps)))
1017 if (cap_fmode >= 0) {
1019 pr_warn("mds issued no caps on %llx.%llx\n",
1021 __ceph_touch_fmode(ci, mdsc, cap_fmode);
1024 spin_unlock(&ci->i_ceph_lock);
1027 ceph_fill_inline_data(inode, locked_page,
1028 iinfo->inline_data, iinfo->inline_len);
1031 wake_up_all(&ci->i_cap_wq);
1033 /* queue truncate if we saw i_size decrease */
1035 ceph_queue_vmtruncate(inode);
1037 /* populate frag tree */
1038 if (S_ISDIR(inode->i_mode))
1039 ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1041 /* update delegation info? */
1043 ceph_fill_dirfrag(inode, dirinfo);
1048 ceph_put_cap(mdsc, new_cap);
1049 ceph_buffer_put(old_blob);
1050 ceph_buffer_put(xattr_blob);
1051 ceph_put_string(pool_ns);
1056 * caller should hold session s_mutex and dentry->d_lock.
1058 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1059 struct ceph_mds_reply_lease *lease,
1060 struct ceph_mds_session *session,
1061 unsigned long from_time,
1062 struct ceph_mds_session **old_lease_session)
1064 struct ceph_dentry_info *di = ceph_dentry(dentry);
1065 unsigned mask = le16_to_cpu(lease->mask);
1066 long unsigned duration = le32_to_cpu(lease->duration_ms);
1067 long unsigned ttl = from_time + (duration * HZ) / 1000;
1068 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1070 dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1071 dentry, duration, ttl);
1073 /* only track leases on regular dentries */
1074 if (ceph_snap(dir) != CEPH_NOSNAP)
1077 if (mask & CEPH_LEASE_PRIMARY_LINK)
1078 di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1080 di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1082 di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1083 if (!(mask & CEPH_LEASE_VALID)) {
1084 __ceph_dentry_dir_lease_touch(di);
1088 if (di->lease_gen == session->s_cap_gen &&
1089 time_before(ttl, di->time))
1090 return; /* we already have a newer lease. */
1092 if (di->lease_session && di->lease_session != session) {
1093 *old_lease_session = di->lease_session;
1094 di->lease_session = NULL;
1097 if (!di->lease_session)
1098 di->lease_session = ceph_get_mds_session(session);
1099 di->lease_gen = session->s_cap_gen;
1100 di->lease_seq = le32_to_cpu(lease->seq);
1101 di->lease_renew_after = half_ttl;
1102 di->lease_renew_from = 0;
1105 __ceph_dentry_lease_touch(di);
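/*
 * Worked example (illustrative): with HZ == 1000 and
 * lease->duration_ms == 30000, ttl is from_time + 30000 jiffies and
 * half_ttl is from_time + 15000 jiffies, so renewal can start halfway
 * through the lease.
 */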
1108 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1109 struct ceph_mds_reply_lease *lease,
1110 struct ceph_mds_session *session,
1111 unsigned long from_time)
1113 struct ceph_mds_session *old_lease_session = NULL;
1114 spin_lock(&dentry->d_lock);
1115 __update_dentry_lease(dir, dentry, lease, session, from_time,
1116 &old_lease_session);
1117 spin_unlock(&dentry->d_lock);
1118 if (old_lease_session)
1119 ceph_put_mds_session(old_lease_session);
1123 * update dentry lease without having parent inode locked
1125 static void update_dentry_lease_careful(struct dentry *dentry,
1126 struct ceph_mds_reply_lease *lease,
1127 struct ceph_mds_session *session,
1128 unsigned long from_time,
1129 char *dname, u32 dname_len,
1130 struct ceph_vino *pdvino,
1131 struct ceph_vino *ptvino)
1135 struct ceph_mds_session *old_lease_session = NULL;
1137 spin_lock(&dentry->d_lock);
1138 /* make sure dentry's name matches target */
1139 if (dentry->d_name.len != dname_len ||
1140 memcmp(dentry->d_name.name, dname, dname_len))
1143 dir = d_inode(dentry->d_parent);
1144 /* make sure parent matches dvino */
1145 if (!ceph_ino_compare(dir, pdvino))
1148 /* make sure dentry's inode matches target. NULL ptvino means that
1149 * we expect a negative dentry */
1151 if (d_really_is_negative(dentry))
1153 if (!ceph_ino_compare(d_inode(dentry), ptvino))
1156 if (d_really_is_positive(dentry))
1160 __update_dentry_lease(dir, dentry, lease, session,
1161 from_time, &old_lease_session);
1163 spin_unlock(&dentry->d_lock);
1164 if (old_lease_session)
1165 ceph_put_mds_session(old_lease_session);
1169 * splice a dentry to an inode.
1170 * caller must hold directory i_mutex for this to be safe.
1172 static int splice_dentry(struct dentry **pdn, struct inode *in)
1174 struct dentry *dn = *pdn;
1175 struct dentry *realdn;
1177 BUG_ON(d_inode(dn));
1179 if (S_ISDIR(in->i_mode)) {
1180 /* If inode is directory, d_splice_alias() below will remove
1181 * 'realdn' from its origin parent. We need to ensure that
1182 * origin parent's readdir cache will not reference 'realdn'
1184 realdn = d_find_any_alias(in);
1186 struct ceph_dentry_info *di = ceph_dentry(realdn);
1187 spin_lock(&realdn->d_lock);
1189 realdn->d_op->d_prune(realdn);
1192 di->lease_shared_gen = 0;
1195 spin_unlock(&realdn->d_lock);
1200 /* dn must be unhashed */
1201 if (!d_unhashed(dn))
1203 realdn = d_splice_alias(in, dn);
1204 if (IS_ERR(realdn)) {
1205 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1206 PTR_ERR(realdn), dn, in, ceph_vinop(in));
1207 return PTR_ERR(realdn);
1211 dout("dn %p (%d) spliced with %p (%d) "
1212 "inode %p ino %llx.%llx\n",
1214 realdn, d_count(realdn),
1215 d_inode(realdn), ceph_vinop(d_inode(realdn)));
1219 BUG_ON(!ceph_dentry(dn));
1220 dout("dn %p attached to %p ino %llx.%llx\n",
1221 dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1227 * Incorporate results into the local cache. This is either just
1228 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., after a lookup).
1231 * A reply may contain
1232 * a directory inode along with a dentry.
1233 * and/or a target inode
1235 * Called with snap_rwsem (read).
1237 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1239 struct ceph_mds_session *session = req->r_session;
1240 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1241 struct inode *in = NULL;
1242 struct ceph_vino tvino, dvino;
1243 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1246 dout("fill_trace %p is_dentry %d is_target %d\n", req,
1247 rinfo->head->is_dentry, rinfo->head->is_target);
1249 if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1250 dout("fill_trace reply is empty!\n");
1251 if (rinfo->head->result == 0 && req->r_parent)
1252 ceph_invalidate_dir_request(req);
1256 if (rinfo->head->is_dentry) {
1257 struct inode *dir = req->r_parent;
1260 err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1261 rinfo->dirfrag, session, -1,
1262 &req->r_caps_reservation);
1269 if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1270 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1271 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1273 struct dentry *dn, *parent;
1275 BUG_ON(!rinfo->head->is_target);
1276 BUG_ON(req->r_dentry);
1278 parent = d_find_any_alias(dir);
1281 dname.name = rinfo->dname;
1282 dname.len = rinfo->dname_len;
1283 dname.hash = full_name_hash(parent, dname.name, dname.len);
1284 tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1285 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1287 dn = d_lookup(parent, &dname);
1288 dout("d_lookup on parent=%p name=%.*s got %p\n",
1289 parent, dname.len, dname.name, dn);
1292 dn = d_alloc(parent, &dname);
1293 dout("d_alloc %p '%.*s' = %p\n", parent,
1294 dname.len, dname.name, dn);
1301 } else if (d_really_is_positive(dn) &&
1302 (ceph_ino(d_inode(dn)) != tvino.ino ||
1303 ceph_snap(d_inode(dn)) != tvino.snap)) {
1304 dout(" dn %p points to wrong inode %p\n",
1306 ceph_dir_clear_ordered(dir);
1317 if (rinfo->head->is_target) {
1318 tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1319 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1321 in = ceph_get_inode(sb, tvino);
1327 err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1329 (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1330 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1331 rinfo->head->result == 0) ? req->r_fmode : -1,
1332 &req->r_caps_reservation);
1334 pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1335 in, ceph_vinop(in));
1336 if (in->i_state & I_NEW)
1337 discard_new_inode(in);
1340 req->r_target_inode = in;
1341 if (in->i_state & I_NEW)
1342 unlock_new_inode(in);
1346 * ignore null lease/binding on snapdir ENOENT, or else we
1347 * will have trouble splicing in the virtual snapdir later
1349 if (rinfo->head->is_dentry &&
1350 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1351 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1352 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1353 fsc->mount_options->snapdir_name,
1354 req->r_dentry->d_name.len))) {
1356 * lookup link rename : null -> possibly existing inode
1357 * mknod symlink mkdir : null -> new inode
1358 * unlink : linked -> null
1360 struct inode *dir = req->r_parent;
1361 struct dentry *dn = req->r_dentry;
1362 bool have_dir_cap, have_lease;
1366 BUG_ON(d_inode(dn->d_parent) != dir);
1368 dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1369 dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1371 BUG_ON(ceph_ino(dir) != dvino.ino);
1372 BUG_ON(ceph_snap(dir) != dvino.snap);
1374 /* do we have a lease on the whole dir? */
1376 (le32_to_cpu(rinfo->diri.in->cap.caps) &
1377 CEPH_CAP_FILE_SHARED);
1379 /* do we have a dn lease? */
1380 have_lease = have_dir_cap ||
1381 le32_to_cpu(rinfo->dlease->duration_ms);
1383 dout("fill_trace no dentry lease or dir cap\n");
1386 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1387 struct inode *olddir = req->r_old_dentry_dir;
1390 dout(" src %p '%pd' dst %p '%pd'\n",
1394 dout("fill_trace doing d_move %p -> %p\n",
1395 req->r_old_dentry, dn);
1397 /* d_move screws up sibling dentries' offsets */
1398 ceph_dir_clear_ordered(dir);
1399 ceph_dir_clear_ordered(olddir);
1401 d_move(req->r_old_dentry, dn);
1402 dout(" src %p '%pd' dst %p '%pd'\n",
1407 /* ensure target dentry is invalidated, despite
1408 rehashing bug in vfs_rename_dir */
1409 ceph_invalidate_dentry_lease(dn);
1411 dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1412 ceph_dentry(req->r_old_dentry)->offset);
1414 /* swap r_dentry and r_old_dentry in case that
1415 * splice_dentry() gets called later. This is safe
1416 * because no other place will use them */
1417 req->r_dentry = req->r_old_dentry;
1418 req->r_old_dentry = dn;
1423 if (!rinfo->head->is_target) {
1424 dout("fill_trace null dentry\n");
1425 if (d_really_is_positive(dn)) {
1426 dout("d_delete %p\n", dn);
1427 ceph_dir_clear_ordered(dir);
1429 } else if (have_lease) {
1432 update_dentry_lease(dir, dn,
1433 rinfo->dlease, session,
1434 req->r_request_started);
1439 /* attach proper inode */
1440 if (d_really_is_negative(dn)) {
1441 ceph_dir_clear_ordered(dir);
1443 err = splice_dentry(&req->r_dentry, in);
1446 dn = req->r_dentry; /* may have spliced */
1447 } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1448 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1449 dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1456 update_dentry_lease(dir, dn,
1457 rinfo->dlease, session,
1458 req->r_request_started);
1460 dout(" final dn %p\n", dn);
1461 } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1462 req->r_op == CEPH_MDS_OP_MKSNAP) &&
1463 test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1464 !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1465 struct inode *dir = req->r_parent;
1467 /* fill out a snapdir LOOKUPSNAP dentry */
1469 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1470 BUG_ON(!req->r_dentry);
1471 dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1472 ceph_dir_clear_ordered(dir);
1474 err = splice_dentry(&req->r_dentry, in);
1477 } else if (rinfo->head->is_dentry && req->r_dentry) {
1478 /* parent inode is not locked, be careful */
1479 struct ceph_vino *ptvino = NULL;
1480 dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1481 dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1482 if (rinfo->head->is_target) {
1483 tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1484 tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1487 update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1488 session, req->r_request_started,
1489 rinfo->dname, rinfo->dname_len,
1493 dout("fill_trace done err=%d\n", err);
1498 * Prepopulate our cache with readdir results, leases, etc.
1500 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1501 struct ceph_mds_session *session)
1503 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1506 for (i = 0; i < rinfo->dir_nr; i++) {
1507 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1508 struct ceph_vino vino;
1512 vino.ino = le64_to_cpu(rde->inode.in->ino);
1513 vino.snap = le64_to_cpu(rde->inode.in->snapid);
1515 in = ceph_get_inode(req->r_dentry->d_sb, vino);
1518 dout("new_inode badness got %d\n", err);
1521 rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1522 -1, &req->r_caps_reservation);
1524 pr_err("ceph_fill_inode badness on %p got %d\n",
1527 if (in->i_state & I_NEW) {
1529 discard_new_inode(in);
1531 } else if (in->i_state & I_NEW) {
1532 unlock_new_inode(in);
1535 /* avoid calling iput_final() in mds dispatch threads */
1536 ceph_async_iput(in);
1542 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1546 put_page(ctl->page);
1551 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1552 struct ceph_readdir_cache_control *ctl,
1553 struct ceph_mds_request *req)
1555 struct ceph_inode_info *ci = ceph_inode(dir);
1556 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1557 unsigned idx = ctl->index % nsize;
1558 pgoff_t pgoff = ctl->index / nsize;
1560 if (!ctl->page || pgoff != page_index(ctl->page)) {
1561 ceph_readdir_cache_release(ctl);
1563 ctl->page = grab_cache_page(&dir->i_data, pgoff);
1565 ctl->page = find_lock_page(&dir->i_data, pgoff);
1568 return idx == 0 ? -ENOMEM : 0;
1570 /* reading/filling the cache are serialized by
1571 * i_mutex, no need to use page lock */
1572 unlock_page(ctl->page);
1573 ctl->dentries = kmap(ctl->page);
1575 memset(ctl->dentries, 0, PAGE_SIZE);
1578 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1579 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1580 dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1581 ctl->dentries[idx] = dn;
1584 dout("disable readdir cache\n");
1590 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1591 struct ceph_mds_session *session)
1593 struct dentry *parent = req->r_dentry;
1594 struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1595 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1599 int err = 0, skipped = 0, ret, i;
1600 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
1601 u32 frag = le32_to_cpu(rhead->args.readdir.frag);
1604 struct ceph_readdir_cache_control cache_ctl = {};
1606 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1607 return readdir_prepopulate_inodes_only(req, session);
1609 if (rinfo->hash_order) {
1611 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1613 strlen(req->r_path2));
1614 last_hash = ceph_frag_value(last_hash);
1615 } else if (rinfo->offset_hash) {
1616 /* mds understands offset_hash */
1617 WARN_ON_ONCE(req->r_readdir_offset != 2);
1618 last_hash = le32_to_cpu(rhead->args.readdir.offset_hash);
1622 if (rinfo->dir_dir &&
1623 le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1624 dout("readdir_prepopulate got new frag %x -> %x\n",
1625 frag, le32_to_cpu(rinfo->dir_dir->frag));
1626 frag = le32_to_cpu(rinfo->dir_dir->frag);
1627 if (!rinfo->hash_order)
1628 req->r_readdir_offset = 2;
1631 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1632 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1633 rinfo->dir_nr, parent);
1635 dout("readdir_prepopulate %d items under dn %p\n",
1636 rinfo->dir_nr, parent);
1638 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1640 if (ceph_frag_is_leftmost(frag) &&
1641 req->r_readdir_offset == 2 &&
1642 !(rinfo->hash_order && last_hash)) {
1643 /* note dir version at start of readdir so we can
1644 * tell if any dentries get dropped */
1645 req->r_dir_release_cnt =
1646 atomic64_read(&ci->i_release_count);
1647 req->r_dir_ordered_cnt =
1648 atomic64_read(&ci->i_ordered_count);
1649 req->r_readdir_cache_idx = 0;
1653 cache_ctl.index = req->r_readdir_cache_idx;
1654 fpos_offset = req->r_readdir_offset;
1656 /* FIXME: release caps/leases if error occurs */
1657 for (i = 0; i < rinfo->dir_nr; i++) {
1658 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1659 struct ceph_vino tvino;
1661 dname.name = rde->name;
1662 dname.len = rde->name_len;
1663 dname.hash = full_name_hash(parent, dname.name, dname.len);
1665 tvino.ino = le64_to_cpu(rde->inode.in->ino);
1666 tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1668 if (rinfo->hash_order) {
1669 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1670 rde->name, rde->name_len);
1671 hash = ceph_frag_value(hash);
1672 if (hash != last_hash)
1675 rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1677 rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1681 dn = d_lookup(parent, &dname);
1682 dout("d_lookup on parent=%p name=%.*s got %p\n",
1683 parent, dname.len, dname.name, dn);
1686 dn = d_alloc(parent, &dname);
1687 dout("d_alloc %p '%.*s' = %p\n", parent,
1688 dname.len, dname.name, dn);
1690 dout("d_alloc badness\n");
1694 } else if (d_really_is_positive(dn) &&
1695 (ceph_ino(d_inode(dn)) != tvino.ino ||
1696 ceph_snap(d_inode(dn)) != tvino.snap)) {
1697 struct ceph_dentry_info *di = ceph_dentry(dn);
1698 dout(" dn %p points to wrong inode %p\n",
1701 spin_lock(&dn->d_lock);
1702 if (di->offset > 0 &&
1703 di->lease_shared_gen ==
1704 atomic_read(&ci->i_shared_gen)) {
1705 __ceph_dir_clear_ordered(ci);
1708 spin_unlock(&dn->d_lock);
1716 if (d_really_is_positive(dn)) {
1719 in = ceph_get_inode(parent->d_sb, tvino);
1721 dout("new_inode badness\n");
1729 ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1730 -1, &req->r_caps_reservation);
1732 pr_err("ceph_fill_inode badness on %p\n", in);
1733 if (d_really_is_negative(dn)) {
1734 /* avoid calling iput_final() in mds
1735 * dispatch threads */
1736 if (in->i_state & I_NEW) {
1738 discard_new_inode(in);
1740 ceph_async_iput(in);
1746 if (in->i_state & I_NEW)
1747 unlock_new_inode(in);
1749 if (d_really_is_negative(dn)) {
1750 if (ceph_security_xattr_deadlock(in)) {
1751 dout(" skip splicing dn %p to inode %p"
1752 " (security xattr deadlock)\n", dn, in);
1753 ceph_async_iput(in);
1758 err = splice_dentry(&dn, in);
1763 ceph_dentry(dn)->offset = rde->offset;
1765 update_dentry_lease(d_inode(parent), dn,
1766 rde->lease, req->r_session,
1767 req->r_request_started);
1769 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1770 ret = fill_readdir_cache(d_inode(parent), dn,
1779 if (err == 0 && skipped == 0) {
1780 set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1781 req->r_readdir_cache_idx = cache_ctl.index;
1783 ceph_readdir_cache_release(&cache_ctl);
1784 dout("readdir_prepopulate done\n");
1788 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1790 struct ceph_inode_info *ci = ceph_inode(inode);
1793 spin_lock(&ci->i_ceph_lock);
1794 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1795 i_size_write(inode, size);
1796 inode->i_blocks = calc_inode_blocks(size);
1798 ret = __ceph_should_report_size(ci);
1800 spin_unlock(&ci->i_ceph_lock);
1805 * Put reference to inode, but avoid calling iput_final() in current thread.
1806 * iput_final() may wait for readahead pages. The wait can cause deadlock in some contexts.
1809 void ceph_async_iput(struct inode *inode)
1814 if (atomic_add_unless(&inode->i_count, -1, 1))
1816 if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1817 &ceph_inode(inode)->i_work))
1819 /* queue work failed, i_count must be at least 2 */
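/*
 * Usage sketch (hypothetical caller in a message-handler path):
 *
 *	ceph_async_iput(in);
 *
 * atomic_add_unless() above decrements only while i_count > 1; a
 * would-be final put is handed to inode_wq instead, so iput_final()
 * never runs in the dispatch thread.
 */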
1824 * Write back inode data in a worker thread. (This can't be done
1825 * in the message handler context.)
1827 void ceph_queue_writeback(struct inode *inode)
1829 struct ceph_inode_info *ci = ceph_inode(inode);
1830 set_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask);
1833 if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1835 dout("ceph_queue_writeback %p\n", inode);
1837 dout("ceph_queue_writeback %p already queued, mask=%lx\n",
1838 inode, ci->i_work_mask);
1844 * queue an async invalidation
1846 void ceph_queue_invalidate(struct inode *inode)
1848 struct ceph_inode_info *ci = ceph_inode(inode);
1849 set_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask);
1852 if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1853 &ceph_inode(inode)->i_work)) {
1854 dout("ceph_queue_invalidate %p\n", inode);
1856 dout("ceph_queue_invalidate %p already queued, mask=%lx\n",
1857 inode, ci->i_work_mask);
1863 * Queue an async vmtruncate. If we fail to queue work, we will handle
1864 * the truncation the next time we call __ceph_do_pending_vmtruncate.
1866 void ceph_queue_vmtruncate(struct inode *inode)
1868 struct ceph_inode_info *ci = ceph_inode(inode);
1869 set_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask);
1872 if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1874 dout("ceph_queue_vmtruncate %p\n", inode);
1876 dout("ceph_queue_vmtruncate %p already queued, mask=%lx\n",
1877 inode, ci->i_work_mask);
1882 static void ceph_do_invalidate_pages(struct inode *inode)
1884 struct ceph_inode_info *ci = ceph_inode(inode);
1885 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1889 mutex_lock(&ci->i_truncate_mutex);
1891 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1892 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
1893 inode, ceph_ino(inode));
1894 mapping_set_error(inode->i_mapping, -EIO);
1895 truncate_pagecache(inode, 0);
1896 mutex_unlock(&ci->i_truncate_mutex);
1900 spin_lock(&ci->i_ceph_lock);
1901 dout("invalidate_pages %p gen %d revoking %d\n", inode,
1902 ci->i_rdcache_gen, ci->i_rdcache_revoking);
1903 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1904 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1906 spin_unlock(&ci->i_ceph_lock);
1907 mutex_unlock(&ci->i_truncate_mutex);
1910 orig_gen = ci->i_rdcache_gen;
1911 spin_unlock(&ci->i_ceph_lock);
1913 if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1914 pr_err("invalidate_pages %p fails\n", inode);
1917 spin_lock(&ci->i_ceph_lock);
1918 if (orig_gen == ci->i_rdcache_gen &&
1919 orig_gen == ci->i_rdcache_revoking) {
1920 dout("invalidate_pages %p gen %d successful\n", inode,
1922 ci->i_rdcache_revoking--;
1925 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1926 inode, orig_gen, ci->i_rdcache_gen,
1927 ci->i_rdcache_revoking);
1928 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1931 spin_unlock(&ci->i_ceph_lock);
1932 mutex_unlock(&ci->i_truncate_mutex);
1935 ceph_check_caps(ci, 0, NULL);
1939 * Make sure any pending truncation is applied before doing anything
1940 * that may depend on it.
1942 void __ceph_do_pending_vmtruncate(struct inode *inode)
1944 struct ceph_inode_info *ci = ceph_inode(inode);
1946 int wrbuffer_refs, finish = 0;
1948 mutex_lock(&ci->i_truncate_mutex);
1950 spin_lock(&ci->i_ceph_lock);
1951 if (ci->i_truncate_pending == 0) {
1952 dout("__do_pending_vmtruncate %p none pending\n", inode);
1953 spin_unlock(&ci->i_ceph_lock);
1954 mutex_unlock(&ci->i_truncate_mutex);
1959 * make sure any dirty snapped pages are flushed before we
1960 * possibly truncate them... so write AND block!
1962 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1963 spin_unlock(&ci->i_ceph_lock);
1964 dout("__do_pending_vmtruncate %p flushing snaps first\n",
1966 filemap_write_and_wait_range(&inode->i_data, 0,
1967 inode->i_sb->s_maxbytes);
1971 /* there should be no reader or writer */
1972 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1974 to = ci->i_truncate_size;
1975 wrbuffer_refs = ci->i_wrbuffer_ref;
1976 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1977 ci->i_truncate_pending, to);
1978 spin_unlock(&ci->i_ceph_lock);
1980 truncate_pagecache(inode, to);
1982 spin_lock(&ci->i_ceph_lock);
1983 if (to == ci->i_truncate_size) {
1984 ci->i_truncate_pending = 0;
1987 spin_unlock(&ci->i_ceph_lock);
1991 mutex_unlock(&ci->i_truncate_mutex);
1993 if (wrbuffer_refs == 0)
1994 ceph_check_caps(ci, 0, NULL);
1996 wake_up_all(&ci->i_cap_wq);
1999 static void ceph_inode_work(struct work_struct *work)
2001 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2003 struct inode *inode = &ci->vfs_inode;
2005 if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2006 dout("writeback %p\n", inode);
2007 filemap_fdatawrite(&inode->i_data);
2009 if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2010 ceph_do_invalidate_pages(inode);
2012 if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2013 __ceph_do_pending_vmtruncate(inode);
2021 static const struct inode_operations ceph_symlink_iops = {
2022 .get_link = simple_get_link,
2023 .setattr = ceph_setattr,
2024 .getattr = ceph_getattr,
2025 .listxattr = ceph_listxattr,
2028 int __ceph_setattr(struct inode *inode, struct iattr *attr)
2030 struct ceph_inode_info *ci = ceph_inode(inode);
2031 unsigned int ia_valid = attr->ia_valid;
2032 struct ceph_mds_request *req;
2033 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2034 struct ceph_cap_flush *prealloc_cf;
2036 int release = 0, dirtied = 0;
2039 int inode_dirty_flags = 0;
2040 bool lock_snap_rwsem = false;
2042 prealloc_cf = ceph_alloc_cap_flush();
2046 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2049 ceph_free_cap_flush(prealloc_cf);
2050 return PTR_ERR(req);
2053 spin_lock(&ci->i_ceph_lock);
2054 issued = __ceph_caps_issued(ci, NULL);
2056 if (!ci->i_head_snapc &&
2057 (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2058 lock_snap_rwsem = true;
2059 if (!down_read_trylock(&mdsc->snap_rwsem)) {
2060 spin_unlock(&ci->i_ceph_lock);
2061 down_read(&mdsc->snap_rwsem);
2062 spin_lock(&ci->i_ceph_lock);
2063 issued = __ceph_caps_issued(ci, NULL);
2067 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2069 if (ia_valid & ATTR_UID) {
2070 dout("setattr %p uid %d -> %d\n", inode,
2071 from_kuid(&init_user_ns, inode->i_uid),
2072 from_kuid(&init_user_ns, attr->ia_uid));
2073 if (issued & CEPH_CAP_AUTH_EXCL) {
2074 inode->i_uid = attr->ia_uid;
2075 dirtied |= CEPH_CAP_AUTH_EXCL;
2076 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2077 !uid_eq(attr->ia_uid, inode->i_uid)) {
2078 req->r_args.setattr.uid = cpu_to_le32(
2079 from_kuid(&init_user_ns, attr->ia_uid));
2080 mask |= CEPH_SETATTR_UID;
2081 release |= CEPH_CAP_AUTH_SHARED;
2084 if (ia_valid & ATTR_GID) {
2085 dout("setattr %p gid %d -> %d\n", inode,
2086 from_kgid(&init_user_ns, inode->i_gid),
2087 from_kgid(&init_user_ns, attr->ia_gid));
2088 if (issued & CEPH_CAP_AUTH_EXCL) {
2089 inode->i_gid = attr->ia_gid;
2090 dirtied |= CEPH_CAP_AUTH_EXCL;
2091 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2092 !gid_eq(attr->ia_gid, inode->i_gid)) {
2093 req->r_args.setattr.gid = cpu_to_le32(
2094 from_kgid(&init_user_ns, attr->ia_gid));
2095 mask |= CEPH_SETATTR_GID;
2096 release |= CEPH_CAP_AUTH_SHARED;
2099 if (ia_valid & ATTR_MODE) {
2100 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2102 if (issued & CEPH_CAP_AUTH_EXCL) {
2103 inode->i_mode = attr->ia_mode;
2104 dirtied |= CEPH_CAP_AUTH_EXCL;
2105 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2106 attr->ia_mode != inode->i_mode) {
2107 inode->i_mode = attr->ia_mode;
2108 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2109 mask |= CEPH_SETATTR_MODE;
2110 release |= CEPH_CAP_AUTH_SHARED;
2114 if (ia_valid & ATTR_ATIME) {
2115 dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2116 inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2117 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2118 if (issued & CEPH_CAP_FILE_EXCL) {
2119 ci->i_time_warp_seq++;
2120 inode->i_atime = attr->ia_atime;
2121 dirtied |= CEPH_CAP_FILE_EXCL;
2122 } else if ((issued & CEPH_CAP_FILE_WR) &&
2123 timespec64_compare(&inode->i_atime,
2124 &attr->ia_atime) < 0) {
2125 inode->i_atime = attr->ia_atime;
2126 dirtied |= CEPH_CAP_FILE_WR;
2127 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2128 !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2129 ceph_encode_timespec64(&req->r_args.setattr.atime,
2131 mask |= CEPH_SETATTR_ATIME;
2132 release |= CEPH_CAP_FILE_SHARED |
2133 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2136 if (ia_valid & ATTR_SIZE) {
2137 dout("setattr %p size %lld -> %lld\n", inode,
2138 inode->i_size, attr->ia_size);
2139 if ((issued & CEPH_CAP_FILE_EXCL) &&
2140 attr->ia_size > inode->i_size) {
2141 i_size_write(inode, attr->ia_size);
2142 inode->i_blocks = calc_inode_blocks(attr->ia_size);
2143 ci->i_reported_size = attr->ia_size;
2144 dirtied |= CEPH_CAP_FILE_EXCL;
2145 ia_valid |= ATTR_MTIME;
2146 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2147 attr->ia_size != inode->i_size) {
2148 req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2149 req->r_args.setattr.old_size =
2150 cpu_to_le64(inode->i_size);
2151 mask |= CEPH_SETATTR_SIZE;
2152 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2153 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2156 if (ia_valid & ATTR_MTIME) {
2157 dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2158 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2159 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2160 if (issued & CEPH_CAP_FILE_EXCL) {
2161 ci->i_time_warp_seq++;
2162 inode->i_mtime = attr->ia_mtime;
2163 dirtied |= CEPH_CAP_FILE_EXCL;
2164 } else if ((issued & CEPH_CAP_FILE_WR) &&
2165 timespec64_compare(&inode->i_mtime,
2166 &attr->ia_mtime) < 0) {
2167 inode->i_mtime = attr->ia_mtime;
2168 dirtied |= CEPH_CAP_FILE_WR;
2169 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2170 !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2171 ceph_encode_timespec64(&req->r_args.setattr.mtime,
2173 mask |= CEPH_SETATTR_MTIME;
2174 release |= CEPH_CAP_FILE_SHARED |
2175 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2179 /* these do nothing */
2180 if (ia_valid & ATTR_CTIME) {
2181 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2182 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2183 dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2184 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2185 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2186 only ? "ctime only" : "ignored");
2189 * if kernel wants to dirty ctime but nothing else,
2190 * we need to choose a cap to dirty under, or do
2191 * an almost-no-op setattr
2193 if (issued & CEPH_CAP_AUTH_EXCL)
2194 dirtied |= CEPH_CAP_AUTH_EXCL;
2195 else if (issued & CEPH_CAP_FILE_EXCL)
2196 dirtied |= CEPH_CAP_FILE_EXCL;
2197 else if (issued & CEPH_CAP_XATTR_EXCL)
2198 dirtied |= CEPH_CAP_XATTR_EXCL;
2200 mask |= CEPH_SETATTR_CTIME;
2203 if (ia_valid & ATTR_FILE)
2204 dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2207 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2209 inode->i_ctime = attr->ia_ctime;
2213 spin_unlock(&ci->i_ceph_lock);
2214 if (lock_snap_rwsem)
2215 up_read(&mdsc->snap_rwsem);
2217 if (inode_dirty_flags)
2218 __mark_inode_dirty(inode, inode_dirty_flags);
2222 req->r_inode = inode;
2224 req->r_inode_drop = release;
2225 req->r_args.setattr.mask = cpu_to_le32(mask);
2226 req->r_num_caps = 1;
2227 req->r_stamp = attr->ia_ctime;
2228 err = ceph_mdsc_do_request(mdsc, NULL, req);
2230 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2231 ceph_cap_string(dirtied), mask);
2233 ceph_mdsc_put_request(req);
2234 ceph_free_cap_flush(prealloc_cf);
2236 if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2237 __ceph_do_pending_vmtruncate(inode);
2245 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
2247 struct inode *inode = d_inode(dentry);
2248 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2251 if (ceph_snap(inode) != CEPH_NOSNAP)
2254 err = setattr_prepare(dentry, attr);
2258 if ((attr->ia_valid & ATTR_SIZE) &&
2259 attr->ia_size > max(inode->i_size, fsc->max_file_size))
2262 if ((attr->ia_valid & ATTR_SIZE) &&
2263 ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2266 err = __ceph_setattr(inode, attr);
2268 if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2269 err = posix_acl_chmod(inode, attr->ia_mode);
2275 * Verify that we have a lease on the given mask. If not,
2276 * do a getattr against an mds.
2278 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2279 int mask, bool force)
2281 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2282 struct ceph_mds_client *mdsc = fsc->mdsc;
2283 struct ceph_mds_request *req;
2287 if (ceph_snap(inode) == CEPH_SNAPDIR) {
2288 dout("do_getattr inode %p SNAPDIR\n", inode);
2292 dout("do_getattr inode %p mask %s mode 0%o\n",
2293 inode, ceph_cap_string(mask), inode->i_mode);
2294 if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2297 mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
2298 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2300 return PTR_ERR(req);
2301 req->r_inode = inode;
2303 req->r_num_caps = 1;
2304 req->r_args.getattr.mask = cpu_to_le32(mask);
2305 req->r_locked_page = locked_page;
2306 err = ceph_mdsc_do_request(mdsc, NULL, req);
2307 if (locked_page && err == 0) {
2308 u64 inline_version = req->r_reply_info.targeti.inline_version;
2309 if (inline_version == 0) {
2310 /* the reply is supposed to contain inline data */
2312 } else if (inline_version == CEPH_INLINE_NONE) {
2315 err = req->r_reply_info.targeti.inline_len;
2318 ceph_mdsc_put_request(req);
2319 dout("do_getattr result=%d\n", err);
2325 * Check inode permissions. We verify we have a valid value for
2326 * the AUTH cap, then call the generic handler.
2328 int ceph_permission(struct inode *inode, int mask)
2332 if (mask & MAY_NOT_BLOCK)
2335 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2338 err = generic_permission(inode, mask);
2342 /* Craft a mask of needed caps given a set of requested statx attrs. */
2343 static int statx_to_caps(u32 want)
2347 if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2348 mask |= CEPH_CAP_AUTH_SHARED;
2350 if (want & (STATX_NLINK|STATX_CTIME))
2351 mask |= CEPH_CAP_LINK_SHARED;
2353 if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2355 mask |= CEPH_CAP_FILE_SHARED;
2357 if (want & (STATX_CTIME))
2358 mask |= CEPH_CAP_XATTR_SHARED;
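/*
 * Worked example (illustrative): statx_to_caps(STATX_MODE|STATX_BTIME)
 * yields just CEPH_CAP_AUTH_SHARED, while STATX_CTIME pulls in the
 * AUTH, LINK, FILE and XATTR shared caps, since ctime can change with
 * any of them.
 */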
2364 * Get all the attributes. If we have sufficient caps for the requested attrs,
2365 * then we can avoid talking to the MDS at all.
2367 int ceph_getattr(const struct path *path, struct kstat *stat,
2368 u32 request_mask, unsigned int flags)
2370 struct inode *inode = d_inode(path->dentry);
2371 struct ceph_inode_info *ci = ceph_inode(inode);
2372 u32 valid_mask = STATX_BASIC_STATS;
2375 /* Skip the getattr altogether if we're asked not to sync */
2376 if (!(flags & AT_STATX_DONT_SYNC)) {
2377 err = ceph_do_getattr(inode, statx_to_caps(request_mask),
2378 flags & AT_STATX_FORCE_SYNC);
2383 generic_fillattr(inode, stat);
2384 stat->ino = ceph_present_inode(inode);
2387 * btime on newly-allocated inodes is 0, so if this is still set to
2388 * that, then assume that it's not valid.
2390 if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2391 stat->btime = ci->i_btime;
2392 valid_mask |= STATX_BTIME;
2395 if (ceph_snap(inode) == CEPH_NOSNAP)
2396 stat->dev = inode->i_sb->s_dev;
2398 stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2400 if (S_ISDIR(inode->i_mode)) {
2401 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2403 stat->size = ci->i_rbytes;
2405 stat->size = ci->i_files + ci->i_subdirs;
2407 stat->blksize = 65536;
2409 * Some applications rely on the st_nlink value of
2410 * directories being either 0 (if unlinked)
2411 * or 2 + the number of subdirectories.
2413 if (stat->nlink == 1)
2414 /* '.' + '..' + subdirs */
2415 stat->nlink = 1 + 1 + ci->i_subdirs;
2418 stat->result_mask = request_mask & valid_mask;
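/*
 * Userspace sketch (standard statx(2) interface; path is hypothetical):
 * a caller that can tolerate slightly stale attributes avoids the MDS
 * round trip entirely:
 *
 *	struct statx stx;
 *	statx(AT_FDCWD, "/mnt/cephfs/f", AT_STATX_DONT_SYNC,
 *	      STATX_BASIC_STATS | STATX_BTIME, &stx);
 *
 * stx.stx_mask then reports which of the requested bits came back
 * valid.
 */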