static DEFINE_PER_CPU(struct file_lock_list_struct, file_lock_list);
DEFINE_STATIC_PERCPU_RWSEM(file_rwsem);
+
/*
* The blocked_hash is used to find POSIX lock loops for deadlock detection.
* It is protected by blocked_lock_lock.
ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx) {
WARN_ON_ONCE(1);
- return error;
+ goto free_lock;
}
percpu_down_read(&file_rwsem);
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
+free_lock:
locks_free_lock(new_fl);
return error;
}
}
EXPORT_SYMBOL(generic_setlease);
+ #if IS_ENABLED(CONFIG_SRCU)
+ /*
+ * Kernel subsystems can register to be notified on any attempt to set
+ * a new lease with the lease_notifier_chain. This is used by (e.g.) nfsd
+ * to close files that it may have cached when there is an attempt to set a
+ * conflicting lease.
+ */
+ static struct srcu_notifier_head lease_notifier_chain;
+
+ static inline void
+ lease_notifier_chain_init(void)
+ {
+ srcu_init_notifier_head(&lease_notifier_chain);
+ }
+
+ static inline void
+ setlease_notifier(long arg, struct file_lock *lease)
+ {
+ if (arg != F_UNLCK)
+ srcu_notifier_call_chain(&lease_notifier_chain, arg, lease);
+ }
+
+ int lease_register_notifier(struct notifier_block *nb)
+ {
+ return srcu_notifier_chain_register(&lease_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(lease_register_notifier);
+
+ void lease_unregister_notifier(struct notifier_block *nb)
+ {
+ srcu_notifier_chain_unregister(&lease_notifier_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(lease_unregister_notifier);
+
+ #else /* !IS_ENABLED(CONFIG_SRCU) */
+ static inline void
+ lease_notifier_chain_init(void)
+ {
+ }
+
+ static inline void
+ setlease_notifier(long arg, struct file_lock *lease)
+ {
+ }
+
+ int lease_register_notifier(struct notifier_block *nb)
+ {
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(lease_register_notifier);
+
+ void lease_unregister_notifier(struct notifier_block *nb)
+ {
+ }
+ EXPORT_SYMBOL_GPL(lease_unregister_notifier);
+
+ #endif /* IS_ENABLED(CONFIG_SRCU) */
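
As a rough sketch of how a consumer of this chain might look (the callback and the cache-purging helper are hypothetical, loosely modelled on the nfsd use case mentioned in the comment above); because this is an SRCU notifier chain, the callback runs in process context and may block:

static int my_lease_conflict_cb(struct notifier_block *nb, unsigned long arg,
				void *data)
{
	struct file_lock *lease = data;

	/* Drop any cached state that would conflict with the new lease. */
	my_cache_purge(file_inode(lease->fl_file));	/* hypothetical helper */
	return NOTIFY_OK;
}

static struct notifier_block my_lease_nb = {
	.notifier_call	= my_lease_conflict_cb,
};

	/* at module init */
	lease_register_notifier(&my_lease_nb);
	/* at module exit */
	lease_unregister_notifier(&my_lease_nb);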
+
/**
* vfs_setlease - sets a lease on an open file
* @filp: file pointer
int
vfs_setlease(struct file *filp, long arg, struct file_lock **lease, void **priv)
{
+ if (lease)
+ setlease_notifier(arg, *lease);
if (filp->f_op->setlease)
return filp->f_op->setlease(filp, arg, lease, priv);
else
? (fl->fl_type & LOCK_WRITE) ? "RW   " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
- seq_printf(f, "%s ",
- (lease_breaking(fl))
- ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
- : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
+ int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
+
+ seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
+ (type == F_RDLCK) ? "READ" : "UNLCK");
}
if (inode) {
/* userspace relies on this representation of dev_t */
INIT_HLIST_HEAD(&fll->hlist);
}
+ lease_notifier_chain_init();
return 0;
}
core_initcall(filelock_init);
static int nfsd_fs_get_tree(struct fs_context *fc)
{
- fc->s_fs_info = get_net(fc->net_ns);
- return vfs_get_super(fc, vfs_get_keyed_super, nfsd_fill_super);
+ return get_tree_keyed(fc, nfsd_fill_super, get_net(fc->net_ns));
}
static void nfsd_fs_free_fc(struct fs_context *fc)
atomic_set(&nn->ntf_refcnt, 0);
init_waitqueue_head(&nn->ntf_wq);
+ seqlock_init(&nn->boot_lock);
mnt = vfs_kern_mount(&nfsd_fs_type, SB_KERNMOUNT, "nfsd", NULL);
if (IS_ERR(mnt)) {
struct iov_iter;
struct fscrypt_info;
struct fscrypt_operations;
+struct fsverity_info;
+struct fsverity_operations;
struct fs_context;
struct fs_parameter_description;
* @i_pages: Cached pages.
* @gfp_mask: Memory allocation flags to use for allocating pages.
* @i_mmap_writable: Number of VM_SHARED mappings.
+ * @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
* @nrpages: Number of page entries, protected by the i_pages lock.
struct xarray i_pages;
gfp_t gfp_mask;
atomic_t i_mmap_writable;
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ /* number of THPs, only for non-shmem files */
+ atomic_t nr_thps;
+#endif
struct rb_root_cached i_mmap;
struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
struct fscrypt_info *i_crypt_info;
#endif
+#ifdef CONFIG_FS_VERITY
+ struct fsverity_info *i_verity_info;
+#endif
+
void *i_private; /* fs or device private pointer */
} __randomize_layout;
+struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode);
+
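timestamp_truncate() pairs with the s_time_min/s_time_max limits added to struct super_block further down. A hedged sketch of the intended usage, with a purely illustrative 32-bit on-disk time range:

	/* In a filesystem's fill_super: declare what the on-disk format can hold. */
	sb->s_time_gran = 1;		/* nanosecond granularity */
	sb->s_time_min  = S32_MIN;	/* illustrative 32-bit range */
	sb->s_time_max  = S32_MAX;

	/* Later, clamp a caller-supplied struct timespec64 to that range. */
	inode->i_mtime = timestamp_truncate(ts, inode);
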
static inline unsigned int i_blocksize(const struct inode *node)
{
return (1 << node->i_blkbits);
extern int generic_setlease(struct file *, long, struct file_lock **, void **priv);
extern int vfs_setlease(struct file *, long, struct file_lock **, void **);
extern int lease_modify(struct file_lock *, int, struct list_head *);
+
+ struct notifier_block;
+ extern int lease_register_notifier(struct notifier_block *);
+ extern void lease_unregister_notifier(struct notifier_block *);
+
struct files_struct;
extern void show_fd_locks(struct seq_file *f,
struct file *filp, struct files_struct *files);
const struct xattr_handler **s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
+ struct key *s_master_keys; /* master crypto keys in use */
+#endif
+#ifdef CONFIG_FS_VERITY
+ const struct fsverity_operations *s_vop;
#endif
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
/* Granularity of c/m/atime in ns (cannot be worse than a second) */
u32 s_time_gran;
+ /* Time limits for c/m/atime in seconds */
+ time64_t s_time_min;
+ time64_t s_time_max;
#ifdef CONFIG_FSNOTIFY
__u32 s_fsnotify_mask;
struct fsnotify_mark_connector __rcu *s_fsnotify_marks;
#endif
#define S_ENCRYPTED 16384 /* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD 32768 /* Casefolded file */
+#define S_VERITY 65536 /* Verity file (using fs/verity/) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
#define IS_DAX(inode) ((inode)->i_flags & S_DAX)
#define IS_ENCRYPTED(inode) ((inode)->i_flags & S_ENCRYPTED)
#define IS_CASEFOLDED(inode) ((inode)->i_flags & S_CASEFOLD)
+#define IS_VERITY(inode) ((inode)->i_flags & S_VERITY)
#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
(inode)->i_rdev == WHITEOUT_DEV)
void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+ void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
extern int __blkdev_reread_part(struct block_device *bdev);
extern int blkdev_reread_part(struct block_device *bdev);
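
The bd_start_claiming()/bd_finish_claiming()/bd_abort_claiming() exports above form a two-phase claim: start the claim, do whatever setup must complete before other openers may race in, then finish or abort it. A sketch under those assumptions (error handling trimmed, the setup step is hypothetical):

	struct block_device *whole;
	int err;

	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	err = my_setup(bdev);		/* hypothetical setup step */
	if (err)
		bd_abort_claiming(bdev, whole, holder);
	else
		bd_finish_claiming(bdev, whole, holder);
	return err;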
return errseq_sample(&mapping->wb_err);
}
+static inline int filemap_nr_thps(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ return atomic_read(&mapping->nr_thps);
+#else
+ return 0;
+#endif
+}
+
+static inline void filemap_nr_thps_inc(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_inc(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
+
+static inline void filemap_nr_thps_dec(struct address_space *mapping)
+{
+#ifdef CONFIG_READ_ONLY_THP_FOR_FS
+ atomic_dec(&mapping->nr_thps);
+#else
+ WARN_ON_ONCE(1);
+#endif
+}
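
A minimal sketch of how a write path might consult this counter, assuming f is the struct file being opened for write and inode is its inode (the call site is illustrative; under this config option cached THPs are read-only, so a writer drops them and lets khugepaged rebuild them later):

	if ((f->f_mode & FMODE_WRITE) && filemap_nr_thps(inode->i_mapping))
		truncate_pagecache(inode, 0);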
+
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
/* mm/fadvise.c */
extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len,
int advice);
+extern int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice);
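
Exporting generic_fadvise() lets in-kernel callers apply the generic advice handling directly, without routing through vfs_fadvise(). For example, asking to drop the cached pages of a whole file (a len of 0 means "to end of file"):

	/* Advisory: try to release this file's cached pages. */
	generic_fadvise(file, 0, 0, POSIX_FADV_DONTNEED);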
#if defined(CONFIG_IO_URING)
extern struct sock *io_uring_get_socket(struct file *file);
fa->fsx_xflags = xflags;
}
+/*
+ * Flush file data before changing attributes. Caller must hold any locks
+ * required to prevent further writes to this file until we're done setting
+ * flags.
+ */
+static inline int inode_drain_writes(struct inode *inode)
+{
+ inode_dio_wait(inode);
+ return filemap_write_and_wait(inode->i_mapping);
+}
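
A sketch of the intended call pattern in a setflags-style ioctl (the function and the flag handling are illustrative): writes are drained before a flag such as immutable takes effect, so the file contents are stable once the flag becomes visible.

static int my_setflags(struct inode *inode, unsigned int flags)
{
	int err = 0;

	inode_lock(inode);
	/* Draining only matters when the file is becoming immutable. */
	if ((flags & S_IMMUTABLE) && !IS_IMMUTABLE(inode))
		err = inode_drain_writes(inode);
	if (!err)
		inode_set_flags(inode, flags & S_IMMUTABLE, S_IMMUTABLE);
	inode_unlock(inode);
	return err;
}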
+
#endif /* _LINUX_FS_H */
INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
INIT_LIST_HEAD(&cma_xprt->sc_read_complete_q);
INIT_LIST_HEAD(&cma_xprt->sc_send_ctxts);
- INIT_LIST_HEAD(&cma_xprt->sc_recv_ctxts);
+ init_llist_head(&cma_xprt->sc_recv_ctxts);
INIT_LIST_HEAD(&cma_xprt->sc_rw_ctxts);
init_waitqueue_head(&cma_xprt->sc_send_wait);
spin_lock_init(&cma_xprt->sc_lock);
spin_lock_init(&cma_xprt->sc_rq_dto_lock);
spin_lock_init(&cma_xprt->sc_send_lock);
- spin_lock_init(&cma_xprt->sc_recv_lock);
spin_lock_init(&cma_xprt->sc_rw_ctxt_lock);
/*
dprintk("svcrdma: error creating PD for connect request\n");
goto errout;
}
- newxprt->sc_sq_cq = ib_alloc_cq(dev, newxprt, newxprt->sc_sq_depth,
- 0, IB_POLL_WORKQUEUE);
+ newxprt->sc_sq_cq = ib_alloc_cq_any(dev, newxprt, newxprt->sc_sq_depth,
+ IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_sq_cq)) {
dprintk("svcrdma: error creating SQ CQ for connect request\n");
goto errout;
}
- newxprt->sc_rq_cq = ib_alloc_cq(dev, newxprt, rq_depth,
- 0, IB_POLL_WORKQUEUE);
+ newxprt->sc_rq_cq =
+ ib_alloc_cq_any(dev, newxprt, rq_depth, IB_POLL_WORKQUEUE);
if (IS_ERR(newxprt->sc_rq_cq)) {
dprintk("svcrdma: error creating RQ CQ for connect request\n");
goto errout;
{
struct svcxprt_rdma *rdma =
container_of(xprt, struct svcxprt_rdma, sc_xprt);
+
INIT_WORK(&rdma->sc_work, __svc_rdma_free);
- queue_work(svc_rdma_wq, &rdma->sc_work);
+ schedule_work(&rdma->sc_work);
}
static int svc_rdma_has_wspace(struct svc_xprt *xprt)