// SPDX-License-Identifier: LGPL-2.1
/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "cifs_fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
/*
 * Mark as invalid, all open files on tree connections since they
 * were closed when session to server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->ses->ses_lock);
	if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) {
		spin_unlock(&tcon->ses->ses_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->ses->ses_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
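/*
 * Illustrative example (not from the original source): an application call
 * such as open("/mnt/share/foo", O_RDWR | O_CREAT | O_TRUNC, 0644) is mapped
 * by the two helpers above to desired_access = GENERIC_READ | GENERIC_WRITE
 * (cifs_convert_flags) and disposition = FILE_OVERWRITE_IF
 * (cifs_get_disposition), matching the mapping table in cifs_nt_open() below.
 */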
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is no direct match for disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but truncates the existing
 *	file rather than recreating it as FILE_SUPERSEDE does
 *	(FILE_SUPERSEDE uses the attributes / metadata passed in on
 *	the open call).
 *
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc)
		return rc;

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
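/*
 * cifs_down_write() deliberately spins with down_write_trylock() + msleep()
 * instead of blocking in down_write(); the likely intent (not documented
 * here) is to keep a waiting writer from stalling readers that must make
 * progress while lock_sem is held across long-running operations.
 */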
static void cifsFileInfo_put_work(struct work_struct *work);

struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
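/*
 * Lock ordering used above (as the code is written): tcon->open_file_lock
 * is taken before cinode->open_file_lock, and both nest outside
 * cfile->file_info_lock. _cifsFileInfo_put() below acquires them in the
 * same order when tearing the lists back down.
 */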
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}
/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	if true, queue the final put to fileinfo_put_wq instead of
 *		doing it inline
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close_getattr)
			server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	if (offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}
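/*
 * Reference-count sketch (illustrative): cifs_new_fileinfo() creates the
 * handle with count == 1; cifsFileInfo_get()/cifsFileInfo_put() bracket any
 * temporary use; and the final put above is what actually sends the SMB
 * close (or queues it to fileinfo_put_wq when @offload is set).
 */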
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	rc = cifs_get_readable_path(tcon, full_path, &cfile);
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		} else {
			_cifsFileInfo_put(cfile, true, false);
		}
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (file->f_flags & O_DIRECT &&
	    (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
	     file->f_flags & O_APPEND))
		cifs_invalidate_cache(file_inode(file),
				      FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
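/*
 * Fast-path summary (illustrative): when a prior handle for the same path
 * sits on the deferred-close list with identical f_flags, cifs_open()
 * resurrects it via cifs_get_readable_path() and skips the wire open
 * entirely; otherwise it falls through to cifs_posix_open() (legacy unix
 * extensions) or cifs_nt_open().
 */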
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	desired_access = cifs_convert_flags(cfile->f_flags);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = cifs_create_options(cifs_sb, create_options);
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions)
			rc = smb311_posix_get_inode_info(&inode, full_path, inode->i_sb, xid);
		else if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
		    cinode->lease_granted &&
		    !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
		    dclose) {
			if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
				inode->i_ctime = inode->i_mtime = current_time(inode);
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						&cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
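/*
 * Example timeline for a deferred close (hypothetical values): on a mount
 * with closetimeo=5, a close(2) at t=0 only queues smb2_deferred_work_close()
 * for t=5s; an open(2) of the same file before then cancels the work in
 * cifs_open() and reuses the handle, saving an SMB close + open round trip.
 */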
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
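/*
 * In plain terms, the checks above say: non-overlapping ranges never
 * conflict; an I/O op (read/write) by the same tgid through the same fid
 * conflicts only when it is a write crossing a shared lock; a shared request
 * does not conflict with its own fid's locks or with another lock of the
 * same type; and for lock ops, two OFD locks on the same fid never conflict.
 */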
static bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->fl_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
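/*
 * Blocking behaviour above (as written): a conflicting waiter parks itself
 * on conf_lock->blist and sleeps until cifs_del_lock_waiters() detaches its
 * entry (the wait condition checks that the node points back to itself),
 * then retries the whole conflict scan from try_again.
 */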
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
 * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
 * request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if the error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
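/*
 * Sizing note: max_num above is derived from the server's advertised maxBuf
 * so that one LOCKING_ANDX request never overflows a single SMB buffer; the
 * two passes over types[] exist because each request can carry ranges of
 * only one lock type (exclusive ranges first, then shared ones).
 */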
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
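/*
 * F_GETLK probe strategy above: actually try to take the lock; success means
 * no conflict (undo it and report F_UNLCK). If a shared request failed, the
 * conflicting lock must be exclusive (F_WRLCK). For a failed exclusive
 * request, retry as shared: success implies the conflict is a shared lock
 * (F_RDLCK), failure implies exclusive (F_WRLCK).
 */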
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = cifs_flock_len(flock);
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
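/*
 * The tmp_llist dance in cifs_unlock_range() above keeps unlocked ranges
 * recoverable: entries are moved (not deleted) before each wire request,
 * re-spliced to the head of the file's list if the server rejects the batch,
 * and only freed once the unlock is known to have succeeded.
 */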
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (rc <= FILE_LOCK_DEFERRED)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type,
				      flock->fl_flags);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * pagereading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if ((flock->fl_flags & FL_POSIX) || (flock->fl_flags & FL_FLOCK)) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	xid = get_xid();

	if (!(fl->fl_flags & FL_FLOCK)) {
		rc = -ENOLCK;
		free_xid(xid);
		return rc;
	}

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		rc = -EOPNOTSUPP;
		free_xid(xid);
		return rc;
	}

	rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsFileInfo *cfile;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
		 flock->fl_flags, flock->fl_type, (long long)flock->fl_start,
		 (long long)flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
static void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
static ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms = {0};

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size) {
			i_size_write(d_inode(dentry), *offset);
			d_inode(dentry)->i_blocks = (512 - 1 + *offset) >> 9;
		}
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
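/*
 * Retry loop shape (as written): the inner while handles -EAGAIN by
 * reopening an invalidated handle and resending at most wp_retry_size()
 * bytes per SMB; the outer for advances total_written until write_size is
 * covered, so a short server write simply produces another iteration.
 */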
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
	/* we could simply return the first list entry since write-only
	   entries are always at the end of the list, but the first entry
	   might have a close pending, so we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&cifs_inode->open_file_lock);
	return NULL;
}
/* Return -EBADF if no handle is found and general rc otherwise */
int
cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	bool any_available = false;
	int rc = -EBADF;
	unsigned int refind = 0;
	bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
	bool with_delete = flags & FIND_WR_WITH_DELETE;

	*ret_file = NULL;

	/*
	 * Having a null inode here (because mapping->host was set to zero by
	 * the VFS or MM) should not happen but we had reports of an oops (due
	 * to it being zero) during stress testcases so we need to check for it
	 */
	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return rc;
	}

	cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&cifs_inode->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&cifs_inode->open_file_lock);
		return rc;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (with_delete && !(open_file->fid.access & DELETE))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&cifs_inode->open_file_lock);
				*ret_file = open_file;
				return 0;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find usable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&cifs_inode->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc) {
			*ret_file = inv_file;
			return 0;
		}

		spin_lock(&cifs_inode->open_file_lock);
		list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
		spin_unlock(&cifs_inode->open_file_lock);
		cifsFileInfo_put(inv_file);
		++refind;
		inv_file = NULL;
		spin_lock(&cifs_inode->open_file_lock);
		goto refind_writable;
	}

	return rc;
}
struct cifsFileInfo *
find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
	struct cifsFileInfo *cfile;
	int rc;

	rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
	if (rc)
		cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);

	return cfile;
}
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
		       int flags,
		       struct cifsFileInfo **ret_file)
{
	struct cifsFileInfo *cfile;
	void *page = alloc_dentry_path();

	*ret_file = NULL;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		struct cifsInodeInfo *cinode;
		const char *full_path = build_path_from_dentry(cfile->dentry, page);

		if (IS_ERR(full_path)) {
			spin_unlock(&tcon->open_file_lock);
			free_dentry_path(page);
			return PTR_ERR(full_path);
		}
		if (strcmp(full_path, name))
			continue;

		cinode = CIFS_I(d_inode(cfile->dentry));
		spin_unlock(&tcon->open_file_lock);
		free_dentry_path(page);
		return cifs_get_writable_file(cinode, flags, ret_file);
	}

	spin_unlock(&tcon->open_file_lock);
	free_dentry_path(page);
	return -ENOENT;
}
2251 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2252 struct cifsFileInfo **ret_file)
2254 struct cifsFileInfo *cfile;
2255 void *page = alloc_dentry_path();
2259 spin_lock(&tcon->open_file_lock);
2260 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2261 struct cifsInodeInfo *cinode;
2262 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2263 if (IS_ERR(full_path)) {
2264 spin_unlock(&tcon->open_file_lock);
2265 free_dentry_path(page);
2266 return PTR_ERR(full_path);
2268 if (strcmp(full_path, name))
2271 cinode = CIFS_I(d_inode(cfile->dentry));
2272 spin_unlock(&tcon->open_file_lock);
2273 free_dentry_path(page);
2274 *ret_file = find_readable_file(cinode, 0);
2275 return *ret_file ? 0 : -ENOENT;
2278 spin_unlock(&tcon->open_file_lock);
2279 free_dentry_path(page);
2284 cifs_writedata_release(struct kref *refcount)
2286 struct cifs_writedata *wdata = container_of(refcount,
2287 struct cifs_writedata, refcount);
2288 #ifdef CONFIG_CIFS_SMB_DIRECT
2290 smbd_deregister_mr(wdata->mr);
2296 cifsFileInfo_put(wdata->cfile);
2298 kvfree(wdata->pages);
2303 * Write failed with a retryable error. Resend the write request. It's also
2304 * possible that the page was redirtied so re-clean the page.
2307 cifs_writev_requeue(struct cifs_writedata *wdata)
2310 struct inode *inode = d_inode(wdata->cfile->dentry);
2311 struct TCP_Server_Info *server;
2312 unsigned int rest_len;
2314 server = tlink_tcon(wdata->cfile->tlink)->ses->server;
2316 rest_len = wdata->bytes;
2318 struct cifs_writedata *wdata2;
2319 unsigned int j, nr_pages, wsize, tailsz, cur_len;
2321 wsize = server->ops->wp_retry_size(inode);
2322 if (wsize < rest_len) {
2323 nr_pages = wsize / PAGE_SIZE;
2328 cur_len = nr_pages * PAGE_SIZE;
2331 nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE);
2333 tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE;
2336 wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
2342 for (j = 0; j < nr_pages; j++) {
2343 wdata2->pages[j] = wdata->pages[i + j];
2344 lock_page(wdata2->pages[j]);
2345 clear_page_dirty_for_io(wdata2->pages[j]);
2348 wdata2->sync_mode = wdata->sync_mode;
2349 wdata2->nr_pages = nr_pages;
2350 wdata2->offset = page_offset(wdata2->pages[0]);
2351 wdata2->pagesz = PAGE_SIZE;
2352 wdata2->tailsz = tailsz;
2353 wdata2->bytes = cur_len;
2355 rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
2357 if (!wdata2->cfile) {
2358 cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
2360 if (!is_retryable_error(rc))
2363 wdata2->pid = wdata2->cfile->pid;
2364 rc = server->ops->async_writev(wdata2,
2365 cifs_writedata_release);
2368 for (j = 0; j < nr_pages; j++) {
2369 unlock_page(wdata2->pages[j]);
2370 if (rc != 0 && !is_retryable_error(rc)) {
2371 SetPageError(wdata2->pages[j]);
2372 end_page_writeback(wdata2->pages[j]);
2373 put_page(wdata2->pages[j]);
2377 kref_put(&wdata2->refcount, cifs_writedata_release);
2379 if (is_retryable_error(rc))
2385 rest_len -= cur_len;
2387 } while (i < wdata->nr_pages);
2389 /* cleanup remaining pages from the original wdata */
2390 for (; i < wdata->nr_pages; i++) {
2391 SetPageError(wdata->pages[i]);
2392 end_page_writeback(wdata->pages[i]);
2393 put_page(wdata->pages[i]);
2396 if (rc != 0 && !is_retryable_error(rc))
2397 mapping_set_error(inode->i_mapping, rc);
2398 kref_put(&wdata->refcount, cifs_writedata_release);
2402 cifs_writev_complete(struct work_struct *work)
2404 struct cifs_writedata *wdata = container_of(work,
2405 struct cifs_writedata, work);
2406 struct inode *inode = d_inode(wdata->cfile->dentry);
2409 if (wdata->result == 0) {
2410 spin_lock(&inode->i_lock);
2411 cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
2412 spin_unlock(&inode->i_lock);
2413 cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
2415 } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
2416 return cifs_writev_requeue(wdata);
2418 for (i = 0; i < wdata->nr_pages; i++) {
2419 struct page *page = wdata->pages[i];
2421 if (wdata->result == -EAGAIN)
2422 __set_page_dirty_nobuffers(page);
2423 else if (wdata->result < 0)
2425 end_page_writeback(page);
2426 cifs_readpage_to_fscache(inode, page);
2429 if (wdata->result != -EAGAIN)
2430 mapping_set_error(inode->i_mapping, wdata->result);
2431 kref_put(&wdata->refcount, cifs_writedata_release);
2434 struct cifs_writedata *
2435 cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
2437 struct page **pages =
2438 kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
2440 return cifs_writedata_direct_alloc(pages, complete);
2445 struct cifs_writedata *
2446 cifs_writedata_direct_alloc(struct page **pages, work_func_t complete)
2448 struct cifs_writedata *wdata;
2450 wdata = kzalloc(sizeof(*wdata), GFP_NOFS);
2451 if (wdata != NULL) {
2452 wdata->pages = pages;
2453 kref_init(&wdata->refcount);
2454 INIT_LIST_HEAD(&wdata->list);
2455 init_completion(&wdata->done);
2456 INIT_WORK(&wdata->work, complete);
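/*
 * Lifetime note: cifs_writedata_direct_alloc() returns a structure whose
 * refcount is initialized to 1 by kref_init() above; the matching
 * kref_put(&wdata->refcount, cifs_writedata_release) frees the pages
 * array and the structure itself. Minimal sketch of the usual pairing:
 *
 *	wdata = cifs_writedata_alloc(nr_pages, cifs_writev_complete);
 *	if (!wdata)
 *		return -ENOMEM;
 *	... fill wdata and send it ...
 *	kref_put(&wdata->refcount, cifs_writedata_release);
 */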
2462 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2464 struct address_space *mapping = page->mapping;
2465 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
2468 int bytes_written = 0;
2469 struct inode *inode;
2470 struct cifsFileInfo *open_file;
2472 if (!mapping || !mapping->host)
2475 inode = page->mapping->host;
2477 offset += (loff_t)from;
2478 write_data = kmap(page);
2481 if ((to > PAGE_SIZE) || (from > to)) {
2486 /* racing with truncate? */
2487 if (offset > mapping->host->i_size) {
2489 return 0; /* don't care */
2492 /* check to make sure that we are not extending the file */
2493 if (mapping->host->i_size - offset < (loff_t)to)
2494 to = (unsigned)(mapping->host->i_size - offset);
2496 rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
2499 bytes_written = cifs_write(open_file, open_file->pid,
2500 write_data, to - from, &offset);
2501 cifsFileInfo_put(open_file);
2502 /* Does mm or vfs already set times? */
2503 inode->i_atime = inode->i_mtime = current_time(inode);
2504 if ((bytes_written > 0) && (offset))
2506 else if (bytes_written < 0)
2511 cifs_dbg(FYI, "No writable handle for write page rc=%d\n", rc);
2512 if (!is_retryable_error(rc))
2520 static struct cifs_writedata *
2521 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
2522 pgoff_t end, pgoff_t *index,
2523 unsigned int *found_pages)
2525 struct cifs_writedata *wdata;
2527 wdata = cifs_writedata_alloc((unsigned int)tofind,
2528 cifs_writev_complete);
2532 *found_pages = find_get_pages_range_tag(mapping, index, end,
2533 PAGECACHE_TAG_DIRTY, tofind, wdata->pages);
2538 wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
2539 struct address_space *mapping,
2540 struct writeback_control *wbc,
2541 pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
2543 unsigned int nr_pages = 0, i;
2546 for (i = 0; i < found_pages; i++) {
2547 page = wdata->pages[i];
2549 * At this point we hold neither the i_pages lock nor the
2550 * page lock: the page may be truncated or invalidated
2551 * (changing page->mapping to NULL), or even swizzled
2552 * back from swapper_space to tmpfs file mapping
2557 else if (!trylock_page(page))
2560 if (unlikely(page->mapping != mapping)) {
2565 if (!wbc->range_cyclic && page->index > end) {
2571 if (*next && (page->index != *next)) {
2572 /* Not next consecutive page */
2577 if (wbc->sync_mode != WB_SYNC_NONE)
2578 wait_on_page_writeback(page);
2580 if (PageWriteback(page) ||
2581 !clear_page_dirty_for_io(page)) {
2587 * This actually clears the dirty bit in the radix tree.
2588 * See cifs_writepage() for more commentary.
2590 set_page_writeback(page);
2591 if (page_offset(page) >= i_size_read(mapping->host)) {
2594 end_page_writeback(page);
2598 wdata->pages[i] = page;
2599 *next = page->index + 1;
2603 /* reset index to refind any pages skipped */
2605 *index = wdata->pages[0]->index + 1;
2607 /* put any pages we aren't going to use */
2608 for (i = nr_pages; i < found_pages; i++) {
2609 put_page(wdata->pages[i]);
2610 wdata->pages[i] = NULL;
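/*
 * wdata_send_pages() fills in the remaining wdata fields and hands the
 * request to the transport. wdata->bytes is the full span of the locked
 * pages: (nr_pages - 1) whole pages plus the tail. For example, with
 * three pages and 100 bytes of file data in the last one (assuming a 4K
 * PAGE_SIZE), bytes = 2 * 4096 + 100 = 8292 and tailsz = 100.
 */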
2617 wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
2618 struct address_space *mapping, struct writeback_control *wbc)
2622 wdata->sync_mode = wbc->sync_mode;
2623 wdata->nr_pages = nr_pages;
2624 wdata->offset = page_offset(wdata->pages[0]);
2625 wdata->pagesz = PAGE_SIZE;
2626 wdata->tailsz = min(i_size_read(mapping->host) -
2627 page_offset(wdata->pages[nr_pages - 1]),
2629 wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;
2630 wdata->pid = wdata->cfile->pid;
2632 rc = adjust_credits(wdata->server, &wdata->credits, wdata->bytes);
2636 if (wdata->cfile->invalidHandle)
2639 rc = wdata->server->ops->async_writev(wdata,
2640 cifs_writedata_release);
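/*
 * cifs_writepages() drives the three helpers above in a loop:
 * wdata_alloc_and_fillpages() gathers up to @tofind dirty pages,
 * wdata_prepare_pages() locks them and marks them for writeback, and
 * wdata_send_pages() issues the asynchronous write. Credits are reserved
 * before each batch and released again on any failure path.
 */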
2645 static int cifs_writepages(struct address_space *mapping,
2646 struct writeback_control *wbc)
2648 struct inode *inode = mapping->host;
2649 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2650 struct TCP_Server_Info *server;
2651 bool done = false, scanned = false, range_whole = false;
2653 struct cifs_writedata *wdata;
2654 struct cifsFileInfo *cfile = NULL;
2660 * If wsize is smaller than the page cache size, default to writing
2661 * one page at a time via cifs_writepage
2663 if (cifs_sb->ctx->wsize < PAGE_SIZE)
2664 return generic_writepages(mapping, wbc);
2667 if (wbc->range_cyclic) {
2668 index = mapping->writeback_index; /* Start from prev offset */
2671 index = wbc->range_start >> PAGE_SHIFT;
2672 end = wbc->range_end >> PAGE_SHIFT;
2673 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2677 server = cifs_pick_channel(cifs_sb_master_tcon(cifs_sb)->ses);
2680 while (!done && index <= end) {
2681 unsigned int i, nr_pages, found_pages, wsize;
2682 pgoff_t next = 0, tofind, saved_index = index;
2683 struct cifs_credits credits_on_stack;
2684 struct cifs_credits *credits = &credits_on_stack;
2685 int get_file_rc = 0;
2688 cifsFileInfo_put(cfile);
2690 rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
2692 /* in case of an error store it to return later */
2696 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
2703 tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
2705 wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
2710 add_credits_and_wake_if(server, credits, 0);
2714 if (found_pages == 0) {
2715 kref_put(&wdata->refcount, cifs_writedata_release);
2716 add_credits_and_wake_if(server, credits, 0);
2720 nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
2721 end, &index, &next, &done);
2723 /* nothing to write? */
2724 if (nr_pages == 0) {
2725 kref_put(&wdata->refcount, cifs_writedata_release);
2726 add_credits_and_wake_if(server, credits, 0);
2730 wdata->credits = credits_on_stack;
2731 wdata->cfile = cfile;
2732 wdata->server = server;
2735 if (!wdata->cfile) {
2736 cifs_dbg(VFS, "No writable handle in writepages rc=%d\n",
2738 if (is_retryable_error(get_file_rc))
2743 rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);
2745 for (i = 0; i < nr_pages; ++i)
2746 unlock_page(wdata->pages[i]);
2748 /* send failure -- clean up the mess */
2750 add_credits_and_wake_if(server, &wdata->credits, 0);
2751 for (i = 0; i < nr_pages; ++i) {
2752 if (is_retryable_error(rc))
2753 redirty_page_for_writepage(wbc,
2756 SetPageError(wdata->pages[i]);
2757 end_page_writeback(wdata->pages[i]);
2758 put_page(wdata->pages[i]);
2760 if (!is_retryable_error(rc))
2761 mapping_set_error(mapping, rc);
2763 kref_put(&wdata->refcount, cifs_writedata_release);
2765 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
2766 index = saved_index;
2770 /* Return immediately if we received a signal during writing */
2771 if (is_interrupt_error(rc)) {
2776 if (rc != 0 && saved_rc == 0)
2779 wbc->nr_to_write -= nr_pages;
2780 if (wbc->nr_to_write <= 0)
2786 if (!scanned && !done) {
2788 * We hit the last page and there is more work to be done: wrap
2789 * back to the start of the file
2799 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2800 mapping->writeback_index = index;
2803 cifsFileInfo_put(cfile);
2805 /* Indication to update ctime and mtime as close is deferred */
2806 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
2811 cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
2817 /* BB add check for wbc flags */
2819 if (!PageUptodate(page))
2820 cifs_dbg(FYI, "ppw - page not up to date\n");
2823 * Set the "writeback" flag, and clear "dirty" in the radix tree.
2825 * A writepage() implementation always needs to do either this,
2826 * or re-dirty the page with "redirty_page_for_writepage()" in
2827 * the case of a failure.
2829 * Just unlocking the page will cause the radix tree tag-bits
2830 * to fail to update with the state of the page correctly.
2832 set_page_writeback(page);
2834 rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
2835 if (is_retryable_error(rc)) {
2836 if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
2838 redirty_page_for_writepage(wbc, page);
2839 } else if (rc != 0) {
2841 mapping_set_error(page->mapping, rc);
2843 SetPageUptodate(page);
2845 end_page_writeback(page);
2851 static int cifs_writepage(struct page *page, struct writeback_control *wbc)
2853 int rc = cifs_writepage_locked(page, wbc);
2858 static int cifs_write_end(struct file *file, struct address_space *mapping,
2859 loff_t pos, unsigned len, unsigned copied,
2860 struct page *page, void *fsdata)
2863 struct inode *inode = mapping->host;
2864 struct cifsFileInfo *cfile = file->private_data;
2865 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2868 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2871 pid = current->tgid;
2873 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2876 if (PageChecked(page)) {
2878 SetPageUptodate(page);
2879 ClearPageChecked(page);
2880 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2881 SetPageUptodate(page);
2883 if (!PageUptodate(page)) {
2885 unsigned offset = pos & (PAGE_SIZE - 1);
2889 /* this is probably better than directly calling
2890 cifs_partialpagewrite() since in this function the file
2891 handle is known, which we might as well leverage */
2892 /* BB check if anything else is missing out of ppw,
2893 such as updating the last write time */
2894 page_data = kmap(page);
2895 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2896 /* if (rc < 0) should we set writebehind rc? */
2903 set_page_dirty(page);
2907 spin_lock(&inode->i_lock);
2908 if (pos > inode->i_size) {
2909 i_size_write(inode, pos);
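/* i_blocks is in 512-byte units: round the new size up to a sector */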
2910 inode->i_blocks = (512 - 1 + pos) >> 9;
2912 spin_unlock(&inode->i_lock);
2917 /* Indication to update ctime and mtime as close is deferred */
2918 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
2923 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2928 struct cifs_tcon *tcon;
2929 struct TCP_Server_Info *server;
2930 struct cifsFileInfo *smbfile = file->private_data;
2931 struct inode *inode = file_inode(file);
2932 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2934 rc = file_write_and_wait_range(file, start, end);
2936 trace_cifs_fsync_err(inode->i_ino, rc);
2942 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2945 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2946 rc = cifs_zap_mapping(inode);
2948 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2949 rc = 0; /* don't care about it in fsync */
2953 tcon = tlink_tcon(smbfile->tlink);
2954 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2955 server = tcon->ses->server;
2956 if (server->ops->flush == NULL) {
2958 goto strict_fsync_exit;
2961 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2962 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2964 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2965 cifsFileInfo_put(smbfile);
2967 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2969 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2977 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2981 struct cifs_tcon *tcon;
2982 struct TCP_Server_Info *server;
2983 struct cifsFileInfo *smbfile = file->private_data;
2984 struct inode *inode = file_inode(file);
2985 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2987 rc = file_write_and_wait_range(file, start, end);
2989 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2995 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2998 tcon = tlink_tcon(smbfile->tlink);
2999 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
3000 server = tcon->ses->server;
3001 if (server->ops->flush == NULL) {
3006 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
3007 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
3009 rc = server->ops->flush(xid, tcon, &smbfile->fid);
3010 cifsFileInfo_put(smbfile);
3012 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
3014 rc = server->ops->flush(xid, tcon, &smbfile->fid);
3023 * As the file closes, flush all cached write data for this inode,
3024 * checking for write-behind errors.
3026 int cifs_flush(struct file *file, fl_owner_t id)
3028 struct inode *inode = file_inode(file);
3031 if (file->f_mode & FMODE_WRITE)
3032 rc = filemap_write_and_wait(inode->i_mapping);
3034 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
3036 /* get more nuanced writeback errors */
3037 rc = filemap_check_wb_err(file->f_mapping, 0);
3038 trace_cifs_flush_err(inode->i_ino, rc);
3044 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
3049 for (i = 0; i < num_pages; i++) {
3050 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3053 * save number of pages we have already allocated and
3054 * return with ENOMEM error
3063 for (i = 0; i < num_pages; i++)
3070 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
3075 clen = min_t(const size_t, len, wsize);
3076 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
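/*
 * Worked example (assuming a 4K PAGE_SIZE): with wsize = 65536 and
 * len = 100000, clen = min(100000, 65536) = 65536 and
 * num_pages = DIV_ROUND_UP(65536, 4096) = 16.
 */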
3085 cifs_uncached_writedata_release(struct kref *refcount)
3088 struct cifs_writedata *wdata = container_of(refcount,
3089 struct cifs_writedata, refcount);
3091 kref_put(&wdata->ctx->refcount, cifs_aio_ctx_release);
3092 for (i = 0; i < wdata->nr_pages; i++)
3093 put_page(wdata->pages[i]);
3094 cifs_writedata_release(refcount);
3097 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx);
3100 cifs_uncached_writev_complete(struct work_struct *work)
3102 struct cifs_writedata *wdata = container_of(work,
3103 struct cifs_writedata, work);
3104 struct inode *inode = d_inode(wdata->cfile->dentry);
3105 struct cifsInodeInfo *cifsi = CIFS_I(inode);
3107 spin_lock(&inode->i_lock);
3108 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
3109 if (cifsi->server_eof > inode->i_size)
3110 i_size_write(inode, cifsi->server_eof);
3111 spin_unlock(&inode->i_lock);
3113 complete(&wdata->done);
3114 collect_uncached_write_data(wdata->ctx);
3115 /* the below call can possibly free the last ref to aio ctx */
3116 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3120 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
3121 size_t *len, unsigned long *num_pages)
3123 size_t save_len, copied, bytes, cur_len = *len;
3124 unsigned long i, nr_pages = *num_pages;
3127 for (i = 0; i < nr_pages; i++) {
3128 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
3129 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
3132 * If we didn't copy as much as we expected, then that
3133 * may mean we trod into an unmapped area. Stop copying
3134 * at that point. On the next pass through the big
3135 * loop, we'll likely end up getting a zero-length
3136 * write and bailing out of it.
3141 cur_len = save_len - cur_len;
3145 * If we have no data to send, then that probably means that
3146 * the copy above failed altogether. That's most likely because
3147 * the address in the iovec was bogus. Return -EFAULT and let
3148 * the caller free anything we allocated and bail out.
3154 * i + 1 now represents the number of pages we actually used in
3155 * the copy phase above.
3162 cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
3163 struct cifs_aio_ctx *ctx)
3166 struct cifs_credits credits;
3168 struct TCP_Server_Info *server = wdata->server;
3171 if (wdata->cfile->invalidHandle) {
3172 rc = cifs_reopen_file(wdata->cfile, false);
3181 * Wait for credits to resend this wdata.
3182 * Note: we are attempting to resend the whole wdata not in
3186 rc = server->ops->wait_mtu_credits(server, wdata->bytes,
3191 if (wsize < wdata->bytes) {
3192 add_credits_and_wake_if(server, &credits, 0);
3195 } while (wsize < wdata->bytes);
3196 wdata->credits = credits;
3198 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3201 if (wdata->cfile->invalidHandle)
3204 #ifdef CONFIG_CIFS_SMB_DIRECT
3206 wdata->mr->need_invalidate = true;
3207 smbd_deregister_mr(wdata->mr);
3211 rc = server->ops->async_writev(wdata,
3212 cifs_uncached_writedata_release);
3216 /* If the write was successfully sent, we are done */
3218 list_add_tail(&wdata->list, wdata_list);
3222 /* Roll back credits and retry if needed */
3223 add_credits_and_wake_if(server, &wdata->credits, 0);
3224 } while (rc == -EAGAIN);
3227 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
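/*
 * cifs_write_from_iter() splits the user's iov_iter into wsize-sized
 * chunks, wraps each chunk in a cifs_writedata and submits it with
 * ->async_writev(); completed requests are gathered back together, in
 * order of increasing offset, by collect_uncached_write_data().
 */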
3232 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
3233 struct cifsFileInfo *open_file,
3234 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list,
3235 struct cifs_aio_ctx *ctx)
3239 unsigned long nr_pages, num_pages, i;
3240 struct cifs_writedata *wdata;
3241 struct iov_iter saved_from = *from;
3242 loff_t saved_offset = offset;
3244 struct TCP_Server_Info *server;
3245 struct page **pagevec;
3249 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3250 pid = open_file->pid;
3252 pid = current->tgid;
3254 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
3259 struct cifs_credits credits_on_stack;
3260 struct cifs_credits *credits = &credits_on_stack;
3262 if (open_file->invalidHandle) {
3263 rc = cifs_reopen_file(open_file, false);
3270 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->wsize,
3275 cur_len = min_t(const size_t, len, wsize);
3277 if (ctx->direct_io) {
3280 result = iov_iter_get_pages_alloc2(
3281 from, &pagevec, cur_len, &start);
3284 "direct_writev couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
3285 result, iov_iter_type(from),
3286 from->iov_offset, from->count);
3290 add_credits_and_wake_if(server, credits, 0);
3293 cur_len = (size_t)result;
3296 (cur_len + start + PAGE_SIZE - 1) / PAGE_SIZE;
3298 wdata = cifs_writedata_direct_alloc(pagevec,
3299 cifs_uncached_writev_complete);
3302 add_credits_and_wake_if(server, credits, 0);
3307 wdata->page_offset = start;
3310 cur_len - (PAGE_SIZE - start) -
3311 (nr_pages - 2) * PAGE_SIZE :
3314 nr_pages = get_numpages(wsize, len, &cur_len);
3315 wdata = cifs_writedata_alloc(nr_pages,
3316 cifs_uncached_writev_complete);
3319 add_credits_and_wake_if(server, credits, 0);
3323 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
3325 kvfree(wdata->pages);
3327 add_credits_and_wake_if(server, credits, 0);
3331 num_pages = nr_pages;
3332 rc = wdata_fill_from_iovec(
3333 wdata, from, &cur_len, &num_pages);
3335 for (i = 0; i < nr_pages; i++)
3336 put_page(wdata->pages[i]);
3337 kvfree(wdata->pages);
3339 add_credits_and_wake_if(server, credits, 0);
3344 * Bring nr_pages down to the number of pages we
3345 * actually used, and free any pages that we didn't use.
3347 for ( ; nr_pages > num_pages; nr_pages--)
3348 put_page(wdata->pages[nr_pages - 1]);
3350 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
3353 wdata->sync_mode = WB_SYNC_ALL;
3354 wdata->nr_pages = nr_pages;
3355 wdata->offset = (__u64)offset;
3356 wdata->cfile = cifsFileInfo_get(open_file);
3357 wdata->server = server;
3359 wdata->bytes = cur_len;
3360 wdata->pagesz = PAGE_SIZE;
3361 wdata->credits = credits_on_stack;
3363 kref_get(&ctx->refcount);
3365 rc = adjust_credits(server, &wdata->credits, wdata->bytes);
3368 if (wdata->cfile->invalidHandle)
3371 rc = server->ops->async_writev(wdata,
3372 cifs_uncached_writedata_release);
3376 add_credits_and_wake_if(server, &wdata->credits, 0);
3377 kref_put(&wdata->refcount,
3378 cifs_uncached_writedata_release);
3379 if (rc == -EAGAIN) {
3381 iov_iter_advance(from, offset - saved_offset);
3387 list_add_tail(&wdata->list, wdata_list);
3396 static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
3398 struct cifs_writedata *wdata, *tmp;
3399 struct cifs_tcon *tcon;
3400 struct cifs_sb_info *cifs_sb;
3401 struct dentry *dentry = ctx->cfile->dentry;
3404 tcon = tlink_tcon(ctx->cfile->tlink);
3405 cifs_sb = CIFS_SB(dentry->d_sb);
3407 mutex_lock(&ctx->aio_mutex);
3409 if (list_empty(&ctx->list)) {
3410 mutex_unlock(&ctx->aio_mutex);
3416 * Wait for and collect replies for any successful sends in order of
3417 * increasing offset. Once an error is hit, then return without waiting
3418 * for any more replies.
3421 list_for_each_entry_safe(wdata, tmp, &ctx->list, list) {
3423 if (!try_wait_for_completion(&wdata->done)) {
3424 mutex_unlock(&ctx->aio_mutex);
3431 ctx->total_len += wdata->bytes;
3433 /* resend call if it's a retryable error */
3434 if (rc == -EAGAIN) {
3435 struct list_head tmp_list;
3436 struct iov_iter tmp_from = ctx->iter;
3438 INIT_LIST_HEAD(&tmp_list);
3439 list_del_init(&wdata->list);
3442 rc = cifs_resend_wdata(
3443 wdata, &tmp_list, ctx);
3445 iov_iter_advance(&tmp_from,
3446 wdata->offset - ctx->pos);
3448 rc = cifs_write_from_iter(wdata->offset,
3449 wdata->bytes, &tmp_from,
3450 ctx->cfile, cifs_sb, &tmp_list,
3453 kref_put(&wdata->refcount,
3454 cifs_uncached_writedata_release);
3457 list_splice(&tmp_list, &ctx->list);
3461 list_del_init(&wdata->list);
3462 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
3465 cifs_stats_bytes_written(tcon, ctx->total_len);
3466 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(dentry->d_inode)->flags);
3468 ctx->rc = (rc == 0) ? ctx->total_len : rc;
3470 mutex_unlock(&ctx->aio_mutex);
3472 if (ctx->iocb && ctx->iocb->ki_complete)
3473 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
3475 complete(&ctx->done);
3478 static ssize_t __cifs_writev(
3479 struct kiocb *iocb, struct iov_iter *from, bool direct)
3481 struct file *file = iocb->ki_filp;
3482 ssize_t total_written = 0;
3483 struct cifsFileInfo *cfile;
3484 struct cifs_tcon *tcon;
3485 struct cifs_sb_info *cifs_sb;
3486 struct cifs_aio_ctx *ctx;
3487 struct iov_iter saved_from = *from;
3488 size_t len = iov_iter_count(from);
3492 * iov_iter_get_pages_alloc doesn't work with ITER_KVEC.
3493 * In this case, fall back to the non-direct write function.
3494 * This could be improved by getting pages directly for ITER_KVEC.
3496 if (direct && iov_iter_is_kvec(from)) {
3497 cifs_dbg(FYI, "use non-direct cifs_writev for kvec I/O\n");
3501 rc = generic_write_checks(iocb, from);
3505 cifs_sb = CIFS_FILE_SB(file);
3506 cfile = file->private_data;
3507 tcon = tlink_tcon(cfile->tlink);
3509 if (!tcon->ses->server->ops->async_writev)
3512 ctx = cifs_aio_ctx_alloc();
3516 ctx->cfile = cifsFileInfo_get(cfile);
3518 if (!is_sync_kiocb(iocb))
3521 ctx->pos = iocb->ki_pos;
3524 ctx->direct_io = true;
3528 rc = setup_aio_ctx_iter(ctx, from, WRITE);
3530 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3535 /* grab a lock here because response handlers can access ctx */
3536 mutex_lock(&ctx->aio_mutex);
3538 rc = cifs_write_from_iter(iocb->ki_pos, ctx->len, &saved_from,
3539 cfile, cifs_sb, &ctx->list, ctx);
3542 * If at least one write was successfully sent, then discard any rc
3543 * value from the later writes. If the other write succeeds, then
3544 * we'll end up returning whatever was written. If it fails, then
3545 * we'll get a new rc value from that.
3547 if (!list_empty(&ctx->list))
3550 mutex_unlock(&ctx->aio_mutex);
3553 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3557 if (!is_sync_kiocb(iocb)) {
3558 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3559 return -EIOCBQUEUED;
3562 rc = wait_for_completion_killable(&ctx->done);
3564 mutex_lock(&ctx->aio_mutex);
3565 ctx->rc = rc = -EINTR;
3566 total_written = ctx->total_len;
3567 mutex_unlock(&ctx->aio_mutex);
3570 total_written = ctx->total_len;
3573 kref_put(&ctx->refcount, cifs_aio_ctx_release);
3575 if (unlikely(!total_written))
3578 iocb->ki_pos += total_written;
3579 return total_written;
3582 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3584 struct file *file = iocb->ki_filp;
3586 cifs_revalidate_mapping(file->f_inode);
3587 return __cifs_writev(iocb, from, true);
3590 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3592 return __cifs_writev(iocb, from, false);
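/*
 * cifs_writev() is the cached-write path used by cifs_strict_writev()
 * when we hold an oplock/lease that allows write caching: it goes
 * through the page cache via __generic_file_write_iter(), but only after
 * checking that no brlock on the file conflicts with the range being
 * written.
 */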
3596 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
3598 struct file *file = iocb->ki_filp;
3599 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
3600 struct inode *inode = file->f_mapping->host;
3601 struct cifsInodeInfo *cinode = CIFS_I(inode);
3602 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
3607 * We need to hold the sem to be sure nobody modifies lock list
3608 * with a brlock that prevents writing.
3610 down_read(&cinode->lock_sem);
3612 rc = generic_write_checks(iocb, from);
3616 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
3617 server->vals->exclusive_lock_type, 0,
3618 NULL, CIFS_WRITE_OP))
3619 rc = __generic_file_write_iter(iocb, from);
3623 up_read(&cinode->lock_sem);
3624 inode_unlock(inode);
3627 rc = generic_write_sync(iocb, rc);
3632 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
3634 struct inode *inode = file_inode(iocb->ki_filp);
3635 struct cifsInodeInfo *cinode = CIFS_I(inode);
3636 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3637 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3638 iocb->ki_filp->private_data;
3639 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3642 written = cifs_get_writer(cinode);
3646 if (CIFS_CACHE_WRITE(cinode)) {
3647 if (cap_unix(tcon->ses) &&
3648 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
3649 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
3650 written = generic_file_write_iter(iocb, from);
3653 written = cifs_writev(iocb, from);
3657 * For non-oplocked files in strict cache mode we need to write the data
3658 * to the server exactly from the pos to pos+len-1 rather than flush all
3659 * affected pages because it may cause an error with mandatory locks on
3660 * these pages but not on the region from pos to pos+len-1.
3662 written = cifs_user_writev(iocb, from);
3663 if (CIFS_CACHE_READ(cinode)) {
3665 * We have read level caching and we have just sent a write
3666 * request to the server thus making data in the cache stale.
3667 * Zap the cache and set oplock/lease level to NONE to avoid
3668 * reading stale data from the cache. All subsequent read
3669 * operations will read new data from the server.
3671 cifs_zap_mapping(inode);
3672 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
3677 cifs_put_writer(cinode);
3681 static struct cifs_readdata *
3682 cifs_readdata_direct_alloc(struct page **pages, work_func_t complete)
3684 struct cifs_readdata *rdata;
3686 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
3687 if (rdata != NULL) {
3688 rdata->pages = pages;
3689 kref_init(&rdata->refcount);
3690 INIT_LIST_HEAD(&rdata->list);
3691 init_completion(&rdata->done);
3692 INIT_WORK(&rdata->work, complete);
3698 static struct cifs_readdata *
3699 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
3701 struct page **pages =
3702 kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
3703 struct cifs_readdata *ret = NULL;
3706 ret = cifs_readdata_direct_alloc(pages, complete);
3715 cifs_readdata_release(struct kref *refcount)
3717 struct cifs_readdata *rdata = container_of(refcount,
3718 struct cifs_readdata, refcount);
3719 #ifdef CONFIG_CIFS_SMB_DIRECT
3721 smbd_deregister_mr(rdata->mr);
3726 cifsFileInfo_put(rdata->cfile);
3728 kvfree(rdata->pages);
3733 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
3739 for (i = 0; i < nr_pages; i++) {
3740 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
3745 rdata->pages[i] = page;
3749 unsigned int nr_page_failed = i;
3751 for (i = 0; i < nr_page_failed; i++) {
3752 put_page(rdata->pages[i]);
3753 rdata->pages[i] = NULL;
3760 cifs_uncached_readdata_release(struct kref *refcount)
3762 struct cifs_readdata *rdata = container_of(refcount,
3763 struct cifs_readdata, refcount);
3766 kref_put(&rdata->ctx->refcount, cifs_aio_ctx_release);
3767 for (i = 0; i < rdata->nr_pages; i++) {
3768 put_page(rdata->pages[i]);
3770 cifs_readdata_release(refcount);
3774 * cifs_readdata_to_iov - copy data from pages in response to an iovec
3775 * @rdata: the readdata response with list of pages holding data
3776 * @iter: destination for our data
3778 * This function copies data from a list of pages in a readdata response into
3779 * an array of iovecs. It will first calculate where the data should go
3780 * based on the info in the readdata and then copy the data into that spot.
3783 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
3785 size_t remaining = rdata->got_bytes;
3788 for (i = 0; i < rdata->nr_pages; i++) {
3789 struct page *page = rdata->pages[i];
3790 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
3793 if (unlikely(iov_iter_is_pipe(iter))) {
3794 void *addr = kmap_atomic(page);
3796 written = copy_to_iter(addr, copy, iter);
3797 kunmap_atomic(addr);
3799 written = copy_page_to_iter(page, 0, copy, iter);
3800 remaining -= written;
3801 if (written < copy && iov_iter_count(iter) > 0)
3804 return remaining ? -EFAULT : 0;
3807 static void collect_uncached_read_data(struct cifs_aio_ctx *ctx);
3810 cifs_uncached_readv_complete(struct work_struct *work)
3812 struct cifs_readdata *rdata = container_of(work,
3813 struct cifs_readdata, work);
3815 complete(&rdata->done);
3816 collect_uncached_read_data(rdata->ctx);
3817 /* the below call can possibly free the last ref to aio ctx */
3818 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3822 uncached_fill_pages(struct TCP_Server_Info *server,
3823 struct cifs_readdata *rdata, struct iov_iter *iter,
3828 unsigned int nr_pages = rdata->nr_pages;
3829 unsigned int page_offset = rdata->page_offset;
3831 rdata->got_bytes = 0;
3832 rdata->tailsz = PAGE_SIZE;
3833 for (i = 0; i < nr_pages; i++) {
3834 struct page *page = rdata->pages[i];
3836 unsigned int segment_size = rdata->pagesz;
3839 segment_size -= page_offset;
3845 /* no need to hold page hostage */
3846 rdata->pages[i] = NULL;
3853 if (len >= segment_size)
3854 /* enough data to fill the page */
3857 rdata->tailsz = len;
3861 result = copy_page_from_iter(
3862 page, page_offset, n, iter);
3863 #ifdef CONFIG_CIFS_SMB_DIRECT
3868 result = cifs_read_page_from_socket(
3869 server, page, page_offset, n);
3873 rdata->got_bytes += result;
3876 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3877 rdata->got_bytes : result;
3881 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
3882 struct cifs_readdata *rdata, unsigned int len)
3884 return uncached_fill_pages(server, rdata, NULL, len);
3888 cifs_uncached_copy_into_pages(struct TCP_Server_Info *server,
3889 struct cifs_readdata *rdata,
3890 struct iov_iter *iter)
3892 return uncached_fill_pages(server, rdata, iter, iter->count);
3895 static int cifs_resend_rdata(struct cifs_readdata *rdata,
3896 struct list_head *rdata_list,
3897 struct cifs_aio_ctx *ctx)
3900 struct cifs_credits credits;
3902 struct TCP_Server_Info *server;
3904 /* XXX: should we pick a new channel here? */
3905 server = rdata->server;
3908 if (rdata->cfile->invalidHandle) {
3909 rc = cifs_reopen_file(rdata->cfile, true);
3917 * Wait for credits to resend this rdata.
3918 * Note: we are attempting to resend the whole rdata not in
3922 rc = server->ops->wait_mtu_credits(server, rdata->bytes,
3928 if (rsize < rdata->bytes) {
3929 add_credits_and_wake_if(server, &credits, 0);
3932 } while (rsize < rdata->bytes);
3933 rdata->credits = credits;
3935 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
3937 if (rdata->cfile->invalidHandle)
3940 #ifdef CONFIG_CIFS_SMB_DIRECT
3942 rdata->mr->need_invalidate = true;
3943 smbd_deregister_mr(rdata->mr);
3947 rc = server->ops->async_readv(rdata);
3951 /* If the read was successfully sent, we are done */
3953 /* Add to aio pending list */
3954 list_add_tail(&rdata->list, rdata_list);
3958 /* Roll back credits and retry if needed */
3959 add_credits_and_wake_if(server, &rdata->credits, 0);
3960 } while (rc == -EAGAIN);
3963 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
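/*
 * cifs_send_async_read() is the read-side counterpart of
 * cifs_write_from_iter(): it splits [offset, offset + len) into
 * rsize-sized chunks, wraps each in a cifs_readdata and submits it with
 * ->async_readv(); the results are stitched back together by
 * collect_uncached_read_data().
 */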
3968 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3969 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list,
3970 struct cifs_aio_ctx *ctx)
3972 struct cifs_readdata *rdata;
3973 unsigned int npages, rsize;
3974 struct cifs_credits credits_on_stack;
3975 struct cifs_credits *credits = &credits_on_stack;
3979 struct TCP_Server_Info *server;
3980 struct page **pagevec;
3982 struct iov_iter direct_iov = ctx->iter;
3984 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
3986 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3987 pid = open_file->pid;
3989 pid = current->tgid;
3992 iov_iter_advance(&direct_iov, offset - ctx->pos);
3995 if (open_file->invalidHandle) {
3996 rc = cifs_reopen_file(open_file, true);
4003 if (cifs_sb->ctx->rsize == 0)
4004 cifs_sb->ctx->rsize =
4005 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
4008 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
4013 cur_len = min_t(const size_t, len, rsize);
4015 if (ctx->direct_io) {
4018 result = iov_iter_get_pages_alloc2(
4019 &direct_iov, &pagevec,
4023 "Couldn't get user pages (rc=%zd) iter type %d iov_offset %zd count %zd\n",
4024 result, iov_iter_type(&direct_iov),
4025 direct_iov.iov_offset,
4030 add_credits_and_wake_if(server, credits, 0);
4033 cur_len = (size_t)result;
4035 rdata = cifs_readdata_direct_alloc(
4036 pagevec, cifs_uncached_readv_complete);
4038 add_credits_and_wake_if(server, credits, 0);
4043 npages = (cur_len + start + PAGE_SIZE-1) / PAGE_SIZE;
4044 rdata->page_offset = start;
4045 rdata->tailsz = npages > 1 ?
4046 cur_len-(PAGE_SIZE-start)-(npages-2)*PAGE_SIZE :
4051 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
4052 /* allocate a readdata struct */
4053 rdata = cifs_readdata_alloc(npages,
4054 cifs_uncached_readv_complete);
4056 add_credits_and_wake_if(server, credits, 0);
4061 rc = cifs_read_allocate_pages(rdata, npages);
4063 kvfree(rdata->pages);
4065 add_credits_and_wake_if(server, credits, 0);
4069 rdata->tailsz = PAGE_SIZE;
4072 rdata->server = server;
4073 rdata->cfile = cifsFileInfo_get(open_file);
4074 rdata->nr_pages = npages;
4075 rdata->offset = offset;
4076 rdata->bytes = cur_len;
4078 rdata->pagesz = PAGE_SIZE;
4079 rdata->read_into_pages = cifs_uncached_read_into_pages;
4080 rdata->copy_into_pages = cifs_uncached_copy_into_pages;
4081 rdata->credits = credits_on_stack;
4083 kref_get(&ctx->refcount);
4085 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4088 if (rdata->cfile->invalidHandle)
4091 rc = server->ops->async_readv(rdata);
4095 add_credits_and_wake_if(server, &rdata->credits, 0);
4096 kref_put(&rdata->refcount,
4097 cifs_uncached_readdata_release);
4098 if (rc == -EAGAIN) {
4099 iov_iter_revert(&direct_iov, cur_len);
4105 list_add_tail(&rdata->list, rdata_list);
4114 collect_uncached_read_data(struct cifs_aio_ctx *ctx)
4116 struct cifs_readdata *rdata, *tmp;
4117 struct iov_iter *to = &ctx->iter;
4118 struct cifs_sb_info *cifs_sb;
4121 cifs_sb = CIFS_SB(ctx->cfile->dentry->d_sb);
4123 mutex_lock(&ctx->aio_mutex);
4125 if (list_empty(&ctx->list)) {
4126 mutex_unlock(&ctx->aio_mutex);
4131 /* the loop below should proceed in the order of increasing offsets */
4133 list_for_each_entry_safe(rdata, tmp, &ctx->list, list) {
4135 if (!try_wait_for_completion(&rdata->done)) {
4136 mutex_unlock(&ctx->aio_mutex);
4140 if (rdata->result == -EAGAIN) {
4141 /* resend call if it's a retryable error */
4142 struct list_head tmp_list;
4143 unsigned int got_bytes = rdata->got_bytes;
4145 list_del_init(&rdata->list);
4146 INIT_LIST_HEAD(&tmp_list);
4149 * Got part of the data and then a reconnect
4150 * happened -- fill the buffer and continue
4153 if (got_bytes && got_bytes < rdata->bytes) {
4155 if (!ctx->direct_io)
4156 rc = cifs_readdata_to_iov(rdata, to);
4158 kref_put(&rdata->refcount,
4159 cifs_uncached_readdata_release);
4164 if (ctx->direct_io) {
4166 * Re-use rdata as this is a
4169 rc = cifs_resend_rdata(
4173 rc = cifs_send_async_read(
4174 rdata->offset + got_bytes,
4175 rdata->bytes - got_bytes,
4176 rdata->cfile, cifs_sb,
4179 kref_put(&rdata->refcount,
4180 cifs_uncached_readdata_release);
4183 list_splice(&tmp_list, &ctx->list);
4186 } else if (rdata->result)
4188 else if (!ctx->direct_io)
4189 rc = cifs_readdata_to_iov(rdata, to);
4191 /* if there was a short read -- discard anything left */
4192 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
4195 ctx->total_len += rdata->got_bytes;
4197 list_del_init(&rdata->list);
4198 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
4201 if (!ctx->direct_io)
4202 ctx->total_len = ctx->len - iov_iter_count(to);
4204 /* mask nodata case */
4208 ctx->rc = (rc == 0) ? (ssize_t)ctx->total_len : rc;
4210 mutex_unlock(&ctx->aio_mutex);
4212 if (ctx->iocb && ctx->iocb->ki_complete)
4213 ctx->iocb->ki_complete(ctx->iocb, ctx->rc);
4215 complete(&ctx->done);
4218 static ssize_t __cifs_readv(
4219 struct kiocb *iocb, struct iov_iter *to, bool direct)
4222 struct file *file = iocb->ki_filp;
4223 struct cifs_sb_info *cifs_sb;
4224 struct cifsFileInfo *cfile;
4225 struct cifs_tcon *tcon;
4226 ssize_t rc, total_read = 0;
4227 loff_t offset = iocb->ki_pos;
4228 struct cifs_aio_ctx *ctx;
4231 * iov_iter_get_pages_alloc() doesn't work with ITER_KVEC,
4232 * fall back to the data copy read path.
4233 * This could be improved by getting pages directly for ITER_KVEC.
4235 if (direct && iov_iter_is_kvec(to)) {
4236 cifs_dbg(FYI, "use non-direct cifs_user_readv for kvec I/O\n");
4240 len = iov_iter_count(to);
4244 cifs_sb = CIFS_FILE_SB(file);
4245 cfile = file->private_data;
4246 tcon = tlink_tcon(cfile->tlink);
4248 if (!tcon->ses->server->ops->async_readv)
4251 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
4252 cifs_dbg(FYI, "attempting read on write only file instance\n");
4254 ctx = cifs_aio_ctx_alloc();
4258 ctx->cfile = cifsFileInfo_get(cfile);
4260 if (!is_sync_kiocb(iocb))
4263 if (user_backed_iter(to))
4264 ctx->should_dirty = true;
4268 ctx->direct_io = true;
4272 rc = setup_aio_ctx_iter(ctx, to, READ);
4274 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4281 rc = filemap_write_and_wait_range(file->f_inode->i_mapping,
4282 offset, offset + len - 1);
4284 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4289 /* grab a lock here because read response handlers can access ctx */
4290 mutex_lock(&ctx->aio_mutex);
4292 rc = cifs_send_async_read(offset, len, cfile, cifs_sb, &ctx->list, ctx);
4294 /* if at least one read request was sent successfully, then reset rc */
4295 if (!list_empty(&ctx->list))
4298 mutex_unlock(&ctx->aio_mutex);
4301 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4305 if (!is_sync_kiocb(iocb)) {
4306 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4307 return -EIOCBQUEUED;
4310 rc = wait_for_completion_killable(&ctx->done);
4312 mutex_lock(&ctx->aio_mutex);
4313 ctx->rc = rc = -EINTR;
4314 total_read = ctx->total_len;
4315 mutex_unlock(&ctx->aio_mutex);
4318 total_read = ctx->total_len;
4321 kref_put(&ctx->refcount, cifs_aio_ctx_release);
4324 iocb->ki_pos += total_read;
4330 ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to)
4332 return __cifs_readv(iocb, to, true);
4335 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
4337 return __cifs_readv(iocb, to, false);
4341 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
4343 struct inode *inode = file_inode(iocb->ki_filp);
4344 struct cifsInodeInfo *cinode = CIFS_I(inode);
4345 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
4346 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
4347 iocb->ki_filp->private_data;
4348 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
4352 * In strict cache mode we need to read from the server all the time
4353 * if we don't have a level II oplock because the server can delay the
4354 * mtime change - so we can't make a decision about invalidating the inode.
4355 * And we can also fail with page reading if there are mandatory locks
4356 * on pages affected by this read but not on the region from pos to
4359 if (!CIFS_CACHE_READ(cinode))
4360 return cifs_user_readv(iocb, to);
4362 if (cap_unix(tcon->ses) &&
4363 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
4364 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
4365 return generic_file_read_iter(iocb, to);
4368 * We need to hold the sem to be sure nobody modifies lock list
4369 * with a brlock that prevents reading.
4371 down_read(&cinode->lock_sem);
4372 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
4373 tcon->ses->server->vals->shared_lock_type,
4374 0, NULL, CIFS_READ_OP))
4375 rc = generic_file_read_iter(iocb, to);
4376 up_read(&cinode->lock_sem);
4381 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
4384 unsigned int bytes_read = 0;
4385 unsigned int total_read;
4386 unsigned int current_read_size;
4388 struct cifs_sb_info *cifs_sb;
4389 struct cifs_tcon *tcon;
4390 struct TCP_Server_Info *server;
4393 struct cifsFileInfo *open_file;
4394 struct cifs_io_parms io_parms = {0};
4395 int buf_type = CIFS_NO_BUFFER;
4399 cifs_sb = CIFS_FILE_SB(file);
4401 /* FIXME: set up handlers for larger reads and/or convert to async */
4402 rsize = min_t(unsigned int, cifs_sb->ctx->rsize, CIFSMaxBufSize);
4404 if (file->private_data == NULL) {
4409 open_file = file->private_data;
4410 tcon = tlink_tcon(open_file->tlink);
4411 server = cifs_pick_channel(tcon->ses);
4413 if (!server->ops->sync_read) {
4418 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4419 pid = open_file->pid;
4421 pid = current->tgid;
4423 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
4424 cifs_dbg(FYI, "attempting read on write only file instance\n");
4426 for (total_read = 0, cur_offset = read_data; read_size > total_read;
4427 total_read += bytes_read, cur_offset += bytes_read) {
4429 current_read_size = min_t(uint, read_size - total_read,
4432 * For Windows ME and 9x we do not want to request more
4433 * than it negotiated since it will refuse the read
4436 if (!(tcon->ses->capabilities &
4437 tcon->ses->server->vals->cap_large_files)) {
4438 current_read_size = min_t(uint,
4439 current_read_size, CIFSMaxBufSize);
4441 if (open_file->invalidHandle) {
4442 rc = cifs_reopen_file(open_file, true);
4447 io_parms.tcon = tcon;
4448 io_parms.offset = *offset;
4449 io_parms.length = current_read_size;
4450 io_parms.server = server;
4451 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
4452 &bytes_read, &cur_offset,
4454 } while (rc == -EAGAIN);
4456 if (rc || (bytes_read == 0)) {
4464 cifs_stats_bytes_read(tcon, total_read);
4465 *offset += bytes_read;
4473 * If the page is mmap'ed into a process' page tables, then we need to make
4474 * sure that it doesn't change while being written back.
4477 cifs_page_mkwrite(struct vm_fault *vmf)
4479 struct page *page = vmf->page;
4481 /* Wait for the page to be written to the cache before we allow it to
4482 * be modified. We then assume the entire page will need writing back.
4484 #ifdef CONFIG_CIFS_FSCACHE
4485 if (PageFsCache(page) &&
4486 wait_on_page_fscache_killable(page) < 0)
4487 return VM_FAULT_RETRY;
4490 wait_on_page_writeback(page);
4492 if (lock_page_killable(page) < 0)
4493 return VM_FAULT_RETRY;
4494 return VM_FAULT_LOCKED;
4497 static const struct vm_operations_struct cifs_file_vm_ops = {
4498 .fault = filemap_fault,
4499 .map_pages = filemap_map_pages,
4500 .page_mkwrite = cifs_page_mkwrite,
4503 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
4506 struct inode *inode = file_inode(file);
4510 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4511 rc = cifs_zap_mapping(inode);
4513 rc = generic_file_mmap(file, vma);
4515 vma->vm_ops = &cifs_file_vm_ops;
4521 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
4527 rc = cifs_revalidate_file(file);
4529 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
4532 rc = generic_file_mmap(file, vma);
4534 vma->vm_ops = &cifs_file_vm_ops;
4541 cifs_readv_complete(struct work_struct *work)
4543 unsigned int i, got_bytes;
4544 struct cifs_readdata *rdata = container_of(work,
4545 struct cifs_readdata, work);
4547 got_bytes = rdata->got_bytes;
4548 for (i = 0; i < rdata->nr_pages; i++) {
4549 struct page *page = rdata->pages[i];
4551 if (rdata->result == 0 ||
4552 (rdata->result == -EAGAIN && got_bytes)) {
4553 flush_dcache_page(page);
4554 SetPageUptodate(page);
4558 if (rdata->result == 0 ||
4559 (rdata->result == -EAGAIN && got_bytes))
4560 cifs_readpage_to_fscache(rdata->mapping->host, page);
4564 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
4567 rdata->pages[i] = NULL;
4569 kref_put(&rdata->refcount, cifs_readdata_release);
4573 readpages_fill_pages(struct TCP_Server_Info *server,
4574 struct cifs_readdata *rdata, struct iov_iter *iter,
4581 unsigned int nr_pages = rdata->nr_pages;
4582 unsigned int page_offset = rdata->page_offset;
4584 /* determine the eof that the server (probably) has */
4585 eof = CIFS_I(rdata->mapping->host)->server_eof;
4586 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
4587 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
4589 rdata->got_bytes = 0;
4590 rdata->tailsz = PAGE_SIZE;
4591 for (i = 0; i < nr_pages; i++) {
4592 struct page *page = rdata->pages[i];
4593 unsigned int to_read = rdata->pagesz;
4597 to_read -= page_offset;
4603 if (len >= to_read) {
4605 } else if (len > 0) {
4606 /* enough for partial page, fill and zero the rest */
4607 zero_user(page, len + page_offset, to_read - len);
4608 n = rdata->tailsz = len;
4610 } else if (page->index > eof_index) {
4612 * The VFS will not try to do readahead past the
4613 * i_size, but it's possible that we have outstanding
4614 * writes with gaps in the middle and the i_size hasn't
4615 * caught up yet. Populate those with zeroed out pages
4616 * to prevent the VFS from repeatedly attempting to
4617 * fill them until the writes are flushed.
4619 zero_user(page, 0, PAGE_SIZE);
4620 flush_dcache_page(page);
4621 SetPageUptodate(page);
4624 rdata->pages[i] = NULL;
4628 /* no need to hold page hostage */
4631 rdata->pages[i] = NULL;
4637 result = copy_page_from_iter(
4638 page, page_offset, n, iter);
4639 #ifdef CONFIG_CIFS_SMB_DIRECT
4644 result = cifs_read_page_from_socket(
4645 server, page, page_offset, n);
4649 rdata->got_bytes += result;
4652 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
4653 rdata->got_bytes : result;
4657 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
4658 struct cifs_readdata *rdata, unsigned int len)
4660 return readpages_fill_pages(server, rdata, NULL, len);
4664 cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
4665 struct cifs_readdata *rdata,
4666 struct iov_iter *iter)
4668 return readpages_fill_pages(server, rdata, iter, iter->count);
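/*
 * cifs_readahead() walks the readahead window in batches: ranges that
 * fscache reports as cached are read from the local cache one folio at a
 * time, while the rest is chopped into rsize-sized async reads that
 * complete through cifs_readv_complete().
 */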
4671 static void cifs_readahead(struct readahead_control *ractl)
4674 struct cifsFileInfo *open_file = ractl->file->private_data;
4675 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
4676 struct TCP_Server_Info *server;
4678 unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
4679 pgoff_t next_cached = ULONG_MAX;
4680 bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
4681 cifs_inode_cookie(ractl->mapping->host)->cache_priv;
4682 bool check_cache = caching;
4686 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
4687 pid = open_file->pid;
4689 pid = current->tgid;
4692 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
4694 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
4695 __func__, ractl->file, ractl->mapping, readahead_count(ractl));
4698 * Chop the readahead request up into rsize-sized read requests.
4700 while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
4701 unsigned int i, got, rsize;
4703 struct cifs_readdata *rdata;
4704 struct cifs_credits credits_on_stack;
4705 struct cifs_credits *credits = &credits_on_stack;
4706 pgoff_t index = readahead_index(ractl) + last_batch_size;
4709 * Find out if we have anything cached in the range of
4710 * interest, and if so, where the next chunk of cached data is.
4714 rc = cifs_fscache_query_occupancy(
4715 ractl->mapping->host, index, nr_pages,
4716 &next_cached, &cache_nr_pages);
4719 check_cache = false;
4722 if (index == next_cached) {
4724 * TODO: Send a whole batch of pages to be read
4727 struct folio *folio = readahead_folio(ractl);
4729 last_batch_size = folio_nr_pages(folio);
4730 if (cifs_readpage_from_fscache(ractl->mapping->host,
4731 &folio->page) < 0) {
4733 * TODO: Deal with cache read failure
4734 * here, but for the moment, delegate
4739 folio_unlock(folio);
4742 if (cache_nr_pages == 0)
4748 if (open_file->invalidHandle) {
4749 rc = cifs_reopen_file(open_file, true);
4757 if (cifs_sb->ctx->rsize == 0)
4758 cifs_sb->ctx->rsize =
4759 server->ops->negotiate_rsize(tlink_tcon(open_file->tlink),
4762 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
4766 nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
4767 nr_pages = min_t(size_t, nr_pages, next_cached - index);
4770 * Give up immediately if rsize is too small to read an entire
4771 * page. The VFS will fall back to readpage. We should never
4772 * reach this point however since we set ra_pages to 0 when the
4773 * rsize is smaller than a cache page.
4775 if (unlikely(!nr_pages)) {
4776 add_credits_and_wake_if(server, credits, 0);
4780 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
4782 /* best to give up if we're out of mem */
4783 add_credits_and_wake_if(server, credits, 0);
4787 got = __readahead_batch(ractl, rdata->pages, nr_pages);
4788 if (got != nr_pages) {
4789 pr_warn("__readahead_batch() returned %u/%u\n",
4794 rdata->nr_pages = nr_pages;
4795 rdata->bytes = readahead_batch_length(ractl);
4796 rdata->cfile = cifsFileInfo_get(open_file);
4797 rdata->server = server;
4798 rdata->mapping = ractl->mapping;
4799 rdata->offset = readahead_pos(ractl);
4801 rdata->pagesz = PAGE_SIZE;
4802 rdata->tailsz = PAGE_SIZE;
4803 rdata->read_into_pages = cifs_readpages_read_into_pages;
4804 rdata->copy_into_pages = cifs_readpages_copy_into_pages;
4805 rdata->credits = credits_on_stack;
4807 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4809 if (rdata->cfile->invalidHandle)
4812 rc = server->ops->async_readv(rdata);
4816 add_credits_and_wake_if(server, &rdata->credits, 0);
4817 for (i = 0; i < rdata->nr_pages; i++) {
4818 page = rdata->pages[i];
4822 /* Fall back to the readpage path in error/reconnect cases */
4823 kref_put(&rdata->refcount, cifs_readdata_release);
4827 kref_put(&rdata->refcount, cifs_readdata_release);
4828 last_batch_size = nr_pages;
4835 * cifs_readpage_worker must be called with the page pinned
4837 static int cifs_readpage_worker(struct file *file, struct page *page,
4843 /* Is the page cached? */
4844 rc = cifs_readpage_from_fscache(file_inode(file), page);
4848 read_data = kmap(page);
4849 /* for reads over a certain size we could initiate async read ahead */
4851 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
4856 cifs_dbg(FYI, "Bytes read %d\n", rc);
4858 /* we do not want atime to be less than mtime, it broke some apps */
4859 file_inode(file)->i_atime = current_time(file_inode(file));
4860 if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)))
4861 file_inode(file)->i_atime = file_inode(file)->i_mtime;
4863 file_inode(file)->i_atime = current_time(file_inode(file));
4866 memset(read_data + rc, 0, PAGE_SIZE - rc);
4868 flush_dcache_page(page);
4869 SetPageUptodate(page);
4871 /* send this page to the cache */
4872 cifs_readpage_to_fscache(file_inode(file), page);
4884 static int cifs_read_folio(struct file *file, struct folio *folio)
4886 struct page *page = &folio->page;
4887 loff_t offset = page_file_offset(page);
4893 if (file->private_data == NULL) {
4899 cifs_dbg(FYI, "read_folio %p at offset %d 0x%x\n",
4900 page, (int)offset, (int)offset);
4902 rc = cifs_readpage_worker(file, page, &offset);
4908 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
4910 struct cifsFileInfo *open_file;
4912 spin_lock(&cifs_inode->open_file_lock);
4913 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
4914 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
4915 spin_unlock(&cifs_inode->open_file_lock);
4919 spin_unlock(&cifs_inode->open_file_lock);
/* We do not want to update the file size from the server for inodes
   open for write, to avoid races with writepage extending the file.
   In the future we could consider allowing refreshing of the inode
   only on increases in the file size, but this is tricky to do
   without racing with writebehind page caching in the current Linux
   kernel design */
4929 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
4934 if (is_inode_writable(cifsInode)) {
4935 /* This inode is open for write at least once */
4936 struct cifs_sb_info *cifs_sb;
4938 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
4939 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
			/* since there is no page cache to corrupt on
			   direct I/O, we can change the size safely */
4945 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
4953 static int cifs_write_begin(struct file *file, struct address_space *mapping,
4954 loff_t pos, unsigned len,
4955 struct page **pagep, void **fsdata)
4958 pgoff_t index = pos >> PAGE_SHIFT;
4959 loff_t offset = pos & (PAGE_SIZE - 1);
4960 loff_t page_start = pos & PAGE_MASK;
4965 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
4968 page = grab_cache_page_write_begin(mapping, index);
4974 if (PageUptodate(page))
4978 * If we write a full page it will be up to date, no need to read from
4979 * the server. If the write is short, we'll end up doing a sync write
4982 if (len == PAGE_SIZE)
4986 * optimize away the read when we have an oplock, and we're not
4987 * expecting to use any of the data we'd be reading in. That
4988 * is, when the page lies beyond the EOF, or straddles the EOF
4989 * and the write will cover all of the existing data.
4991 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
4992 i_size = i_size_read(mapping->host);
4993 if (page_start >= i_size ||
4994 (offset == 0 && (pos + len) >= i_size)) {
4995 zero_user_segments(page, 0, offset,
4999 * PageChecked means that the parts of the page
5000 * to which we're not writing are considered up
5001 * to date. Once the data is copied to the
5002 * page, it can be set uptodate.
5004 SetPageChecked(page);
5009 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
5011 * might as well read a page, it is fast enough. If we get
5012 * an error, we don't need to return it. cifs_write_end will
5013 * do a sync write instead since PG_uptodate isn't set.
5015 cifs_readpage_worker(file, page, &page_start);
	/* we could try using another file handle if there is one, but
	   how would we lock it to prevent a close of that handle racing
	   with this read? In any case, this page will be written out by
	   write_end, so it is fine */
5030 static bool cifs_release_folio(struct folio *folio, gfp_t gfp)
5032 if (folio_test_private(folio))
5034 if (folio_test_fscache(folio)) {
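		/*
		 * If the caller cannot sleep (kswapd, or an allocation
		 * without __GFP_FS), we must not wait for the cache
		 * write to complete.
		 */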
5035 if (current_is_kswapd() || !(gfp & __GFP_FS))
5037 folio_wait_fscache(folio);
5039 fscache_note_page_release(cifs_inode_cookie(folio->mapping->host));
5043 static void cifs_invalidate_folio(struct folio *folio, size_t offset,
5046 folio_wait_fscache(folio);
5049 static int cifs_launder_folio(struct folio *folio)
5052 loff_t range_start = folio_pos(folio);
5053 loff_t range_end = range_start + folio_size(folio);
5054 struct writeback_control wbc = {
5055 .sync_mode = WB_SYNC_ALL,
5057 .range_start = range_start,
5058 .range_end = range_end,
5061 cifs_dbg(FYI, "Launder page: %lu\n", folio->index);
5063 if (folio_clear_dirty_for_io(folio))
5064 rc = cifs_writepage_locked(&folio->page, &wbc);
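	/* also wait for any write-to-cache on this folio to finish */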
5066 folio_wait_fscache(folio);
5070 void cifs_oplock_break(struct work_struct *work)
5072 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
5074 struct inode *inode = d_inode(cfile->dentry);
5075 struct cifsInodeInfo *cinode = CIFS_I(inode);
5076 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
5077 struct TCP_Server_Info *server = tcon->ses->server;
5079 bool purge_cache = false;
5081 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
5082 TASK_UNINTERRUPTIBLE);
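	/*
	 * Let the protocol-specific op translate the server's new oplock
	 * level into our caching flags and tell us whether cached pages
	 * must be discarded.
	 */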
5084 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
5085 cfile->oplock_epoch, &purge_cache);
5087 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
5088 cifs_has_mand_locks(cinode)) {
5089 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
5094 if (inode && S_ISREG(inode->i_mode)) {
5095 if (CIFS_CACHE_READ(cinode))
5096 break_lease(inode, O_RDONLY);
5098 break_lease(inode, O_WRONLY);
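		/*
		 * Flush dirty pages before acknowledging the break; if read
		 * caching is also being lost, wait for the writeback and
		 * drop the now-stale page cache.
		 */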
5099 rc = filemap_fdatawrite(inode->i_mapping);
5100 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
5101 rc = filemap_fdatawait(inode->i_mapping);
5102 mapping_set_error(inode->i_mapping, rc);
5103 cifs_zap_mapping(inode);
5105 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
5106 if (CIFS_CACHE_WRITE(cinode))
5107 goto oplock_break_ack;
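	/*
	 * Push any byte-range locks we were caching locally out to the
	 * server, since we can no longer rely on the oplock to keep
	 * other clients away from the file.
	 */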
5110 rc = cifs_push_locks(cfile);
5112 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
 * Releasing a stale oplock after a recent reconnect of the SMB session,
 * using a now-incorrect file handle, is not a data integrity issue, but
 * do not bother sending an oplock release if the session to the server
 * is still disconnected, since the oplock has already been released by
 * the server in that case.
5121 if (!cfile->oplock_break_cancelled) {
5122 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
5124 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
	_cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
5128 cifs_done_oplock_break(cinode);
5132 * The presence of cifs_direct_io() in the address space ops vector
 * allows open() with the O_DIRECT flag, which would have failed otherwise.
 *
 * In the non-cached mode (mount with cache=none), we shunt off direct
 * read and write requests, so this method should never be called.
5138 * Direct IO is not yet supported in the cached mode.
5141 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
	 * Eventually we need to support direct IO for non-forcedirectio mounts
5150 static int cifs_swap_activate(struct swap_info_struct *sis,
5151 struct file *swap_file, sector_t *span)
5153 struct cifsFileInfo *cfile = swap_file->private_data;
5154 struct inode *inode = swap_file->f_mapping->host;
5155 unsigned long blocks;
5158 cifs_dbg(FYI, "swap activate\n");
5160 if (!swap_file->f_mapping->a_ops->swap_rw)
5161 /* Cannot support swap */
5164 spin_lock(&inode->i_lock);
5165 blocks = inode->i_blocks;
5166 isize = inode->i_size;
5167 spin_unlock(&inode->i_lock);
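	/*
	 * A swapfile must be fully allocated: fewer 512-byte blocks
	 * than i_size implies means the file has holes.
	 */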
	if (blocks * 512 < isize) {
5169 pr_warn("swap activate: swapfile has holes\n");
5174 pr_warn_once("Swap support over SMB3 is experimental\n");
	 * TODO: consider adding an ACL (or documenting how) to prevent other
	 * users (on this or other systems) from reading it
5182 /* TODO: add sk_set_memalloc(inet) or similar */
5185 cfile->swapfile = true;
	 * TODO: Since the file is already open, we can't open it with DENY_ALL
	 * here, but we could add a call to grab a byte-range lock to prevent
	 * others from reading or writing the file.
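	/*
	 * SWP_FS_OPS routes swap I/O through our ->swap_rw method rather
	 * than having the swap code build bios itself; the single extent
	 * below covers the entire file.
	 */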
5192 sis->flags |= SWP_FS_OPS;
5193 return add_swap_extent(sis, 0, sis->max, 0);
5196 static void cifs_swap_deactivate(struct file *file)
5198 struct cifsFileInfo *cfile = file->private_data;
5200 cifs_dbg(FYI, "swap deactivate\n");
	/* TODO: undoing sk_set_memalloc(inet) will eventually be needed */
5205 cfile->swapfile = false;
	/* do we need to unpin (or unlock) the file? */
5211 * Mark a page as having been made dirty and thus needing writeback. We also
5212 * need to pin the cache object to write back to.
5214 #ifdef CONFIG_CIFS_FSCACHE
5215 static bool cifs_dirty_folio(struct address_space *mapping, struct folio *folio)
5217 return fscache_dirty_folio(mapping, folio,
5218 cifs_inode_cookie(mapping->host));
5221 #define cifs_dirty_folio filemap_dirty_folio
5224 const struct address_space_operations cifs_addr_ops = {
5225 .read_folio = cifs_read_folio,
5226 .readahead = cifs_readahead,
5227 .writepage = cifs_writepage,
5228 .writepages = cifs_writepages,
5229 .write_begin = cifs_write_begin,
5230 .write_end = cifs_write_end,
5231 .dirty_folio = cifs_dirty_folio,
5232 .release_folio = cifs_release_folio,
5233 .direct_IO = cifs_direct_io,
5234 .invalidate_folio = cifs_invalidate_folio,
5235 .launder_folio = cifs_launder_folio,
	 * TODO: investigate and, if useful, we could add a cifs_migratePage
	 * helper (under CONFIG_MIGRATION) in the future, and also investigate
	 * and add an is_dirty_writeback helper if needed
5241 .swap_activate = cifs_swap_activate,
5242 .swap_deactivate = cifs_swap_deactivate,
5246 * cifs_readahead requires the server to support a buffer large enough to
5247 * contain the header plus one complete page of data. Otherwise, we need
5248 * to leave cifs_readahead out of the address space operations.
5250 const struct address_space_operations cifs_addr_ops_smallbuf = {
5251 .read_folio = cifs_read_folio,
5252 .writepage = cifs_writepage,
5253 .writepages = cifs_writepages,
5254 .write_begin = cifs_write_begin,
5255 .write_end = cifs_write_end,
5256 .dirty_folio = cifs_dirty_folio,
5257 .release_folio = cifs_release_folio,
5258 .invalidate_folio = cifs_invalidate_folio,
5259 .launder_folio = cifs_launder_folio,
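
/*
 * For reference, a rough sketch of how a regular file's mapping is
 * expected to choose between these two tables (modeled on
 * cifs_set_ops() in inode.c; the exact condition there may differ):
 * a server whose maximum read size cannot cover a full page of data
 * plus the protocol header gets the smallbuf table, which omits
 * ->readahead and ->direct_IO.
 *
 *	if (cifs_sb_master_tcon(cifs_sb)->ses->server->max_read <
 *	    PAGE_SIZE + MAX_CIFS_HDR_SIZE)
 *		inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
 *	else
 *		inode->i_data.a_ops = &cifs_addr_ops;
 */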