// SPDX-License-Identifier: LGPL-2.1
/*
 *   vfs operations that deal with files
 *
 *   Copyright (C) International Business Machines Corp., 2002,2010
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org)
 */
#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
#include "smbdirect.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "cached_dir.h"
#include <trace/events/netfs.h>
static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
/*
 * Prepare a subrequest to upload to the server. We need to allocate credits
 * so that we know the maximum amount of data that we can include in it.
 */
static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = wdata->req;
	struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
	struct TCP_Server_Info *server;
	struct cifsFileInfo *open_file = req->cfile;
	size_t wsize = req->rreq.wsize;
	int rc;

	if (!wdata->have_xid) {
		wdata->xid = get_xid();
		wdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
	wdata->server = server;

retry:
	if (open_file->invalidHandle) {
		rc = cifs_reopen_file(open_file, false);
		if (rc < 0) {
			if (rc == -EAGAIN)
				goto retry;
			subreq->error = rc;
			return netfs_prepare_write_failed(subreq);
		}
	}

	rc = server->ops->wait_mtu_credits(server, wsize, &stream->sreq_max_len,
					   &wdata->credits);
	if (rc < 0) {
		subreq->error = rc;
		return netfs_prepare_write_failed(subreq);
	}

	wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
	wdata->credits.rreq_debug_index = subreq->debug_index;
	wdata->credits.in_flight_check = 1;
	trace_smb3_rw_credits(wdata->rreq->debug_id,
			      wdata->subreq.debug_index,
			      wdata->credits.value,
			      server->credits, server->in_flight,
			      wdata->credits.value,
			      cifs_trace_rw_credits_write_prepare);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		stream->sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
}
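
/*
 * Editorial illustration (not part of the driver): the prepare/issue split
 * means a write subrequest first reserves server credits sized against the
 * negotiated wsize, and only then is handed to cifs_issue_write().  A sketch
 * of the calling sequence, assuming a single writeback stream:
 *
 *	cifs_prepare_write(subreq);	// reserve credits, cap sreq_max_len
 *	cifs_issue_write(subreq);	// consume credits, send async write
 *
 * If credits cannot be obtained, netfs_prepare_write_failed() tells the
 * netfs library to abandon this subrequest rather than issuing it.
 */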
/*
 * Issue a subrequest to upload to the server.
 */
static void cifs_issue_write(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *wdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
	int rc;

	if (cifs_forced_shutdown(sbi)) {
		rc = -EIO;
		goto fail;
	}

	rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
	if (rc)
		goto fail;

	rc = -EAGAIN;
	if (wdata->req->cfile->invalidHandle)
		goto fail;

	wdata->server->ops->async_writev(wdata);
out:
	return;

fail:
	if (rc == -EAGAIN)
		trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
	else
		trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
	add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
	cifs_write_subrequest_terminated(wdata, rc, false);
	goto out;
}
static void cifs_netfs_invalidate_cache(struct netfs_io_request *wreq)
{
	cifs_invalidate_cache(wreq->inode, 0);
}
/*
 * Negotiate the size of a read operation on behalf of the netfs library.
 */
static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server;
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	size_t size;
	int rc = 0;

	if (!rdata->have_xid) {
		rdata->xid = get_xid();
		rdata->have_xid = true;
	}

	server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
	rdata->server = server;

	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
						     cifs_sb->ctx);

	rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
					   &size, &rdata->credits);
	if (rc)
		return rc;

	rreq->io_streams[0].sreq_max_len = size;

	rdata->credits.in_flight_check = 1;
	rdata->credits.rreq_debug_id = rreq->debug_id;
	rdata->credits.rreq_debug_index = subreq->debug_index;

	trace_smb3_rw_credits(rdata->rreq->debug_id,
			      rdata->subreq.debug_index,
			      rdata->credits.value,
			      server->credits, server->in_flight, 0,
			      cifs_trace_rw_credits_read_submit);

#ifdef CONFIG_CIFS_SMB_DIRECT
	if (server->smbd_conn)
		rreq->io_streams[0].sreq_max_segs = server->smbd_conn->max_frmr_depth;
#endif
	return 0;
}
/*
 * Issue a read operation on behalf of the netfs helper functions. We're asked
 * to make a read of a certain size at a point in the file. We are permitted
 * to only read a portion of that, but as long as we read something, the netfs
 * helper will call us again so that we can issue another read.
 */
static void cifs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
	struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
	struct TCP_Server_Info *server = rdata->server;
	int rc = 0;

	cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
		 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
		 subreq->transferred, subreq->len);

	rc = adjust_credits(server, rdata, cifs_trace_rw_credits_issue_read_adjust);
	if (rc)
		goto failed;

	if (req->cfile->invalidHandle) {
		do {
			rc = cifs_reopen_file(req->cfile, true);
		} while (rc == -EAGAIN);
		if (rc)
			goto failed;
	}

	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
	rc = rdata->server->ops->async_readv(rdata);
	if (rc)
		goto failed;
	return;

failed:
	subreq->error = rc;
	netfs_read_subreq_terminated(subreq);
}
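
/*
 * Editorial note: a short read above is not an error.  If async_readv()
 * transfers fewer bytes than subreq->len, the netfs helpers observe the
 * partial progress and call cifs_issue_read() again for the remainder, so
 * this function only has to guarantee forward progress, not completeness.
 */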
/*
 * Writeback calls this when it finds a folio that needs uploading. This isn't
 * called if writeback only has copy-to-cache to deal with.
 */
static void cifs_begin_writeback(struct netfs_io_request *wreq)
{
	struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
	int ret;

	ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
	if (ret) {
		cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
		return;
	}

	wreq->io_streams[0].avail = true;
}
/*
 * Initialise a request.
 */
static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
	struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
	struct cifsFileInfo *open_file = NULL;

	rreq->rsize = cifs_sb->ctx->rsize;
	rreq->wsize = cifs_sb->ctx->wsize;
	req->pid = current->tgid; // Ummm... This may be a workqueue

	if (file) {
		open_file = file->private_data;
		rreq->netfs_priv = file->private_data;
		req->cfile = cifsFileInfo_get(open_file);
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
			req->pid = req->cfile->pid;
	} else if (rreq->origin != NETFS_WRITEBACK) {
		WARN_ON_ONCE(1);
		return -EIO;
	}

	return 0;
}
/*
 * Completion of a request operation.
 */
static void cifs_rreq_done(struct netfs_io_request *rreq)
{
	struct timespec64 atime, mtime;
	struct inode *inode = rreq->inode;

	/* we do not want atime to be less than mtime, it broke some apps */
	atime = inode_set_atime_to_ts(inode, current_time(inode));
	mtime = inode_get_mtime(inode);
	if (timespec64_compare(&atime, &mtime) < 0)
		inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}
static void cifs_free_request(struct netfs_io_request *rreq)
{
	struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);

	if (req->cfile)
		cifsFileInfo_put(req->cfile);
}
static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
{
	struct cifs_io_subrequest *rdata =
		container_of(subreq, struct cifs_io_subrequest, subreq);
	int rc = subreq->error;

	if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
#ifdef CONFIG_CIFS_SMB_DIRECT
		if (rdata->mr) {
			smbd_deregister_mr(rdata->mr);
			rdata->mr = NULL;
		}
#endif
	}

	if (rdata->credits.value != 0) {
		trace_smb3_rw_credits(rdata->rreq->debug_id,
				      rdata->subreq.debug_index,
				      rdata->credits.value,
				      rdata->server ? rdata->server->credits : 0,
				      rdata->server ? rdata->server->in_flight : 0,
				      -rdata->credits.value,
				      cifs_trace_rw_credits_free_subreq);
		if (rdata->server)
			add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
		else
			rdata->credits.value = 0;
	}

	if (rdata->have_xid)
		free_xid(rdata->xid);
}
const struct netfs_request_ops cifs_req_ops = {
	.request_pool		= &cifs_io_request_pool,
	.subrequest_pool	= &cifs_io_subrequest_pool,
	.init_request		= cifs_init_request,
	.free_request		= cifs_free_request,
	.free_subrequest	= cifs_free_subrequest,
	.prepare_read		= cifs_prepare_read,
	.issue_read		= cifs_issue_read,
	.done			= cifs_rreq_done,
	.begin_writeback	= cifs_begin_writeback,
	.prepare_write		= cifs_prepare_write,
	.issue_write		= cifs_issue_write,
	.invalidate_cache	= cifs_netfs_invalidate_cache,
};
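
/*
 * Illustrative sketch (an assumption, not code from this file): the netfs
 * library drives all of the hooks above once the owning inode is attached
 * to this table, which happens at inode-initialisation time along the
 * lines of:
 *
 *	netfs_inode_init(&cifs_inode->netfs, &cifs_req_ops, false);
 *
 * The exact call site lives outside this file; reads and writeback then
 * flow through the prepare/issue pairs without further glue here.
 */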
/*
 * Mark as invalid, all open files on tree connections since they
 * were closed when session to server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}
static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request
		   can cause unnecessary access denied on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
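
/*
 * Worked example (editorial): an open(..., O_WRONLY) with fscache active
 * maps to GENERIC_READ | GENERIC_WRITE (rdwr_for_fscache == 1) so the cache
 * can read back around partial-page writes, while the same open without
 * caching maps to plain GENERIC_WRITE.  The O_ACCMODE cases fall back to
 * the attribute/EA mask in the final return for unusual access modes.
 */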
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
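
/*
 * Worked example (editorial): open(path, O_CREAT | O_TRUNC) yields
 * FILE_OVERWRITE_IF (create if absent, truncate if present), whereas
 * O_CREAT | O_EXCL yields FILE_CREATE, which fails if the file already
 * exists - mirroring the POSIX semantics of those flag combinations.
 * See the full mapping table in the comment inside cifs_nt_open() below.
 */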
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int cifs_posix_open(const char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		rc = cifs_fattr_to_inode(*pinode, &fattr, false);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
			struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
			struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	if (!server->ops->open)
		return -ENOSYS;

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match between disposition
 *	FILE_SUPERSEDE (ie create whether or not file exists although
 *	O_CREAT | O_TRUNC is similar but truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on open call)
 *?
 *?  O_SYNC is a reasonable match to CIFS writethrough flag
 *?  and the read write flags match reasonably.  O_LARGEFILE
 *?  is irrelevant because largefile support is always used
 *?  by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
 *	 O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/

	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = fid,
	};

	rc = server->ops->open(xid, &oparms, oplock, buf);
	if (rc) {
		if (rc == -EACCES && rdwr_for_fscache == 1) {
			desired_access = cifs_convert_flags(f_flags, 0);
			rdwr_for_fscache = 2;
			goto retry_open;
		}
		return rc;
	}
	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

	/* TODO: Add support for calling posix query info but with passing in fid */
	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
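
/*
 * Editorial note: this open-codes down_write() as a trylock-plus-msleep
 * poll.  The apparent intent, judging from the reconnect paths that take
 * lock_sem, is to avoid the rwsem pattern where a queued writer blocks new
 * readers that are themselves needed to make progress, which a blocking
 * down_write() could turn into a deadlock.
 */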
static void cifsFileInfo_put_work(struct work_struct *work);
void serverclose_work(struct work_struct *work);
struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
				       struct tcon_link *tlink, __u32 oplock,
				       const char *symlink_target)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	if (symlink_target) {
		cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
		if (!cfile->symlink_target) {
			kfree(fdlocks);
			kfree(cfile);
			return NULL;
		}
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->deferred_close_scheduled = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	INIT_WORK(&cfile->put, cifsFileInfo_put_work);
	INIT_WORK(&cfile->serverclose, serverclose_work);
	INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);
	atomic_inc(&tcon->num_local_opens);

	/* if readable file instance put first in list*/
	spin_lock(&cinode->open_file_lock);
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&cinode->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct cifsLockInfo *li, *tmp;
	struct super_block *sb = inode->i_sb;

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file->symlink_target);
	kfree(cifs_file);
}
static void cifsFileInfo_put_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, put);

	cifsFileInfo_put_final(cifs_file);
}
void serverclose_work(struct work_struct *work)
{
	struct cifsFileInfo *cifs_file = container_of(work,
			struct cifsFileInfo, serverclose);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	int rc = 0;
	int retries = 0;
	int MAX_RETRIES = 4;

	do {
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(0, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(0, tcon, &cifs_file->fid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			retries++;
			msleep(250);
		}
	} while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES));

	if (retries == MAX_RETRIES)
		pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);

	if (cifs_file->offload)
		queue_work(fileinfo_put_wq, &cifs_file->put);
	else
		cifsFileInfo_put_final(cifs_file);
}
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true, true);
}
/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock,
 * cinode->open_file_lock and cifs_file->file_info_lock.
 *
 * If @wait_for_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one.
 *
 * @cifs_file:	cifs/smb3 specific info (eg refcounts) for an open file
 * @wait_oplock_handler: must be false if called from oplock_break_handler
 * @offload:	not offloaded on close and oplock breaks
 *
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
		       bool wait_oplock_handler, bool offload)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	bool oplock_break_cancelled;
	bool serverclose_offloaded = false;

	spin_lock(&tcon->open_file_lock);
	spin_lock(&cifsi->open_file_lock);
	spin_lock(&cifs_file->file_info_lock);

	cifs_file->offload = offload;
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&cifsi->open_file_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);
	atomic_dec(&tcon->num_local_opens);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need invalidate mapping on the last
		 * close because it may cause a error when we open this file
		 * again and get at least level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&cifsi->open_file_lock);
	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;
		int rc = 0;

		xid = get_xid();
		if (server->ops->close_getattr)
			rc = server->ops->close_getattr(xid, tcon, cifs_file);
		else if (server->ops->close)
			rc = server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);

		if (rc == -EBUSY || rc == -EAGAIN) {
			// Server close failed, hence offloading it as an async op
			queue_work(serverclose_wq, &cifs_file->serverclose);
			serverclose_offloaded = true;
		}
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	// if serverclose has been offloaded to wq (on failure), it will
	// handle offloading put as well. If serverclose not offloaded,
	// we need to handle offloading put here.
	if (!serverclose_offloaded) {
		if (offload)
			queue_work(fileinfo_put_wq, &cifs_file->put);
		else
			cifsFileInfo_put_final(cifs_file);
	}
}
int cifs_open(struct inode *inode, struct file *file)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	void *page;
	const char *full_path;
	bool posix_open_ok = false;
	struct cifs_fid fid = {};
	struct cifs_pending_open open;
	struct cifs_open_info_data data = {};

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	if (unlikely(cifs_forced_shutdown(cifs_sb))) {
		free_xid(xid);
		return -EIO;
	}

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	page = alloc_dentry_path();
	full_path = build_path_from_dentry(file_dentry(file), page);
	if (IS_ERR(full_path)) {
		rc = PTR_ERR(full_path);
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	/* Get the cached handle as SMB2 close is deferred */
	if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
		rc = cifs_get_writable_path(tcon, full_path, FIND_WR_FSUID_ONLY, &cfile);
	} else {
		rc = cifs_get_readable_path(tcon, full_path, &cfile);
	}
	if (rc == 0) {
		if (file->f_flags == cfile->f_flags) {
			file->private_data = cfile;
			spin_lock(&CIFS_I(inode)->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(inode)->deferred_lock);
			goto use_cache;
		}
		_cifsFileInfo_put(cfile, true, false);
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->ctx->file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->ip_addr,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
				  xid, &data);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

use_cache:
	fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
			   file->f_mode & FMODE_WRITE);
	if (!(file->f_flags & O_DIRECT))
		goto out;
	if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
		goto out;
	cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);

out:
	free_dentry_path(page);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_free_open_info(&data);
	return rc;
}
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	void *page;
	const char *full_path;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;
	int rdwr_for_fscache = 0;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	page = alloc_dentry_path();
	full_path = build_path_from_dentry(cfile->dentry, page);
	if (IS_ERR(full_path)) {
		mutex_unlock(&cfile->fh_mutex);
		free_dentry_path(page);
		free_xid(xid);
		return PTR_ERR(full_path);
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->ctx->file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* If we're caching, we need to be able to fill in around partial writes. */
	if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
		rdwr_for_fscache = 1;

	desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

retry_open:
	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.cifs_sb = cifs_sb,
		.desired_access = desired_access,
		.create_options = cifs_create_options(cifs_sb, create_options),
		.disposition = disposition,
		.path = full_path,
		.fid = &cfile->fid,
		.reconnect = true,
	};

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}
	if (rc == -EACCES && rdwr_for_fscache == 1) {
		desired_access = cifs_convert_flags(cfile->f_flags, 0);
		rdwr_for_fscache = 2;
		goto retry_open;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

	if (rdwr_for_fscache == 2)
		cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
reopen_success:
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		if (!is_interrupt_error(rc))
			mapping_set_error(inode->i_mapping, rc);

		if (tcon->posix_extensions) {
			rc = smb311_posix_get_inode_info(&inode, full_path,
							 NULL, inode->i_sb, xid);
		} else if (tcon->unix_ext) {
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		} else {
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
		}
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	free_dentry_path(page);
	free_xid(xid);
	return rc;
}
void smb2_deferred_work_close(struct work_struct *work)
{
	struct cifsFileInfo *cfile = container_of(work,
			struct cifsFileInfo, deferred.work);

	spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	cifs_del_deferred_close(cfile);
	cfile->deferred_close_scheduled = false;
	spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
	_cifsFileInfo_put(cfile, true, false);
}
static bool
smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
			(cinode->oplock == CIFS_CACHE_RHW_FLG ||
			 cinode->oplock == CIFS_CACHE_RH_FLG) &&
			!test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
}
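
/*
 * Illustrative decision sketch (editorial): a close is deferred only when
 * closetimeo is non-zero, the client holds a lease covering cached read
 * (RH) or read/write (RHW) data, and no byte-range lock has forced
 * CIFS_INO_CLOSE_ON_LOCK.  For example, with a closetimeo of 5 seconds, a
 * file closed and reopened within that window can reuse the still-open
 * server handle instead of a fresh SMB2_CREATE round trip.
 */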
int cifs_close(struct inode *inode, struct file *file)
{
	struct cifsFileInfo *cfile;
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct cifs_deferred_close *dclose;

	cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);

	if (file->private_data != NULL) {
		cfile = file->private_data;
		file->private_data = NULL;
		dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
		if ((cfile->status_file_deleted == false) &&
		    (smb2_can_defer_close(inode, dclose))) {
			if (test_and_clear_bit(NETFS_ICTX_MODIFIED_ATTR, &cinode->netfs.flags)) {
				inode_set_mtime_to_ts(inode,
						      inode_set_ctime_current(inode));
			}
			spin_lock(&cinode->deferred_lock);
			cifs_add_deferred_close(cfile, dclose);
			if (cfile->deferred_close_scheduled &&
			    delayed_work_pending(&cfile->deferred)) {
				/*
				 * If there is no pending work, mod_delayed_work queues new work.
				 * So, Increase the ref count to avoid use-after-free.
				 */
				if (!mod_delayed_work(deferredclose_wq,
						      &cfile->deferred, cifs_sb->ctx->closetimeo))
					cifsFileInfo_get(cfile);
			} else {
				/* Deferred close for files */
				queue_delayed_work(deferredclose_wq,
						   &cfile->deferred, cifs_sb->ctx->closetimeo);
				cfile->deferred_close_scheduled = true;
				spin_unlock(&cinode->deferred_lock);
				return 0;
			}
			spin_unlock(&cinode->deferred_lock);
			_cifsFileInfo_put(cfile, true, false);
		} else {
			_cifsFileInfo_put(cfile, true, false);
			kfree(dclose);
		}
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file, *tmp;
	LIST_HEAD(tmp_list);

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles\n");

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(open_file, &tcon->openFileList, tlist) {
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	lock->flags = flags;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, __u16 flags,
			    struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (rw_check == CIFS_LOCK_OP &&
		    (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
		    server->ops->compare_fids(cfile, cur_cfile))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, __u16 flags,
			struct cifsLockInfo **conf_lock, int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 flags, cfile, conf_lock,
						 rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us to set the lock (mandatory
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					flock->c.flc_flags, &conf_lock,
					CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->c.flc_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->c.flc_type = F_RDLCK;
		else
			flock->c.flc_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->c.flc_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, lock->flags, &conf_lock,
					CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
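
/*
 * Caller sketch (editorial, mirroring how cifs_setlk() below consumes the
 * tristate result):
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc < 0)		// conflicting lock and wait == false
 *		return rc;
 *	if (rc == 0)		// lock cached locally, nothing to send
 *		goto out;
 *	// rc == 1: no local conflict - send the lock to the server
 */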
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/*
 * Check if there is another lock that prevents us to set the lock (posix
 * style). If such a lock exists, update the flock structure with its
 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
 * or leave it the same if we can't. Returns 0 if we don't need to request to
 * the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->c.flc_type;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
		flock->c.flc_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) <0, if the error occurs while setting the lock;
 * 2) 0, if we set the lock and don't need to request to the server;
 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
 * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = FILE_LOCK_DEFERRED + 1;

	if ((flock->c.flc_flags & FL_POSIX) == 0)
		return rc;

	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	return rc;
}
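
/*
 * Caller sketch (editorial, following the use in cifs_setlk() below): only
 * a result greater than FILE_LOCK_DEFERRED means the lock must still be
 * sent to the server:
 *
 *	rc = cifs_posix_lock_set(file, flock);
 *	if (rc <= FILE_LOCK_DEFERRED)
 *		return rc;	// error, cached locally, or deferred
 *	// otherwise fall through and issue CIFSSMBPosixLock()
 */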
int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	static const int types[] = {
		LOCKING_ANDX_LARGE_FILES,
		LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
	};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
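
/*
 * Sizing note (editorial): max_buf is first capped to
 * min(maxBuf - sizeof(struct smb_hdr), PAGE_SIZE) and then divided by
 * sizeof(LOCKING_ANDX_RANGE) to get max_num, the number of byte ranges
 * that fit in one SMB_COM_LOCKING_ANDX request.  The loop above flushes
 * the staging buffer with cifs_lockv() each time num reaches that bound,
 * so an arbitrary number of cached locks is pushed in bounded batches.
 */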
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = locks_inode_context(inode);
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	for_each_file_lock(flock, &flctx->flc_posix) {
		unsigned char ftype = flock->c.flc_type;

		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = cifs_flock_len(flock);
		if (ftype == F_RDLCK || ftype == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->c.flc_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->c.flc_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->c.flc_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->c.flc_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->c.flc_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->c.flc_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->c.flc_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
			 flock->c.flc_flags);

	*type = server->vals->large_lock_type;
	if (lock_is_write(flock)) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (lock_is_unlock(flock)) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (lock_is_read(flock)) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->c.flc_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
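
/*
 * Worked example (editorial): an F_WRLCK request on an SMB2 mount leaves
 * *type as large_lock_type | exclusive_lock_type and sets *lock = 1; an
 * F_UNLCK request instead ORs in unlock_lock_type and sets *unlock = 1,
 * which steers cifs_setlk() to the unlock path.
 */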
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = cifs_flock_len(flock);
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->c.flc_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->c.flc_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->c.flc_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->c.flc_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->c.flc_type = F_WRLCK;

	return 0;
}
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}
void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
2085 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2087 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2090 int rc = 0, stored_rc;
2091 static const int types[] = {
2092 LOCKING_ANDX_LARGE_FILES,
2093 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2096 unsigned int max_num, num, max_buf;
2097 LOCKING_ANDX_RANGE *buf, *cur;
2098 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2099 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
2100 struct cifsLockInfo *li, *tmp;
2101 __u64 length = cifs_flock_len(flock);
2102 LIST_HEAD(tmp_llist);
2105 * Accessing maxBuf is racy with cifs_reconnect - need to store value
2106 * and check it before using.
2108 max_buf = tcon->ses->server->maxBuf;
2109 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
2112 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2114 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2116 max_num = (max_buf - sizeof(struct smb_hdr)) /
2117 sizeof(LOCKING_ANDX_RANGE);
2118 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
2122 cifs_down_write(&cinode->lock_sem);
2123 for (i = 0; i < 2; i++) {
2126 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
2127 if (flock->fl_start > li->offset ||
2128 (flock->fl_start + length) <
2129 (li->offset + li->length))
2131 if (current->tgid != li->pid)
2133 if (types[i] != li->type)
2135 if (cinode->can_cache_brlcks) {
2137 * We can cache brlock requests - simply remove
2138 * a lock from the file's list.
2140 list_del(&li->llist);
2141 cifs_del_lock_waiters(li);
2145 cur->Pid = cpu_to_le16(li->pid);
2146 cur->LengthLow = cpu_to_le32((u32)li->length);
2147 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2148 cur->OffsetLow = cpu_to_le32((u32)li->offset);
2149 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2151 * We need to save a lock here to let us add it again to
2152 * the file's list if the unlock range request fails on
2155 list_move(&li->llist, &tmp_llist);
2156 if (++num == max_num) {
2157 stored_rc = cifs_lockv(xid, tcon,
2159 li->type, num, 0, buf);
2162 * We failed on the unlock range
2163 * request - add all locks from the tmp
2164 * list to the head of the file's list.
2166 cifs_move_llist(&tmp_llist,
2167 &cfile->llist->locks);
2171 * The unlock range request succeeded -
2172 * free the tmp list.
2174 cifs_free_llist(&tmp_llist);
2181 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
2182 types[i], num, 0, buf);
2184 cifs_move_llist(&tmp_llist,
2185 &cfile->llist->locks);
2188 cifs_free_llist(&tmp_llist);
2192 up_write(&cinode->lock_sem);
2196 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
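/*
 * Illustrative sketch (assumption: the helper name is made up): the batching
 * logic above packs as many LOCKING_ANDX_RANGE records as fit in one message
 * after the SMB header, sending a request whenever the buffer fills. The
 * per-request capacity reduces to:
 */
#if 0
static unsigned int ranges_per_request(unsigned int buf_bytes,
				       unsigned int hdr_bytes,
				       unsigned int rec_bytes)
{
	if (buf_bytes <= hdr_bytes)
		return 0;			/* no room for any record */
	return (buf_bytes - hdr_bytes) / rec_bytes;
}
#endif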
2199 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
2200 bool wait_flag, bool posix_lck, int lock, int unlock,
2204 __u64 length = cifs_flock_len(flock);
2205 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2206 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2207 struct TCP_Server_Info *server = tcon->ses->server;
2208 struct inode *inode = d_inode(cfile->dentry);
2210 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
2212 int posix_lock_type;
2214 rc = cifs_posix_lock_set(file, flock);
2215 if (rc <= FILE_LOCK_DEFERRED)
2218 if (type & server->vals->shared_lock_type)
2219 posix_lock_type = CIFS_RDLCK;
2221 posix_lock_type = CIFS_WRLCK;
2224 posix_lock_type = CIFS_UNLCK;
2226 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
2227 hash_lockowner(flock->c.flc_owner),
2228 flock->fl_start, length,
2229 NULL, posix_lock_type, wait_flag);
2232 #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2234 struct cifsLockInfo *lock;
2236 lock = cifs_lock_init(flock->fl_start, length, type,
2237 flock->c.flc_flags);
2241 rc = cifs_lock_add_if(cfile, lock, wait_flag);
2250 * Windows 7 server can delay breaking lease from read to None
2251 * if we set a byte-range lock on a file - break it explicitly
2252 * before sending the lock to the server to be sure the next
2253 * read won't conflict with non-overlapping locks due to
2256 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2257 CIFS_CACHE_READ(CIFS_I(inode))) {
2258 cifs_zap_mapping(inode);
2259 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2261 CIFS_I(inode)->oplock = 0;
2264 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2265 type, 1, 0, wait_flag);
2271 cifs_lock_add(cfile, lock);
2273 rc = server->ops->mand_unlock_range(cfile, flock, xid);
2276 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
2278 * If this is a request to remove all locks because we
2279 * are closing the file, it doesn't matter if the
2280 * unlocking failed as both cifs.ko and the SMB server
2281 * remove the lock on file close
2284 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
2285 if (!(flock->c.flc_flags & FL_CLOSE))
2288 rc = locks_lock_file_wait(file, flock);
2293 int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2296 int lock = 0, unlock = 0;
2297 bool wait_flag = false;
2298 bool posix_lck = false;
2299 struct cifs_sb_info *cifs_sb;
2300 struct cifs_tcon *tcon;
2301 struct cifsFileInfo *cfile;
2306 if (!(fl->c.flc_flags & FL_FLOCK)) {
2312 cfile = (struct cifsFileInfo *)file->private_data;
2313 tcon = tlink_tcon(cfile->tlink);
2315 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2317 cifs_sb = CIFS_FILE_SB(file);
2319 if (cap_unix(tcon->ses) &&
2320 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2321 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2324 if (!lock && !unlock) {
2326 * if no lock or unlock then nothing to do since we do not
2334 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
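/*
 * Userspace sketch (assumption: path and helper name are made up):
 * cifs_flock() above is what services a whole-file advisory lock taken
 * with flock(2) on a file that lives on a CIFS mount.
 */
#if 0
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

static int with_whole_file_lock(const char *path)
{
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (flock(fd, LOCK_EX) < 0) {	/* blocks until granted */
		close(fd);
		return -1;
	}
	/* ... I/O done under the exclusive lock ... */
	flock(fd, LOCK_UN);
	close(fd);
	return 0;
}
#endif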
2342 int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2345 int lock = 0, unlock = 0;
2346 bool wait_flag = false;
2347 bool posix_lck = false;
2348 struct cifs_sb_info *cifs_sb;
2349 struct cifs_tcon *tcon;
2350 struct cifsFileInfo *cfile;
2356 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
2357 flock->c.flc_flags, flock->c.flc_type,
2358 (long long)flock->fl_start,
2359 (long long)flock->fl_end);
2361 cfile = (struct cifsFileInfo *)file->private_data;
2362 tcon = tlink_tcon(cfile->tlink);
2364 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2366 cifs_sb = CIFS_FILE_SB(file);
2367 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
2369 if (cap_unix(tcon->ses) &&
2370 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2371 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2374 * BB add code here to normalize offset and length to account for
2375 * negative length which we can not accept over the wire.
2377 if (IS_GETLK(cmd)) {
2378 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
2383 if (!lock && !unlock) {
2385 * if no lock or unlock then nothing to do since we do not
2392 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
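/*
 * Userspace sketch (assumption: the offsets are made up): cifs_lock() above
 * is reached via fcntl(2) byte-range locks; F_SETLKW corresponds to the
 * wait_flag case, F_GETLK to the cifs_getlk() test path.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int lock_bytes_100_to_199(int fd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;		/* exclusive byte-range lock */
	fl.l_whence = SEEK_SET;
	fl.l_start = 100;
	fl.l_len = 100;
	return fcntl(fd, F_SETLKW, &fl);	/* wait for the range */
}
#endif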
2398 void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2401 struct netfs_io_request *wreq = wdata->rreq;
2402 struct netfs_inode *ictx = netfs_inode(wreq->inode);
2406 wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2408 if (wrend > ictx->zero_point &&
2409 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2410 wdata->rreq->origin == NETFS_DIO_WRITE))
2411 ictx->zero_point = wrend;
2412 if (wrend > ictx->remote_i_size)
2413 netfs_resize_file(ictx, wrend, true);
2416 netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
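/*
 * Illustrative sketch (assumption: a standalone restatement, not driver
 * code): the size bookkeeping above computes the end of the just-written
 * region and pushes the cached zero point and remote size forward when the
 * write lands beyond them. In the real code the zero_point update is
 * additionally gated on unbuffered/DIO writes.
 */
#if 0
static unsigned long long note_write_end(unsigned long long start,
					 unsigned long long transferred,
					 unsigned long long result,
					 unsigned long long *zero_point,
					 unsigned long long *remote_size)
{
	unsigned long long wrend = start + transferred + result;

	if (wrend > *zero_point)
		*zero_point = wrend;	/* data now exists past old zero point */
	if (wrend > *remote_size)
		*remote_size = wrend;	/* the server-side file grew */
	return wrend;
}
#endif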
2419 struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2422 struct cifsFileInfo *open_file = NULL;
2423 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2425 /* only filter by fsuid on multiuser mounts */
2426 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2429 spin_lock(&cifs_inode->open_file_lock);
2430 /* we could simply get the first_list_entry since write-only entries
2431 are always at the end of the list but since the first entry might
2432 have a close pending, we go through the whole list */
2433 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2434 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2436 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
2437 if (!open_file->invalidHandle) {
2438 /* found a good file */
2439 /* lock it so it will not be closed on us */
2440 cifsFileInfo_get(open_file);
2441 spin_unlock(&cifs_inode->open_file_lock);
2443 } /* else might as well continue, and look for
2444 another, or simply have the caller reopen it
2445 again rather than trying to fix this handle */
2446 } else /* write only file */
2447 break; /* write only files are last so must be done */
2449 spin_unlock(&cifs_inode->open_file_lock);
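/*
 * Pattern sketch (assumption: toy types, pthreads instead of spinlocks):
 * find_readable_file() above pins the handle it picks *before* dropping the
 * list lock, so the entry cannot be freed between the lookup and the
 * caller's use of it.
 */
#if 0
#include <pthread.h>
#include <stddef.h>

struct handle { int refs; int usable; struct handle *next; };
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct handle *find_and_get(struct handle *head)
{
	struct handle *h;

	pthread_mutex_lock(&list_lock);
	for (h = head; h; h = h->next) {
		if (h->usable) {
			h->refs++;	/* pin while still holding the lock */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return h;			/* caller must drop the reference */
}
#endif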
2453 /* Return -EBADF if no handle is found and general rc otherwise */
2455 cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
2456 struct cifsFileInfo **ret_file)
2458 struct cifsFileInfo *open_file, *inv_file = NULL;
2459 struct cifs_sb_info *cifs_sb;
2460 bool any_available = false;
2462 unsigned int refind = 0;
2463 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2464 bool with_delete = flags & FIND_WR_WITH_DELETE;
2468 * Having a null inode here (because mapping->host was set to zero by
2469 * the VFS or MM) should not happen but we had reports of an oops (due
2470 * to it being zero) during stress testcases so we need to check for it
2473 if (cifs_inode == NULL) {
2474 cifs_dbg(VFS, "Null inode passed to cifs_get_writable_file\n");
2479 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
2481 /* only filter by fsuid on multiuser mounts */
2482 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2485 spin_lock(&cifs_inode->open_file_lock);
2487 if (refind > MAX_REOPEN_ATT) {
2488 spin_unlock(&cifs_inode->open_file_lock);
2491 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2492 if (!any_available && open_file->pid != current->tgid)
2494 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
2496 if (with_delete && !(open_file->fid.access & DELETE))
2498 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
2499 if (!open_file->invalidHandle) {
2500 /* found a good writable file */
2501 cifsFileInfo_get(open_file);
2502 spin_unlock(&cifs_inode->open_file_lock);
2503 *ret_file = open_file;
2507 inv_file = open_file;
2511 /* couldn't find usable FH with same pid, try any available */
2512 if (!any_available) {
2513 any_available = true;
2514 goto refind_writable;
2518 any_available = false;
2519 cifsFileInfo_get(inv_file);
2522 spin_unlock(&cifs_inode->open_file_lock);
2525 rc = cifs_reopen_file(inv_file, false);
2527 *ret_file = inv_file;
2531 spin_lock(&cifs_inode->open_file_lock);
2532 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
2533 spin_unlock(&cifs_inode->open_file_lock);
2534 cifsFileInfo_put(inv_file);
2537 spin_lock(&cifs_inode->open_file_lock);
2538 goto refind_writable;
2544 struct cifsFileInfo *
2545 find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
2547 struct cifsFileInfo *cfile;
2550 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
2552 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
2558 cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
2560 struct cifsFileInfo **ret_file)
2562 struct cifsFileInfo *cfile;
2563 void *page = alloc_dentry_path();
2567 spin_lock(&tcon->open_file_lock);
2568 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2569 struct cifsInodeInfo *cinode;
2570 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2571 if (IS_ERR(full_path)) {
2572 spin_unlock(&tcon->open_file_lock);
2573 free_dentry_path(page);
2574 return PTR_ERR(full_path);
2576 if (strcmp(full_path, name))
2579 cinode = CIFS_I(d_inode(cfile->dentry));
2580 spin_unlock(&tcon->open_file_lock);
2581 free_dentry_path(page);
2582 return cifs_get_writable_file(cinode, flags, ret_file);
2585 spin_unlock(&tcon->open_file_lock);
2586 free_dentry_path(page);
2591 cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2592 struct cifsFileInfo **ret_file)
2594 struct cifsFileInfo *cfile;
2595 void *page = alloc_dentry_path();
2599 spin_lock(&tcon->open_file_lock);
2600 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2601 struct cifsInodeInfo *cinode;
2602 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2603 if (IS_ERR(full_path)) {
2604 spin_unlock(&tcon->open_file_lock);
2605 free_dentry_path(page);
2606 return PTR_ERR(full_path);
2608 if (strcmp(full_path, name))
2611 cinode = CIFS_I(d_inode(cfile->dentry));
2612 spin_unlock(&tcon->open_file_lock);
2613 free_dentry_path(page);
2614 *ret_file = find_readable_file(cinode, 0);
2615 return *ret_file ? 0 : -ENOENT;
2618 spin_unlock(&tcon->open_file_lock);
2619 free_dentry_path(page);
2624 * Flush data on a strict file.
2626 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2631 struct cifs_tcon *tcon;
2632 struct TCP_Server_Info *server;
2633 struct cifsFileInfo *smbfile = file->private_data;
2634 struct inode *inode = file_inode(file);
2635 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2637 rc = file_write_and_wait_range(file, start, end);
2639 trace_cifs_fsync_err(inode->i_ino, rc);
2645 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2648 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2649 rc = cifs_zap_mapping(inode);
2651 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2652 rc = 0; /* don't care about it in fsync */
2656 tcon = tlink_tcon(smbfile->tlink);
2657 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2658 server = tcon->ses->server;
2659 if (server->ops->flush == NULL) {
2661 goto strict_fsync_exit;
2664 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2665 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2667 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2668 cifsFileInfo_put(smbfile);
2670 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2672 rc = server->ops->flush(xid, tcon, &smbfile->fid);
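/*
 * Userspace sketch (assumption: a trivial wrapper with a made-up name): an
 * application fsync(2) on a CIFS file is what lands in cifs_strict_fsync()
 * above - first the page cache is written back, then an SMB Flush is sent.
 */
#if 0
#include <unistd.h>

static int write_durably(int fd, const void *buf, size_t len)
{
	if (write(fd, buf, len) != (ssize_t)len)
		return -1;
	return fsync(fd);	/* writeback + server-side flush */
}
#endif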
2681 * Flush data on a non-strict file.
2683 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2687 struct cifs_tcon *tcon;
2688 struct TCP_Server_Info *server;
2689 struct cifsFileInfo *smbfile = file->private_data;
2690 struct inode *inode = file_inode(file);
2691 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2693 rc = file_write_and_wait_range(file, start, end);
2695 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
2701 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2704 tcon = tlink_tcon(smbfile->tlink);
2705 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2706 server = tcon->ses->server;
2707 if (server->ops->flush == NULL) {
2712 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2713 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2715 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2716 cifsFileInfo_put(smbfile);
2718 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2720 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2729 * As the file closes, flush all cached write data for this inode,
2730 * checking for write-behind errors.
2732 int cifs_flush(struct file *file, fl_owner_t id)
2734 struct inode *inode = file_inode(file);
2737 if (file->f_mode & FMODE_WRITE)
2738 rc = filemap_write_and_wait(inode->i_mapping);
2740 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2742 /* get more nuanced writeback errors */
2743 rc = filemap_check_wb_err(file->f_mapping, 0);
2744 trace_cifs_flush_err(inode->i_ino, rc);
2750 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2752 struct file *file = iocb->ki_filp;
2753 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2754 struct inode *inode = file->f_mapping->host;
2755 struct cifsInodeInfo *cinode = CIFS_I(inode);
2756 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2757 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2760 rc = netfs_start_io_write(inode);
2765 * We need to hold the sem to be sure nobody modifies lock list
2766 * with a brlock that prevents writing.
2768 down_read(&cinode->lock_sem);
2770 rc = generic_write_checks(iocb, from);
2774 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
2775 (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2776 server->vals->exclusive_lock_type, 0,
2777 NULL, CIFS_WRITE_OP))) {
2782 rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
2785 up_read(&cinode->lock_sem);
2786 netfs_end_io_write(inode);
2788 rc = generic_write_sync(iocb, rc);
2793 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2795 struct inode *inode = file_inode(iocb->ki_filp);
2796 struct cifsInodeInfo *cinode = CIFS_I(inode);
2797 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2798 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2799 iocb->ki_filp->private_data;
2800 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2803 written = cifs_get_writer(cinode);
2807 if (CIFS_CACHE_WRITE(cinode)) {
2808 if (cap_unix(tcon->ses) &&
2809 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2810 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2811 written = netfs_file_write_iter(iocb, from);
2814 written = cifs_writev(iocb, from);
2818 * For non-oplocked files in strict cache mode we need to write the data
2819 * to the server exactly from the pos to pos+len-1 rather than flush all
2820 * affected pages because it may cause an error with mandatory locks on
2821 * these pages but not on the region from pos to pos+len-1.
2823 written = netfs_file_write_iter(iocb, from);
2824 if (CIFS_CACHE_READ(cinode)) {
2826 * We have read level caching and we have just sent a write
2827 * request to the server thus making data in the cache stale.
2828 * Zap the cache and set oplock/lease level to NONE to avoid
2829 * reading stale data from the cache. All subsequent read
2830 * operations will read new data from the server.
2832 cifs_zap_mapping(inode);
2833 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2838 cifs_put_writer(cinode);
2842 ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2845 struct inode *inode = file_inode(iocb->ki_filp);
2847 if (iocb->ki_flags & IOCB_DIRECT)
2848 return netfs_unbuffered_read_iter(iocb, iter);
2850 rc = cifs_revalidate_mapping(inode);
2854 return netfs_file_read_iter(iocb, iter);
2857 ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2859 struct inode *inode = file_inode(iocb->ki_filp);
2860 struct cifsInodeInfo *cinode = CIFS_I(inode);
2864 if (iocb->ki_filp->f_flags & O_DIRECT) {
2865 written = netfs_unbuffered_write_iter(iocb, from);
2866 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2867 cifs_zap_mapping(inode);
2869 "Set no oplock for inode=%p after a write operation\n",
2876 written = cifs_get_writer(cinode);
2880 written = netfs_file_write_iter(iocb, from);
2882 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2883 rc = filemap_fdatawrite(inode->i_mapping);
2885 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2889 cifs_put_writer(cinode);
2894 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
2896 struct inode *inode = file_inode(iocb->ki_filp);
2897 struct cifsInodeInfo *cinode = CIFS_I(inode);
2898 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2899 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2900 iocb->ki_filp->private_data;
2901 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2905 * In strict cache mode we need to read from the server all the time
2906 * if we don't have level II oplock because the server can delay mtime
2907 * change - so we can't make a decision about invalidating the inode.
2908 * And we can also fail when reading pages if there are mandatory locks
2909 * on pages affected by this read but not on the region from pos to
2912 if (!CIFS_CACHE_READ(cinode))
2913 return netfs_unbuffered_read_iter(iocb, to);
2915 if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0) {
2916 if (iocb->ki_flags & IOCB_DIRECT)
2917 return netfs_unbuffered_read_iter(iocb, to);
2918 return netfs_buffered_read_iter(iocb, to);
2922 * We need to hold the sem to be sure nobody modifies lock list
2923 * with a brlock that prevents reading.
2925 if (iocb->ki_flags & IOCB_DIRECT) {
2926 rc = netfs_start_io_direct(inode);
2930 down_read(&cinode->lock_sem);
2931 if (!cifs_find_lock_conflict(
2932 cfile, iocb->ki_pos, iov_iter_count(to),
2933 tcon->ses->server->vals->shared_lock_type,
2934 0, NULL, CIFS_READ_OP))
2935 rc = netfs_unbuffered_read_iter_locked(iocb, to);
2936 up_read(&cinode->lock_sem);
2937 netfs_end_io_direct(inode);
2939 rc = netfs_start_io_read(inode);
2943 down_read(&cinode->lock_sem);
2944 if (!cifs_find_lock_conflict(
2945 cfile, iocb->ki_pos, iov_iter_count(to),
2946 tcon->ses->server->vals->shared_lock_type,
2947 0, NULL, CIFS_READ_OP))
2948 rc = filemap_read(iocb, to, 0);
2949 up_read(&cinode->lock_sem);
2950 netfs_end_io_read(inode);
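/*
 * Userspace sketch (assumption: 4096-byte alignment suffices here): an
 * O_DIRECT read is what takes the netfs_unbuffered_read_iter() paths in
 * cifs_strict_readv() above; O_DIRECT buffers must be suitably aligned.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static ssize_t direct_read(const char *path, size_t len)
{
	void *buf = NULL;
	ssize_t n = -1;
	int fd = open(path, O_RDONLY | O_DIRECT);

	if (fd < 0)
		return -1;
	if (posix_memalign(&buf, 4096, len) == 0) {
		n = read(fd, buf, len);
		free(buf);
	}
	close(fd);
	return n;
}
#endif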
2956 static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
2958 return netfs_page_mkwrite(vmf, NULL);
2961 static const struct vm_operations_struct cifs_file_vm_ops = {
2962 .fault = filemap_fault,
2963 .map_pages = filemap_map_pages,
2964 .page_mkwrite = cifs_page_mkwrite,
2967 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2970 struct inode *inode = file_inode(file);
2974 if (!CIFS_CACHE_READ(CIFS_I(inode)))
2975 rc = cifs_zap_mapping(inode);
2977 rc = generic_file_mmap(file, vma);
2979 vma->vm_ops = &cifs_file_vm_ops;
2985 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2991 rc = cifs_revalidate_file(file);
2993 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2996 rc = generic_file_mmap(file, vma);
2998 vma->vm_ops = &cifs_file_vm_ops;
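/*
 * Userspace sketch (assumption: the file is at least one page long):
 * mapping a CIFS file shared-writable goes through cifs_file_mmap() /
 * cifs_file_strict_mmap() above; the first write fault is served by
 * filemap_fault() and made writable via cifs_page_mkwrite().
 */
#if 0
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

static int touch_first_byte(const char *path)
{
	char *p;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);			/* the mapping keeps the file alive */
	if (p == MAP_FAILED)
		return -1;
	p[0] = 'x';			/* faults the page in, marks it dirty */
	return munmap(p, 4096);
}
#endif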
3004 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3006 struct cifsFileInfo *open_file;
3008 spin_lock(&cifs_inode->open_file_lock);
3009 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3010 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3011 spin_unlock(&cifs_inode->open_file_lock);
3015 spin_unlock(&cifs_inode->open_file_lock);
3019 /* We do not want to update the file size from server for inodes
3020 open for write - to avoid races with writepage extending
3021 the file - in the future we could consider allowing
3022 refreshing the inode only on increases in the file size
3023 but this is tricky to do without racing with writebehind
3024 page caching in the current Linux kernel design */
3025 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3031 if (is_inode_writable(cifsInode) ||
3032 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
3033 /* This inode is open for write at least once */
3034 struct cifs_sb_info *cifs_sb;
3036 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
3037 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3038 /* since no page cache to corrupt on directio
3039 we can change size safely */
3043 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
3051 void cifs_oplock_break(struct work_struct *work)
3053 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3055 struct inode *inode = d_inode(cfile->dentry);
3056 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3057 struct cifsInodeInfo *cinode = CIFS_I(inode);
3058 struct cifs_tcon *tcon;
3059 struct TCP_Server_Info *server;
3060 struct tcon_link *tlink;
3062 bool purge_cache = false, oplock_break_cancelled;
3063 __u64 persistent_fid, volatile_fid;
3066 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3067 TASK_UNINTERRUPTIBLE);
3069 tlink = cifs_sb_tlink(cifs_sb);
3072 tcon = tlink_tcon(tlink);
3073 server = tcon->ses->server;
3075 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3076 cfile->oplock_epoch, &purge_cache);
3078 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3079 cifs_has_mand_locks(cinode)) {
3080 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3085 if (inode && S_ISREG(inode->i_mode)) {
3086 if (CIFS_CACHE_READ(cinode))
3087 break_lease(inode, O_RDONLY);
3089 break_lease(inode, O_WRONLY);
3090 rc = filemap_fdatawrite(inode->i_mapping);
3091 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3092 rc = filemap_fdatawait(inode->i_mapping);
3093 mapping_set_error(inode->i_mapping, rc);
3094 cifs_zap_mapping(inode);
3096 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3097 if (CIFS_CACHE_WRITE(cinode))
3098 goto oplock_break_ack;
3101 rc = cifs_push_locks(cfile);
3103 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3107 * When an oplock break is received and there are no active
3108 * file handles, only cached ones, schedule the deferred close
3109 * immediately so that a new open will not use a cached handle.
3112 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
3113 cifs_close_deferred_file(cinode);
3115 persistent_fid = cfile->fid.persistent_fid;
3116 volatile_fid = cfile->fid.volatile_fid;
3117 net_fid = cfile->fid.netfid;
3118 oplock_break_cancelled = cfile->oplock_break_cancelled;
3120 _cifsFileInfo_put(cfile, false /* do not wait for ourselves */, false);
3122 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3123 * an acknowledgment to be sent when the file has already been closed.
3125 spin_lock(&cinode->open_file_lock);
3126 /* check list empty since can race with kill_sb calling tree disconnect */
3127 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
3128 spin_unlock(&cinode->open_file_lock);
3129 rc = server->ops->oplock_response(tcon, persistent_fid,
3130 volatile_fid, net_fid, cinode);
3131 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3133 spin_unlock(&cinode->open_file_lock);
3135 cifs_put_tlink(tlink);
3137 cifs_done_oplock_break(cinode);
3140 static int cifs_swap_activate(struct swap_info_struct *sis,
3141 struct file *swap_file, sector_t *span)
3143 struct cifsFileInfo *cfile = swap_file->private_data;
3144 struct inode *inode = swap_file->f_mapping->host;
3145 unsigned long blocks;
3148 cifs_dbg(FYI, "swap activate\n");
3150 if (!swap_file->f_mapping->a_ops->swap_rw)
3151 /* Cannot support swap */
3154 spin_lock(&inode->i_lock);
3155 blocks = inode->i_blocks;
3156 isize = inode->i_size;
3157 spin_unlock(&inode->i_lock);
3158 if (blocks * 512 < isize) {
3159 pr_warn("swap activate: swapfile has holes\n");
3164 pr_warn_once("Swap support over SMB3 is experimental\n");
3167 * TODO: consider adding ACL (or documenting how) to prevent other
3168 * users (on this or other systems) from reading it
3172 /* TODO: add sk_set_memalloc(inet) or similar */
3175 cfile->swapfile = true;
3177 * TODO: Since file already open, we can't open with DENY_ALL here
3178 * but we could add call to grab a byte range lock to prevent others
3179 * from reading or writing the file
3182 sis->flags |= SWP_FS_OPS;
3183 return add_swap_extent(sis, 0, sis->max, 0);
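/*
 * Illustrative sketch (assumption: a standalone restatement of the check
 * above): i_blocks counts 512-byte units, so a file whose block count
 * covers fewer bytes than its size must contain holes - and swapfiles
 * with holes are rejected.
 */
#if 0
#include <stdbool.h>

static bool file_has_holes(unsigned long long blocks_512,
			   unsigned long long isize)
{
	return blocks_512 * 512 < isize;
}
#endif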
3186 static void cifs_swap_deactivate(struct file *file)
3188 struct cifsFileInfo *cfile = file->private_data;
3190 cifs_dbg(FYI, "swap deactivate\n");
3192 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3195 cfile->swapfile = false;
3197 /* do we need to unpin (or unlock) the file */
3201 * cifs_swap_rw - SMB3 address space operation for swap I/O
3202 * @iocb: target I/O control block
3205 * Perform IO to the swap-file. This is much like direct IO.
3207 static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3211 if (iov_iter_rw(iter) == READ)
3212 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3214 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3220 const struct address_space_operations cifs_addr_ops = {
3221 .read_folio = netfs_read_folio,
3222 .readahead = netfs_readahead,
3223 .writepages = netfs_writepages,
3224 .dirty_folio = netfs_dirty_folio,
3225 .release_folio = netfs_release_folio,
3226 .direct_IO = noop_direct_IO,
3227 .invalidate_folio = netfs_invalidate_folio,
3228 .migrate_folio = filemap_migrate_folio,
3230 * TODO: investigate and if useful we could add an is_dirty_writeback
3233 .swap_activate = cifs_swap_activate,
3234 .swap_deactivate = cifs_swap_deactivate,
3235 .swap_rw = cifs_swap_rw,
3239 * cifs_readahead requires the server to support a buffer large enough to
3240 * contain the header plus one complete page of data. Otherwise, we need
3241 * to leave cifs_readahead out of the address space operations.
3243 const struct address_space_operations cifs_addr_ops_smallbuf = {
3244 .read_folio = netfs_read_folio,
3245 .writepages = netfs_writepages,
3246 .dirty_folio = netfs_dirty_folio,
3247 .release_folio = netfs_release_folio,
3248 .invalidate_folio = netfs_invalidate_folio,
3249 .migrate_folio = filemap_migrate_folio,