]> Git Repo - linux.git/blame - fs/smb/client/file.c
cifs: Add a tracepoint to track credits involved in R/W requests
[linux.git] / fs / smb / client / file.c
CommitLineData
929be906 1// SPDX-License-Identifier: LGPL-2.1
1da177e4 2/*
1da177e4
LT
3 *
4 * vfs operations that deal with files
fb8c4b14 5 *
f19159dc 6 * Copyright (C) International Business Machines Corp., 2002,2010
1da177e4 7 * Author(s): Steve French ([email protected])
7ee1af76 8 * Jeremy Allison ([email protected])
1da177e4 9 *
1da177e4
LT
10 */
11#include <linux/fs.h>
5970e15d 12#include <linux/filelock.h>
37c0eb46 13#include <linux/backing-dev.h>
1da177e4
LT
14#include <linux/stat.h>
15#include <linux/fcntl.h>
16#include <linux/pagemap.h>
17#include <linux/pagevec.h>
37c0eb46 18#include <linux/writeback.h>
6f88cc2e 19#include <linux/task_io_accounting_ops.h>
23e7dd7d 20#include <linux/delay.h>
3bc303c2 21#include <linux/mount.h>
5a0e3ad6 22#include <linux/slab.h>
690c5e31 23#include <linux/swap.h>
f86196ea 24#include <linux/mm.h>
1da177e4
LT
25#include <asm/div64.h>
26#include "cifsfs.h"
27#include "cifspdu.h"
28#include "cifsglob.h"
29#include "cifsproto.h"
fb157ed2 30#include "smb2proto.h"
1da177e4
LT
31#include "cifs_unicode.h"
32#include "cifs_debug.h"
33#include "cifs_fs_sb.h"
9451a9a5 34#include "fscache.h"
bd3dcc6a 35#include "smbdirect.h"
8401e936 36#include "fs_context.h"
087f757b 37#include "cifs_ioctl.h"
05b98fd2 38#include "cached_dir.h"
69c3c023
DH
39#include <trace/events/netfs.h>
40
41static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush);
42
43/*
44 * Prepare a subrequest to upload to the server. We need to allocate credits
45 * so that we know the maximum amount of data that we can include in it.
46 */
47static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
48{
49 struct cifs_io_subrequest *wdata =
50 container_of(subreq, struct cifs_io_subrequest, subreq);
51 struct cifs_io_request *req = wdata->req;
52 struct TCP_Server_Info *server;
53 struct cifsFileInfo *open_file = req->cfile;
54 size_t wsize = req->rreq.wsize;
55 int rc;
56
57 if (!wdata->have_xid) {
58 wdata->xid = get_xid();
59 wdata->have_xid = true;
60 }
61
62 server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
63 wdata->server = server;
64
65retry:
66 if (open_file->invalidHandle) {
67 rc = cifs_reopen_file(open_file, false);
68 if (rc < 0) {
69 if (rc == -EAGAIN)
70 goto retry;
71 subreq->error = rc;
72 return netfs_prepare_write_failed(subreq);
73 }
74 }
75
76 rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len,
77 &wdata->credits);
78 if (rc < 0) {
79 subreq->error = rc;
80 return netfs_prepare_write_failed(subreq);
81 }
82
519be989
DH
83 wdata->credits.rreq_debug_id = subreq->rreq->debug_id;
84 wdata->credits.rreq_debug_index = subreq->debug_index;
85 wdata->credits.in_flight_check = 1;
86 trace_smb3_rw_credits(wdata->rreq->debug_id,
87 wdata->subreq.debug_index,
88 wdata->credits.value,
89 server->credits, server->in_flight,
90 wdata->credits.value,
91 cifs_trace_rw_credits_write_prepare);
92
69c3c023
DH
93#ifdef CONFIG_CIFS_SMB_DIRECT
94 if (server->smbd_conn)
95 subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
96#endif
97}
98
99/*
100 * Issue a subrequest to upload to the server.
101 */
102static void cifs_issue_write(struct netfs_io_subrequest *subreq)
103{
104 struct cifs_io_subrequest *wdata =
105 container_of(subreq, struct cifs_io_subrequest, subreq);
106 struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb);
107 int rc;
108
109 if (cifs_forced_shutdown(sbi)) {
110 rc = -EIO;
111 goto fail;
112 }
113
519be989 114 rc = adjust_credits(wdata->server, wdata, cifs_trace_rw_credits_issue_write_adjust);
69c3c023
DH
115 if (rc)
116 goto fail;
117
118 rc = -EAGAIN;
119 if (wdata->req->cfile->invalidHandle)
120 goto fail;
121
122 wdata->server->ops->async_writev(wdata);
123out:
124 return;
125
126fail:
127 if (rc == -EAGAIN)
128 trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
129 else
130 trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
131 add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
3ee1a1fc 132 cifs_write_subrequest_terminated(wdata, rc, false);
69c3c023
DH
133 goto out;
134}
135
136/*
137 * Split the read up according to how many credits we can get for each piece.
138 * It's okay to sleep here if we need to wait for more credit to become
139 * available.
140 *
141 * We also choose the server and allocate an operation ID to be cleaned up
142 * later.
143 */
144static bool cifs_clamp_length(struct netfs_io_subrequest *subreq)
145{
146 struct netfs_io_request *rreq = subreq->rreq;
69c3c023
DH
147 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
148 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
969b3010 149 struct TCP_Server_Info *server = req->server;
69c3c023
DH
150 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
151 size_t rsize = 0;
152 int rc;
153
154 rdata->xid = get_xid();
155 rdata->have_xid = true;
69c3c023
DH
156 rdata->server = server;
157
158 if (cifs_sb->ctx->rsize == 0)
159 cifs_sb->ctx->rsize =
160 server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
161 cifs_sb->ctx);
162
163
164 rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize,
165 &rdata->credits);
166 if (rc) {
167 subreq->error = rc;
168 return false;
169 }
170
519be989
DH
171 rdata->credits.in_flight_check = 1;
172 rdata->credits.rreq_debug_id = rreq->debug_id;
173 rdata->credits.rreq_debug_index = subreq->debug_index;
174
175 trace_smb3_rw_credits(rdata->rreq->debug_id,
176 rdata->subreq.debug_index,
177 rdata->credits.value,
178 server->credits, server->in_flight, 0,
179 cifs_trace_rw_credits_read_submit);
180
69c3c023 181 subreq->len = min_t(size_t, subreq->len, rsize);
519be989 182
69c3c023
DH
183#ifdef CONFIG_CIFS_SMB_DIRECT
184 if (server->smbd_conn)
185 subreq->max_nr_segs = server->smbd_conn->max_frmr_depth;
186#endif
187 return true;
188}
189
190/*
191 * Issue a read operation on behalf of the netfs helper functions. We're asked
192 * to make a read of a certain size at a point in the file. We are permitted
193 * to only read a portion of that, but as long as we read something, the netfs
194 * helper will call us again so that we can issue another read.
195 */
196static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
197{
198 struct netfs_io_request *rreq = subreq->rreq;
199 struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq);
200 struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq);
69c3c023
DH
201 int rc = 0;
202
69c3c023
DH
203 cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n",
204 __func__, rreq->debug_id, subreq->debug_index, rreq->mapping,
205 subreq->transferred, subreq->len);
206
207 if (req->cfile->invalidHandle) {
208 do {
209 rc = cifs_reopen_file(req->cfile, true);
210 } while (rc == -EAGAIN);
211 if (rc)
212 goto out;
213 }
214
215 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
69c3c023 216
969b3010 217 rc = rdata->server->ops->async_readv(rdata);
69c3c023
DH
218out:
219 if (rc)
220 netfs_subreq_terminated(subreq, rc, false);
221}
222
223/*
224 * Writeback calls this when it finds a folio that needs uploading. This isn't
225 * called if writeback only has copy-to-cache to deal with.
226 */
227static void cifs_begin_writeback(struct netfs_io_request *wreq)
228{
229 struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
230 int ret;
231
232 ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
233 if (ret) {
234 cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
235 return;
236 }
237
238 wreq->io_streams[0].avail = true;
239}
240
241/*
242 * Initialise a request.
243 */
244static int cifs_init_request(struct netfs_io_request *rreq, struct file *file)
245{
246 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
247 struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb);
248 struct cifsFileInfo *open_file = NULL;
249
250 rreq->rsize = cifs_sb->ctx->rsize;
251 rreq->wsize = cifs_sb->ctx->wsize;
3f591385 252 req->pid = current->tgid; // Ummm... This may be a workqueue
69c3c023
DH
253
254 if (file) {
255 open_file = file->private_data;
256 rreq->netfs_priv = file->private_data;
257 req->cfile = cifsFileInfo_get(open_file);
969b3010 258 req->server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses);
3f591385
DH
259 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
260 req->pid = req->cfile->pid;
69c3c023
DH
261 } else if (rreq->origin != NETFS_WRITEBACK) {
262 WARN_ON_ONCE(1);
263 return -EIO;
264 }
265
266 return 0;
267}
268
69c3c023
DH
269/*
270 * Completion of a request operation.
271 */
272static void cifs_rreq_done(struct netfs_io_request *rreq)
273{
274 struct timespec64 atime, mtime;
275 struct inode *inode = rreq->inode;
276
277 /* we do not want atime to be less than mtime, it broke some apps */
278 atime = inode_set_atime_to_ts(inode, current_time(inode));
279 mtime = inode_get_mtime(inode);
280 if (timespec64_compare(&atime, &mtime))
281 inode_set_atime_to_ts(inode, inode_get_mtime(inode));
282}
283
284static void cifs_post_modify(struct inode *inode)
285{
286 /* Indication to update ctime and mtime as close is deferred */
287 set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags);
288}
289
290static void cifs_free_request(struct netfs_io_request *rreq)
291{
292 struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq);
293
294 if (req->cfile)
295 cifsFileInfo_put(req->cfile);
296}
297
298static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
299{
300 struct cifs_io_subrequest *rdata =
301 container_of(subreq, struct cifs_io_subrequest, subreq);
302 int rc = subreq->error;
303
304 if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) {
305#ifdef CONFIG_CIFS_SMB_DIRECT
306 if (rdata->mr) {
307 smbd_deregister_mr(rdata->mr);
308 rdata->mr = NULL;
309 }
310#endif
311 }
312
519be989
DH
313 if (rdata->credits.value != 0)
314 trace_smb3_rw_credits(rdata->rreq->debug_id,
315 rdata->subreq.debug_index,
316 rdata->credits.value,
317 rdata->server ? rdata->server->credits : 0,
318 rdata->server ? rdata->server->in_flight : 0,
319 -rdata->credits.value,
320 cifs_trace_rw_credits_free_subreq);
321
69c3c023
DH
322 add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
323 if (rdata->have_xid)
324 free_xid(rdata->xid);
325}
326
327const struct netfs_request_ops cifs_req_ops = {
328 .request_pool = &cifs_io_request_pool,
329 .subrequest_pool = &cifs_io_subrequest_pool,
330 .init_request = cifs_init_request,
331 .free_request = cifs_free_request,
332 .free_subrequest = cifs_free_subrequest,
69c3c023
DH
333 .clamp_length = cifs_clamp_length,
334 .issue_read = cifs_req_issue_read,
335 .done = cifs_rreq_done,
336 .post_modify = cifs_post_modify,
337 .begin_writeback = cifs_begin_writeback,
338 .prepare_write = cifs_prepare_write,
339 .issue_write = cifs_issue_write,
340};
07b92d0d 341
fb157ed2
SF
342/*
343 * Mark as invalid, all open files on tree connections since they
344 * were closed when session to server was lost.
345 */
346void
347cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
348{
349 struct cifsFileInfo *open_file = NULL;
350 struct list_head *tmp;
351 struct list_head *tmp1;
352
353 /* only send once per connect */
2f0e4f03 354 spin_lock(&tcon->tc_lock);
c6e02eef
SP
355 if (tcon->need_reconnect)
356 tcon->status = TID_NEED_RECON;
357
2f0e4f03
SP
358 if (tcon->status != TID_NEED_RECON) {
359 spin_unlock(&tcon->tc_lock);
fb157ed2
SF
360 return;
361 }
362 tcon->status = TID_IN_FILES_INVALIDATE;
2f0e4f03 363 spin_unlock(&tcon->tc_lock);
fb157ed2
SF
364
365 /* list all files open on tree connection and mark them invalid */
366 spin_lock(&tcon->open_file_lock);
367 list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
368 open_file = list_entry(tmp, struct cifsFileInfo, tlist);
369 open_file->invalidHandle = true;
370 open_file->oplock_break_cancelled = true;
371 }
372 spin_unlock(&tcon->open_file_lock);
373
05b98fd2 374 invalidate_all_cached_dirs(tcon);
fb157ed2
SF
375 spin_lock(&tcon->tc_lock);
376 if (tcon->status == TID_IN_FILES_INVALIDATE)
377 tcon->status = TID_NEED_TCON;
378 spin_unlock(&tcon->tc_lock);
379
380 /*
381 * BB Add call to invalidate_inodes(sb) for all superblocks mounted
382 * to this tcon.
383 */
384}
385
e9e62243 386static inline int cifs_convert_flags(unsigned int flags, int rdwr_for_fscache)
1da177e4
LT
387{
388 if ((flags & O_ACCMODE) == O_RDONLY)
389 return GENERIC_READ;
390 else if ((flags & O_ACCMODE) == O_WRONLY)
e9e62243 391 return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE;
1da177e4
LT
392 else if ((flags & O_ACCMODE) == O_RDWR) {
393 /* GENERIC_ALL is too much permission to request
394 can cause unnecessary access denied on create */
395 /* return GENERIC_ALL; */
396 return (GENERIC_READ | GENERIC_WRITE);
397 }
398
e10f7b55
JL
399 return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
400 FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
401 FILE_READ_DATA);
7fc8f4e9 402}
e10f7b55 403
fb157ed2 404#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
608712fe 405static u32 cifs_posix_convert_flags(unsigned int flags)
7fc8f4e9 406{
608712fe 407 u32 posix_flags = 0;
e10f7b55 408
7fc8f4e9 409 if ((flags & O_ACCMODE) == O_RDONLY)
608712fe 410 posix_flags = SMB_O_RDONLY;
7fc8f4e9 411 else if ((flags & O_ACCMODE) == O_WRONLY)
608712fe
JL
412 posix_flags = SMB_O_WRONLY;
413 else if ((flags & O_ACCMODE) == O_RDWR)
414 posix_flags = SMB_O_RDWR;
415
07b92d0d 416 if (flags & O_CREAT) {
608712fe 417 posix_flags |= SMB_O_CREAT;
07b92d0d
SF
418 if (flags & O_EXCL)
419 posix_flags |= SMB_O_EXCL;
420 } else if (flags & O_EXCL)
f96637be
JP
421 cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
422 current->comm, current->tgid);
07b92d0d 423
608712fe
JL
424 if (flags & O_TRUNC)
425 posix_flags |= SMB_O_TRUNC;
426 /* be safe and imply O_SYNC for O_DSYNC */
6b2f3d1f 427 if (flags & O_DSYNC)
608712fe 428 posix_flags |= SMB_O_SYNC;
7fc8f4e9 429 if (flags & O_DIRECTORY)
608712fe 430 posix_flags |= SMB_O_DIRECTORY;
7fc8f4e9 431 if (flags & O_NOFOLLOW)
608712fe 432 posix_flags |= SMB_O_NOFOLLOW;
7fc8f4e9 433 if (flags & O_DIRECT)
608712fe 434 posix_flags |= SMB_O_DIRECT;
7fc8f4e9
SF
435
436 return posix_flags;
1da177e4 437}
fb157ed2 438#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1da177e4
LT
439
440static inline int cifs_get_disposition(unsigned int flags)
441{
442 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
443 return FILE_CREATE;
444 else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
445 return FILE_OVERWRITE_IF;
446 else if ((flags & O_CREAT) == O_CREAT)
447 return FILE_OPEN_IF;
55aa2e09
SF
448 else if ((flags & O_TRUNC) == O_TRUNC)
449 return FILE_OVERWRITE;
1da177e4
LT
450 else
451 return FILE_OPEN;
452}
453
fb157ed2 454#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
f6f1f179 455int cifs_posix_open(const char *full_path, struct inode **pinode,
608712fe 456 struct super_block *sb, int mode, unsigned int f_flags,
6d5786a3 457 __u32 *poplock, __u16 *pnetfid, unsigned int xid)
608712fe
JL
458{
459 int rc;
460 FILE_UNIX_BASIC_INFO *presp_data;
461 __u32 posix_flags = 0;
462 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
463 struct cifs_fattr fattr;
464 struct tcon_link *tlink;
96daf2b0 465 struct cifs_tcon *tcon;
608712fe 466
f96637be 467 cifs_dbg(FYI, "posix open %s\n", full_path);
608712fe
JL
468
469 presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
470 if (presp_data == NULL)
471 return -ENOMEM;
472
473 tlink = cifs_sb_tlink(cifs_sb);
474 if (IS_ERR(tlink)) {
475 rc = PTR_ERR(tlink);
476 goto posix_open_ret;
477 }
478
479 tcon = tlink_tcon(tlink);
480 mode &= ~current_umask();
481
482 posix_flags = cifs_posix_convert_flags(f_flags);
483 rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
484 poplock, full_path, cifs_sb->local_nls,
bc8ebdc4 485 cifs_remap(cifs_sb));
608712fe
JL
486 cifs_put_tlink(tlink);
487
488 if (rc)
489 goto posix_open_ret;
490
491 if (presp_data->Type == cpu_to_le32(-1))
492 goto posix_open_ret; /* open ok, caller does qpathinfo */
493
494 if (!pinode)
495 goto posix_open_ret; /* caller does not need info */
496
497 cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);
498
499 /* get new inode and set it up */
500 if (*pinode == NULL) {
501 cifs_fill_uniqueid(sb, &fattr);
502 *pinode = cifs_iget(sb, &fattr);
503 if (!*pinode) {
504 rc = -ENOMEM;
505 goto posix_open_ret;
506 }
507 } else {
cee8f4f6 508 cifs_revalidate_mapping(*pinode);
e4b61f3b 509 rc = cifs_fattr_to_inode(*pinode, &fattr, false);
608712fe
JL
510 }
511
512posix_open_ret:
513 kfree(presp_data);
514 return rc;
515}
fb157ed2 516#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
608712fe 517
76894f3e
PA
518static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
519 struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
520 struct cifs_fid *fid, unsigned int xid, struct cifs_open_info_data *buf)
eeb910a6
PS
521{
522 int rc;
fb1214e4 523 int desired_access;
eeb910a6 524 int disposition;
3d3ea8e6 525 int create_options = CREATE_NOT_DIR;
b8c32dbb 526 struct TCP_Server_Info *server = tcon->ses->server;
226730b4 527 struct cifs_open_parms oparms;
e9e62243 528 int rdwr_for_fscache = 0;
eeb910a6 529
b8c32dbb 530 if (!server->ops->open)
fb1214e4
PS
531 return -ENOSYS;
532
e9e62243
DH
533 /* If we're caching, we need to be able to fill in around partial writes. */
534 if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
535 rdwr_for_fscache = 1;
536
537 desired_access = cifs_convert_flags(f_flags, rdwr_for_fscache);
eeb910a6
PS
538
539/*********************************************************************
540 * open flag mapping table:
541 *
542 * POSIX Flag CIFS Disposition
543 * ---------- ----------------
544 * O_CREAT FILE_OPEN_IF
545 * O_CREAT | O_EXCL FILE_CREATE
546 * O_CREAT | O_TRUNC FILE_OVERWRITE_IF
547 * O_TRUNC FILE_OVERWRITE
548 * none of the above FILE_OPEN
549 *
550 * Note that there is not a direct match between disposition
551 * FILE_SUPERSEDE (ie create whether or not file exists although
552 * O_CREAT | O_TRUNC is similar but truncates the existing
553 * file rather than creating a new file as FILE_SUPERSEDE does
554 * (which uses the attributes / metadata passed in on open call)
555 *?
556 *? O_SYNC is a reasonable match to CIFS writethrough flag
557 *? and the read write flags match reasonably. O_LARGEFILE
558 *? is irrelevant because largefile support is always used
559 *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY,
560 * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
561 *********************************************************************/
562
563 disposition = cifs_get_disposition(f_flags);
564
565 /* BB pass O_SYNC flag through on file attributes .. BB */
566
1013e760
SF
567 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
568 if (f_flags & O_SYNC)
569 create_options |= CREATE_WRITE_THROUGH;
570
571 if (f_flags & O_DIRECT)
572 create_options |= CREATE_NO_BUFFER;
573
e9e62243 574retry_open:
de036dca
VL
575 oparms = (struct cifs_open_parms) {
576 .tcon = tcon,
577 .cifs_sb = cifs_sb,
578 .desired_access = desired_access,
579 .create_options = cifs_create_options(cifs_sb, create_options),
580 .disposition = disposition,
581 .path = full_path,
582 .fid = fid,
583 };
226730b4
PS
584
585 rc = server->ops->open(xid, &oparms, oplock, buf);
e9e62243
DH
586 if (rc) {
587 if (rc == -EACCES && rdwr_for_fscache == 1) {
588 desired_access = cifs_convert_flags(f_flags, 0);
589 rdwr_for_fscache = 2;
590 goto retry_open;
591 }
76894f3e 592 return rc;
e9e62243
DH
593 }
594 if (rdwr_for_fscache == 2)
595 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
eeb910a6 596
d313852d 597 /* TODO: Add support for calling posix query info but with passing in fid */
eeb910a6
PS
598 if (tcon->unix_ext)
599 rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
600 xid);
601 else
602 rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
42eacf9e 603 xid, fid);
eeb910a6 604
30573a82
PS
605 if (rc) {
606 server->ops->close(xid, tcon, fid);
607 if (rc == -ESTALE)
608 rc = -EOPENSTALE;
609 }
610
eeb910a6
PS
611 return rc;
612}
613
63b7d3a4
PS
614static bool
615cifs_has_mand_locks(struct cifsInodeInfo *cinode)
616{
617 struct cifs_fid_locks *cur;
618 bool has_locks = false;
619
620 down_read(&cinode->lock_sem);
621 list_for_each_entry(cur, &cinode->llist, llist) {
622 if (!list_empty(&cur->locks)) {
623 has_locks = true;
624 break;
625 }
626 }
627 up_read(&cinode->lock_sem);
628 return has_locks;
629}
630
d46b0da7
DW
631void
632cifs_down_write(struct rw_semaphore *sem)
633{
634 while (!down_write_trylock(sem))
635 msleep(10);
636}
637
32546a95 638static void cifsFileInfo_put_work(struct work_struct *work);
173217bd 639void serverclose_work(struct work_struct *work);
32546a95 640
76894f3e
PA
641struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
642 struct tcon_link *tlink, __u32 oplock,
643 const char *symlink_target)
15ecb436 644{
1f1735cb 645 struct dentry *dentry = file_dentry(file);
2b0143b5 646 struct inode *inode = d_inode(dentry);
4b4de76e
PS
647 struct cifsInodeInfo *cinode = CIFS_I(inode);
648 struct cifsFileInfo *cfile;
f45d3416 649 struct cifs_fid_locks *fdlocks;
233839b1 650 struct cifs_tcon *tcon = tlink_tcon(tlink);
63b7d3a4 651 struct TCP_Server_Info *server = tcon->ses->server;
4b4de76e
PS
652
653 cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
654 if (cfile == NULL)
655 return cfile;
656
f45d3416
PS
657 fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
658 if (!fdlocks) {
659 kfree(cfile);
660 return NULL;
661 }
662
76894f3e
PA
663 if (symlink_target) {
664 cfile->symlink_target = kstrdup(symlink_target, GFP_KERNEL);
665 if (!cfile->symlink_target) {
666 kfree(fdlocks);
667 kfree(cfile);
668 return NULL;
669 }
670 }
671
f45d3416
PS
672 INIT_LIST_HEAD(&fdlocks->locks);
673 fdlocks->cfile = cfile;
674 cfile->llist = fdlocks;
f45d3416 675
4b4de76e 676 cfile->count = 1;
4b4de76e
PS
677 cfile->pid = current->tgid;
678 cfile->uid = current_fsuid();
679 cfile->dentry = dget(dentry);
680 cfile->f_flags = file->f_flags;
681 cfile->invalidHandle = false;
860b69a9 682 cfile->deferred_close_scheduled = false;
4b4de76e 683 cfile->tlink = cifs_get_tlink(tlink);
4b4de76e 684 INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
32546a95 685 INIT_WORK(&cfile->put, cifsFileInfo_put_work);
173217bd 686 INIT_WORK(&cfile->serverclose, serverclose_work);
c3f207ab 687 INIT_DELAYED_WORK(&cfile->deferred, smb2_deferred_work_close);
f45d3416 688 mutex_init(&cfile->fh_mutex);
3afca265 689 spin_lock_init(&cfile->file_info_lock);
15ecb436 690
24261fc2
MG
691 cifs_sb_active(inode->i_sb);
692
63b7d3a4
PS
693 /*
694 * If the server returned a read oplock and we have mandatory brlocks,
695 * set oplock level to None.
696 */
53ef1016 697 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
f96637be 698 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
63b7d3a4
PS
699 oplock = 0;
700 }
701
6f582b27
PS
702 cifs_down_write(&cinode->lock_sem);
703 list_add(&fdlocks->llist, &cinode->llist);
704 up_write(&cinode->lock_sem);
705
3afca265 706 spin_lock(&tcon->open_file_lock);
63b7d3a4 707 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
233839b1
PS
708 oplock = fid->pending_open->oplock;
709 list_del(&fid->pending_open->olist);
710
42873b0a 711 fid->purge_cache = false;
63b7d3a4 712 server->ops->set_fid(cfile, fid, oplock);
233839b1
PS
713
714 list_add(&cfile->tlist, &tcon->openFileList);
fae8044c 715 atomic_inc(&tcon->num_local_opens);
3afca265 716
15ecb436 717 /* if readable file instance put first in list*/
487317c9 718 spin_lock(&cinode->open_file_lock);
15ecb436 719 if (file->f_mode & FMODE_READ)
4b4de76e 720 list_add(&cfile->flist, &cinode->openFileList);
15ecb436 721 else
4b4de76e 722 list_add_tail(&cfile->flist, &cinode->openFileList);
487317c9 723 spin_unlock(&cinode->open_file_lock);
3afca265 724 spin_unlock(&tcon->open_file_lock);
15ecb436 725
42873b0a 726 if (fid->purge_cache)
4f73c7d3 727 cifs_zap_mapping(inode);
42873b0a 728
4b4de76e
PS
729 file->private_data = cfile;
730 return cfile;
15ecb436
JL
731}
732
764a1b1a
JL
733struct cifsFileInfo *
734cifsFileInfo_get(struct cifsFileInfo *cifs_file)
735{
3afca265 736 spin_lock(&cifs_file->file_info_lock);
764a1b1a 737 cifsFileInfo_get_locked(cifs_file);
3afca265 738 spin_unlock(&cifs_file->file_info_lock);
764a1b1a
JL
739 return cifs_file;
740}
741
32546a95
RS
742static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
743{
744 struct inode *inode = d_inode(cifs_file->dentry);
745 struct cifsInodeInfo *cifsi = CIFS_I(inode);
746 struct cifsLockInfo *li, *tmp;
747 struct super_block *sb = inode->i_sb;
748
749 /*
750 * Delete any outstanding lock records. We'll lose them when the file
751 * is closed anyway.
752 */
753 cifs_down_write(&cifsi->lock_sem);
754 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
755 list_del(&li->llist);
756 cifs_del_lock_waiters(li);
757 kfree(li);
758 }
759 list_del(&cifs_file->llist->llist);
760 kfree(cifs_file->llist);
761 up_write(&cifsi->lock_sem);
762
763 cifs_put_tlink(cifs_file->tlink);
764 dput(cifs_file->dentry);
765 cifs_sb_deactive(sb);
76894f3e 766 kfree(cifs_file->symlink_target);
32546a95
RS
767 kfree(cifs_file);
768}
769
770static void cifsFileInfo_put_work(struct work_struct *work)
771{
772 struct cifsFileInfo *cifs_file = container_of(work,
773 struct cifsFileInfo, put);
774
775 cifsFileInfo_put_final(cifs_file);
776}
777
173217bd
RB
778void serverclose_work(struct work_struct *work)
779{
780 struct cifsFileInfo *cifs_file = container_of(work,
781 struct cifsFileInfo, serverclose);
782
783 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
784
785 struct TCP_Server_Info *server = tcon->ses->server;
786 int rc = 0;
787 int retries = 0;
788 int MAX_RETRIES = 4;
789
790 do {
791 if (server->ops->close_getattr)
792 rc = server->ops->close_getattr(0, tcon, cifs_file);
793 else if (server->ops->close)
794 rc = server->ops->close(0, tcon, &cifs_file->fid);
795
796 if (rc == -EBUSY || rc == -EAGAIN) {
797 retries++;
798 msleep(250);
799 }
800 } while ((rc == -EBUSY || rc == -EAGAIN) && (retries < MAX_RETRIES)
801 );
802
803 if (retries == MAX_RETRIES)
804 pr_warn("Serverclose failed %d times, giving up\n", MAX_RETRIES);
805
806 if (cifs_file->offload)
807 queue_work(fileinfo_put_wq, &cifs_file->put);
808 else
809 cifsFileInfo_put_final(cifs_file);
810}
811
b98749ca
AA
812/**
813 * cifsFileInfo_put - release a reference of file priv data
814 *
815 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
607dfc79
SF
816 *
817 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
cdff08e7 818 */
b33879aa 819void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
b98749ca 820{
32546a95 821 _cifsFileInfo_put(cifs_file, true, true);
b98749ca
AA
822}
823
824/**
825 * _cifsFileInfo_put - release a reference of file priv data
826 *
827 * This may involve closing the filehandle @cifs_file out on the
32546a95
RS
828 * server. Must be called without holding tcon->open_file_lock,
829 * cinode->open_file_lock and cifs_file->file_info_lock.
b98749ca
AA
830 *
831 * If @wait_for_oplock_handler is true and we are releasing the last
832 * reference, wait for any running oplock break handler of the file
607dfc79
SF
833 * and cancel any pending one.
834 *
835 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
836 * @wait_oplock_handler: must be false if called from oplock_break_handler
837 * @offload: not offloaded on close and oplock breaks
b98749ca
AA
838 *
839 */
32546a95
RS
840void _cifsFileInfo_put(struct cifsFileInfo *cifs_file,
841 bool wait_oplock_handler, bool offload)
b33879aa 842{
2b0143b5 843 struct inode *inode = d_inode(cifs_file->dentry);
96daf2b0 844 struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
233839b1 845 struct TCP_Server_Info *server = tcon->ses->server;
e66673e3 846 struct cifsInodeInfo *cifsi = CIFS_I(inode);
24261fc2
MG
847 struct super_block *sb = inode->i_sb;
848 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
2bff0659 849 struct cifs_fid fid = {};
233839b1 850 struct cifs_pending_open open;
ca7df8e0 851 bool oplock_break_cancelled;
173217bd 852 bool serverclose_offloaded = false;
cdff08e7 853
3afca265 854 spin_lock(&tcon->open_file_lock);
1a67c415 855 spin_lock(&cifsi->open_file_lock);
3afca265 856 spin_lock(&cifs_file->file_info_lock);
173217bd
RB
857
858 cifs_file->offload = offload;
5f6dbc9e 859 if (--cifs_file->count > 0) {
3afca265 860 spin_unlock(&cifs_file->file_info_lock);
1a67c415 861 spin_unlock(&cifsi->open_file_lock);
3afca265 862 spin_unlock(&tcon->open_file_lock);
cdff08e7
SF
863 return;
864 }
3afca265 865 spin_unlock(&cifs_file->file_info_lock);
cdff08e7 866
233839b1
PS
867 if (server->ops->get_lease_key)
868 server->ops->get_lease_key(inode, &fid);
869
870 /* store open in pending opens to make sure we don't miss lease break */
871 cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
872
cdff08e7
SF
873 /* remove it from the lists */
874 list_del(&cifs_file->flist);
875 list_del(&cifs_file->tlist);
fae8044c 876 atomic_dec(&tcon->num_local_opens);
cdff08e7
SF
877
878 if (list_empty(&cifsi->openFileList)) {
f96637be 879 cifs_dbg(FYI, "closing last open instance for inode %p\n",
2b0143b5 880 d_inode(cifs_file->dentry));
25364138
PS
881 /*
882 * In strict cache mode we need invalidate mapping on the last
883 * close because it may cause a error when we open this file
884 * again and get at least level II oplock.
885 */
4f8ba8a0 886 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
aff8d5ca 887 set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
c6723628 888 cifs_set_oplock_level(cifsi, 0);
cdff08e7 889 }
3afca265 890
1a67c415 891 spin_unlock(&cifsi->open_file_lock);
3afca265 892 spin_unlock(&tcon->open_file_lock);
cdff08e7 893
b98749ca
AA
894 oplock_break_cancelled = wait_oplock_handler ?
895 cancel_work_sync(&cifs_file->oplock_break) : false;
ad635942 896
cdff08e7 897 if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
0ff78a22 898 struct TCP_Server_Info *server = tcon->ses->server;
6d5786a3 899 unsigned int xid;
173217bd 900 int rc = 0;
0ff78a22 901
6d5786a3 902 xid = get_xid();
43f8a6a7 903 if (server->ops->close_getattr)
173217bd 904 rc = server->ops->close_getattr(xid, tcon, cifs_file);
43f8a6a7 905 else if (server->ops->close)
173217bd 906 rc = server->ops->close(xid, tcon, &cifs_file->fid);
760ad0ca 907 _free_xid(xid);
173217bd
RB
908
909 if (rc == -EBUSY || rc == -EAGAIN) {
910 // Server close failed, hence offloading it as an async op
911 queue_work(serverclose_wq, &cifs_file->serverclose);
912 serverclose_offloaded = true;
913 }
cdff08e7
SF
914 }
915
ca7df8e0
SP
916 if (oplock_break_cancelled)
917 cifs_done_oplock_break(cifsi);
918
233839b1
PS
919 cifs_del_pending_open(&open);
920
173217bd
RB
921 // if serverclose has been offloaded to wq (on failure), it will
922 // handle offloading put as well. If serverclose not offloaded,
923 // we need to handle offloading put here.
924 if (!serverclose_offloaded) {
925 if (offload)
926 queue_work(fileinfo_put_wq, &cifs_file->put);
927 else
928 cifsFileInfo_put_final(cifs_file);
929 }
b33879aa
JL
930}
931
1da177e4 932int cifs_open(struct inode *inode, struct file *file)
233839b1 933
1da177e4
LT
934{
935 int rc = -EACCES;
6d5786a3 936 unsigned int xid;
590a3fe0 937 __u32 oplock;
1da177e4 938 struct cifs_sb_info *cifs_sb;
b8c32dbb 939 struct TCP_Server_Info *server;
96daf2b0 940 struct cifs_tcon *tcon;
7ffec372 941 struct tcon_link *tlink;
fb1214e4 942 struct cifsFileInfo *cfile = NULL;
f6a9bc33
AV
943 void *page;
944 const char *full_path;
7e12eddb 945 bool posix_open_ok = false;
2bff0659 946 struct cifs_fid fid = {};
233839b1 947 struct cifs_pending_open open;
76894f3e 948 struct cifs_open_info_data data = {};
1da177e4 949
6d5786a3 950 xid = get_xid();
1da177e4
LT
951
952 cifs_sb = CIFS_SB(inode->i_sb);
087f757b
SF
953 if (unlikely(cifs_forced_shutdown(cifs_sb))) {
954 free_xid(xid);
955 return -EIO;
956 }
957
7ffec372
JL
958 tlink = cifs_sb_tlink(cifs_sb);
959 if (IS_ERR(tlink)) {
6d5786a3 960 free_xid(xid);
7ffec372
JL
961 return PTR_ERR(tlink);
962 }
963 tcon = tlink_tcon(tlink);
b8c32dbb 964 server = tcon->ses->server;
1da177e4 965
f6a9bc33
AV
966 page = alloc_dentry_path();
967 full_path = build_path_from_dentry(file_dentry(file), page);
968 if (IS_ERR(full_path)) {
969 rc = PTR_ERR(full_path);
232341ba 970 goto out;
1da177e4
LT
971 }
972
f96637be 973 cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
b6b38f70 974 inode, file->f_flags, full_path);
276a74a4 975
787aded6
NJ
976 if (file->f_flags & O_DIRECT &&
977 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
978 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
979 file->f_op = &cifs_file_direct_nobrl_ops;
980 else
981 file->f_op = &cifs_file_direct_ops;
982 }
983
c3f207ab
RS
984 /* Get the cached handle as SMB2 close is deferred */
985 rc = cifs_get_readable_path(tcon, full_path, &cfile);
986 if (rc == 0) {
987 if (file->f_flags == cfile->f_flags) {
988 file->private_data = cfile;
860b69a9 989 spin_lock(&CIFS_I(inode)->deferred_lock);
c3f207ab
RS
990 cifs_del_deferred_close(cfile);
991 spin_unlock(&CIFS_I(inode)->deferred_lock);
70431bfd 992 goto use_cache;
c3f207ab 993 } else {
c3f207ab
RS
994 _cifsFileInfo_put(cfile, true, false);
995 }
c3f207ab
RS
996 }
997
233839b1 998 if (server->oplocks)
276a74a4
SF
999 oplock = REQ_OPLOCK;
1000 else
1001 oplock = 0;
1002
fb157ed2 1003#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
64cc2c63 1004 if (!tcon->broken_posix_open && tcon->unix_ext &&
29e20f9c
PS
1005 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
1006 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
276a74a4 1007 /* can not refresh inode info since size could be stale */
2422f676 1008 rc = cifs_posix_open(full_path, &inode, inode->i_sb,
8401e936 1009 cifs_sb->ctx->file_mode /* ignored */,
fb1214e4 1010 file->f_flags, &oplock, &fid.netfid, xid);
276a74a4 1011 if (rc == 0) {
f96637be 1012 cifs_dbg(FYI, "posix open succeeded\n");
7e12eddb 1013 posix_open_ok = true;
64cc2c63
SF
1014 } else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
1015 if (tcon->ses->serverNOS)
f96637be 1016 cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
b438fcf1 1017 tcon->ses->ip_addr,
f96637be 1018 tcon->ses->serverNOS);
64cc2c63 1019 tcon->broken_posix_open = true;
276a74a4
SF
1020 } else if ((rc != -EIO) && (rc != -EREMOTE) &&
1021 (rc != -EOPNOTSUPP)) /* path not found or net err */
1022 goto out;
fb1214e4
PS
1023 /*
1024 * Else fallthrough to retry open the old way on network i/o
1025 * or DFS errors.
1026 */
276a74a4 1027 }
fb157ed2 1028#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
276a74a4 1029
233839b1
PS
1030 if (server->ops->get_lease_key)
1031 server->ops->get_lease_key(inode, &fid);
1032
1033 cifs_add_pending_open(&fid, tlink, &open);
1034
7e12eddb 1035 if (!posix_open_ok) {
b8c32dbb
PS
1036 if (server->ops->get_lease_key)
1037 server->ops->get_lease_key(inode, &fid);
1038
76894f3e
PA
1039 rc = cifs_nt_open(full_path, inode, cifs_sb, tcon, file->f_flags, &oplock, &fid,
1040 xid, &data);
233839b1
PS
1041 if (rc) {
1042 cifs_del_pending_open(&open);
7e12eddb 1043 goto out;
233839b1 1044 }
7e12eddb 1045 }
47c78b7f 1046
76894f3e 1047 cfile = cifs_new_fileinfo(&fid, file, tlink, oplock, data.symlink_target);
fb1214e4 1048 if (cfile == NULL) {
b8c32dbb
PS
1049 if (server->ops->close)
1050 server->ops->close(xid, tcon, &fid);
233839b1 1051 cifs_del_pending_open(&open);
1da177e4
LT
1052 rc = -ENOMEM;
1053 goto out;
1054 }
1da177e4 1055
fb157ed2 1056#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
7e12eddb 1057 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
fb1214e4
PS
1058 /*
1059 * Time to set mode which we can not set earlier due to
1060 * problems creating new read-only files.
1061 */
7e12eddb
PS
1062 struct cifs_unix_set_info_args args = {
1063 .mode = inode->i_mode,
49418b2c
EB
1064 .uid = INVALID_UID, /* no change */
1065 .gid = INVALID_GID, /* no change */
7e12eddb
PS
1066 .ctime = NO_CHANGE_64,
1067 .atime = NO_CHANGE_64,
1068 .mtime = NO_CHANGE_64,
1069 .device = 0,
1070 };
fb1214e4
PS
1071 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
1072 cfile->pid);
1da177e4 1073 }
fb157ed2 1074#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1da177e4 1075
70431bfd
DH
1076use_cache:
1077 fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
1078 file->f_mode & FMODE_WRITE);
e9e62243
DH
1079 if (!(file->f_flags & O_DIRECT))
1080 goto out;
1081 if ((file->f_flags & (O_ACCMODE | O_APPEND)) == O_RDONLY)
1082 goto out;
1083 cifs_invalidate_cache(file_inode(file), FSCACHE_INVAL_DIO_WRITE);
70431bfd 1084
1da177e4 1085out:
f6a9bc33 1086 free_dentry_path(page);
6d5786a3 1087 free_xid(xid);
7ffec372 1088 cifs_put_tlink(tlink);
76894f3e 1089 cifs_free_open_info(&data);
1da177e4
LT
1090 return rc;
1091}
1092
fb157ed2 1093#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
f152fd5f 1094static int cifs_push_posix_locks(struct cifsFileInfo *cfile);
fb157ed2 1095#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
f152fd5f 1096
2ae78ba8
PS
1097/*
1098 * Try to reacquire byte range locks that were released when session
f152fd5f 1099 * to server was lost.
2ae78ba8 1100 */
f152fd5f
PS
1101static int
1102cifs_relock_file(struct cifsFileInfo *cfile)
1da177e4 1103{
2b0143b5 1104 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
f152fd5f 1105 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1da177e4 1106 int rc = 0;
fb157ed2
SF
1107#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1108 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1109#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1da177e4 1110
560d3889 1111 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
f152fd5f 1112 if (cinode->can_cache_brlcks) {
689c3db4
PS
1113 /* can cache locks - no need to relock */
1114 up_read(&cinode->lock_sem);
f152fd5f
PS
1115 return rc;
1116 }
1117
fb157ed2 1118#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
f152fd5f
PS
1119 if (cap_unix(tcon->ses) &&
1120 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1121 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
1122 rc = cifs_push_posix_locks(cfile);
1123 else
fb157ed2 1124#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
f152fd5f 1125 rc = tcon->ses->server->ops->push_mand_locks(cfile);
1da177e4 1126
689c3db4 1127 up_read(&cinode->lock_sem);
1da177e4
LT
1128 return rc;
1129}
1130
2ae78ba8
PS
1131static int
1132cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
1da177e4
LT
1133{
1134 int rc = -EACCES;
6d5786a3 1135 unsigned int xid;
590a3fe0 1136 __u32 oplock;
1da177e4 1137 struct cifs_sb_info *cifs_sb;
96daf2b0 1138 struct cifs_tcon *tcon;
2ae78ba8
PS
1139 struct TCP_Server_Info *server;
1140 struct cifsInodeInfo *cinode;
fb8c4b14 1141 struct inode *inode;
f6a9bc33
AV
1142 void *page;
1143 const char *full_path;
2ae78ba8 1144 int desired_access;
1da177e4 1145 int disposition = FILE_OPEN;
3d3ea8e6 1146 int create_options = CREATE_NOT_DIR;
226730b4 1147 struct cifs_open_parms oparms;
e9e62243 1148 int rdwr_for_fscache = 0;
1da177e4 1149
6d5786a3 1150 xid = get_xid();
2ae78ba8
PS
1151 mutex_lock(&cfile->fh_mutex);
1152 if (!cfile->invalidHandle) {
1153 mutex_unlock(&cfile->fh_mutex);
6d5786a3 1154 free_xid(xid);
f6a9bc33 1155 return 0;
1da177e4
LT
1156 }
1157
2b0143b5 1158 inode = d_inode(cfile->dentry);
1da177e4 1159 cifs_sb = CIFS_SB(inode->i_sb);
2ae78ba8
PS
1160 tcon = tlink_tcon(cfile->tlink);
1161 server = tcon->ses->server;
1162
1163 /*
1164 * Can not grab rename sem here because various ops, including those
1165 * that already have the rename sem can end up causing writepage to get
1166 * called and if the server was down that means we end up here, and we
1167 * can never tell if the caller already has the rename_sem.
1168 */
f6a9bc33
AV
1169 page = alloc_dentry_path();
1170 full_path = build_path_from_dentry(cfile->dentry, page);
1171 if (IS_ERR(full_path)) {
2ae78ba8 1172 mutex_unlock(&cfile->fh_mutex);
f6a9bc33 1173 free_dentry_path(page);
6d5786a3 1174 free_xid(xid);
f6a9bc33 1175 return PTR_ERR(full_path);
1da177e4
LT
1176 }
1177
f96637be
JP
1178 cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
1179 inode, cfile->f_flags, full_path);
1da177e4 1180
10b9b98e 1181 if (tcon->ses->server->oplocks)
1da177e4
LT
1182 oplock = REQ_OPLOCK;
1183 else
4b18f2a9 1184 oplock = 0;
1da177e4 1185
fb157ed2 1186#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
29e20f9c 1187 if (tcon->unix_ext && cap_unix(tcon->ses) &&
7fc8f4e9 1188 (CIFS_UNIX_POSIX_PATH_OPS_CAP &
29e20f9c 1189 le64_to_cpu(tcon->fsUnixInfo.Capability))) {
608712fe
JL
1190 /*
1191 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
1192 * original open. Must mask them off for a reopen.
1193 */
2ae78ba8 1194 unsigned int oflags = cfile->f_flags &
15886177 1195 ~(O_CREAT | O_EXCL | O_TRUNC);
608712fe 1196
2422f676 1197 rc = cifs_posix_open(full_path, NULL, inode->i_sb,
8401e936 1198 cifs_sb->ctx->file_mode /* ignored */,
9cbc0b73 1199 oflags, &oplock, &cfile->fid.netfid, xid);
7fc8f4e9 1200 if (rc == 0) {
f96637be 1201 cifs_dbg(FYI, "posix reopen succeeded\n");
fe090e4e 1202 oparms.reconnect = true;
7fc8f4e9
SF
1203 goto reopen_success;
1204 }
2ae78ba8
PS
1205 /*
1206 * fallthrough to retry open the old way on errors, especially
1207 * in the reconnect path it is important to retry hard
1208 */
7fc8f4e9 1209 }
fb157ed2 1210#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
7fc8f4e9 1211
e9e62243
DH
1212 /* If we're caching, we need to be able to fill in around partial writes. */
1213 if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
1214 rdwr_for_fscache = 1;
1215
1216 desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
7fc8f4e9 1217
44805b0e
PS
1218 /* O_SYNC also has bit for O_DSYNC so following check picks up either */
1219 if (cfile->f_flags & O_SYNC)
1220 create_options |= CREATE_WRITE_THROUGH;
1221
1222 if (cfile->f_flags & O_DIRECT)
1223 create_options |= CREATE_NO_BUFFER;
1224
b8c32dbb 1225 if (server->ops->get_lease_key)
9cbc0b73 1226 server->ops->get_lease_key(inode, &cfile->fid);
b8c32dbb 1227
e9e62243 1228retry_open:
de036dca
VL
1229 oparms = (struct cifs_open_parms) {
1230 .tcon = tcon,
1231 .cifs_sb = cifs_sb,
1232 .desired_access = desired_access,
1233 .create_options = cifs_create_options(cifs_sb, create_options),
1234 .disposition = disposition,
1235 .path = full_path,
1236 .fid = &cfile->fid,
1237 .reconnect = true,
1238 };
226730b4 1239
2ae78ba8
PS
1240 /*
1241 * Can not refresh inode by passing in file_info buf to be returned by
d81b8a40 1242 * ops->open and then calling get_inode_info with returned buf since
2ae78ba8
PS
1243 * file might have write behind data that needs to be flushed and server
1244 * version of file size can be stale. If we knew for sure that inode was
1245 * not dirty locally we could do this.
1246 */
226730b4 1247 rc = server->ops->open(xid, &oparms, &oplock, NULL);
b33fcf1c
PS
1248 if (rc == -ENOENT && oparms.reconnect == false) {
1249 /* durable handle timeout is expired - open the file again */
1250 rc = server->ops->open(xid, &oparms, &oplock, NULL);
1251 /* indicate that we need to relock the file */
1252 oparms.reconnect = true;
1253 }
e9e62243
DH
1254 if (rc == -EACCES && rdwr_for_fscache == 1) {
1255 desired_access = cifs_convert_flags(cfile->f_flags, 0);
1256 rdwr_for_fscache = 2;
1257 goto retry_open;
1258 }
b33fcf1c 1259
1da177e4 1260 if (rc) {
2ae78ba8 1261 mutex_unlock(&cfile->fh_mutex);
f96637be
JP
1262 cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
1263 cifs_dbg(FYI, "oplock: %d\n", oplock);
15886177
JL
1264 goto reopen_error_exit;
1265 }
1266
e9e62243
DH
1267 if (rdwr_for_fscache == 2)
1268 cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
1269
fb157ed2 1270#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
7fc8f4e9 1271reopen_success:
fb157ed2 1272#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
2ae78ba8
PS
1273 cfile->invalidHandle = false;
1274 mutex_unlock(&cfile->fh_mutex);
1275 cinode = CIFS_I(inode);
15886177
JL
1276
1277 if (can_flush) {
1278 rc = filemap_write_and_wait(inode->i_mapping);
9a66396f
PS
1279 if (!is_interrupt_error(rc))
1280 mapping_set_error(inode->i_mapping, rc);
15886177 1281
102466f3
PA
1282 if (tcon->posix_extensions) {
1283 rc = smb311_posix_get_inode_info(&inode, full_path,
1284 NULL, inode->i_sb, xid);
1285 } else if (tcon->unix_ext) {
2ae78ba8
PS
1286 rc = cifs_get_inode_info_unix(&inode, full_path,
1287 inode->i_sb, xid);
102466f3 1288 } else {
2ae78ba8
PS
1289 rc = cifs_get_inode_info(&inode, full_path, NULL,
1290 inode->i_sb, xid, NULL);
102466f3 1291 }
2ae78ba8
PS
1292 }
1293 /*
1294 * Else we are writing out data to server already and could deadlock if
1295 * we tried to flush data, and since we do not know if we have data that
1296 * would invalidate the current end of file on the server we can not go
1297 * to the server to get the new inode info.
1298 */
1299
de740250
PS
1300 /*
1301 * If the server returned a read oplock and we have mandatory brlocks,
1302 * set oplock level to None.
1303 */
1304 if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
1305 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1306 oplock = 0;
1307 }
1308
9cbc0b73
PS
1309 server->ops->set_fid(cfile, &cfile->fid, oplock);
1310 if (oparms.reconnect)
1311 cifs_relock_file(cfile);
15886177
JL
1312
1313reopen_error_exit:
f6a9bc33 1314 free_dentry_path(page);
6d5786a3 1315 free_xid(xid);
1da177e4
LT
1316 return rc;
1317}
1318
c3f207ab
RS
1319void smb2_deferred_work_close(struct work_struct *work)
1320{
1321 struct cifsFileInfo *cfile = container_of(work,
1322 struct cifsFileInfo, deferred.work);
1323
1324 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1325 cifs_del_deferred_close(cfile);
860b69a9 1326 cfile->deferred_close_scheduled = false;
c3f207ab
RS
1327 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
1328 _cifsFileInfo_put(cfile, true, false);
1329}
1330
dc528770
BS
1331static bool
1332smb2_can_defer_close(struct inode *inode, struct cifs_deferred_close *dclose)
1333{
1334 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1335 struct cifsInodeInfo *cinode = CIFS_I(inode);
1336
1337 return (cifs_sb->ctx->closetimeo && cinode->lease_granted && dclose &&
1338 (cinode->oplock == CIFS_CACHE_RHW_FLG ||
1339 cinode->oplock == CIFS_CACHE_RH_FLG) &&
1340 !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags));
1341
1342}
1343
1da177e4
LT
1344int cifs_close(struct inode *inode, struct file *file)
1345{
c3f207ab
RS
1346 struct cifsFileInfo *cfile;
1347 struct cifsInodeInfo *cinode = CIFS_I(inode);
1348 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1349 struct cifs_deferred_close *dclose;
1350
70431bfd
DH
1351 cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
1352
77970693 1353 if (file->private_data != NULL) {
c3f207ab 1354 cfile = file->private_data;
77970693 1355 file->private_data = NULL;
c3f207ab 1356 dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
dc528770
BS
1357 if ((cfile->status_file_deleted == false) &&
1358 (smb2_can_defer_close(inode, dclose))) {
4f222622 1359 if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
8f22ce70
JL
1360 inode_set_mtime_to_ts(inode,
1361 inode_set_ctime_current(inode));
18d04062 1362 }
c3f207ab
RS
1363 spin_lock(&cinode->deferred_lock);
1364 cifs_add_deferred_close(cfile, dclose);
860b69a9
RS
1365 if (cfile->deferred_close_scheduled &&
1366 delayed_work_pending(&cfile->deferred)) {
9687c85d
RS
1367 /*
1368 * If there is no pending work, mod_delayed_work queues new work.
1369 * So, Increase the ref count to avoid use-after-free.
1370 */
1371 if (!mod_delayed_work(deferredclose_wq,
5efdd912 1372 &cfile->deferred, cifs_sb->ctx->closetimeo))
9687c85d 1373 cifsFileInfo_get(cfile);
c3f207ab
RS
1374 } else {
1375 /* Deferred close for files */
1376 queue_delayed_work(deferredclose_wq,
5efdd912 1377 &cfile->deferred, cifs_sb->ctx->closetimeo);
860b69a9 1378 cfile->deferred_close_scheduled = true;
c3f207ab
RS
1379 spin_unlock(&cinode->deferred_lock);
1380 return 0;
1381 }
1382 spin_unlock(&cinode->deferred_lock);
1383 _cifsFileInfo_put(cfile, true, false);
1384 } else {
1385 _cifsFileInfo_put(cfile, true, false);
1386 kfree(dclose);
1387 }
77970693 1388 }
7ee1af76 1389
cdff08e7
SF
1390 /* return code from the ->release op is always ignored */
1391 return 0;
1da177e4
LT
1392}
1393
52ace1ef
SF
1394void
1395cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
1396{
9543c8ab 1397 struct cifsFileInfo *open_file, *tmp;
f2cca6a7
PS
1398 struct list_head tmp_list;
1399
96a988ff
PS
1400 if (!tcon->use_persistent || !tcon->need_reopen_files)
1401 return;
1402
1403 tcon->need_reopen_files = false;
1404
a0a3036b 1405 cifs_dbg(FYI, "Reopen persistent handles\n");
f2cca6a7 1406 INIT_LIST_HEAD(&tmp_list);
52ace1ef
SF
1407
1408 /* list all files open on tree connection, reopen resilient handles */
1409 spin_lock(&tcon->open_file_lock);
9543c8ab 1410 list_for_each_entry(open_file, &tcon->openFileList, tlist) {
f2cca6a7
PS
1411 if (!open_file->invalidHandle)
1412 continue;
1413 cifsFileInfo_get(open_file);
1414 list_add_tail(&open_file->rlist, &tmp_list);
52ace1ef
SF
1415 }
1416 spin_unlock(&tcon->open_file_lock);
f2cca6a7 1417
9543c8ab 1418 list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) {
96a988ff
PS
1419 if (cifs_reopen_file(open_file, false /* do not flush */))
1420 tcon->need_reopen_files = true;
f2cca6a7
PS
1421 list_del_init(&open_file->rlist);
1422 cifsFileInfo_put(open_file);
1423 }
52ace1ef
SF
1424}
1425
1da177e4
LT
1426int cifs_closedir(struct inode *inode, struct file *file)
1427{
1428 int rc = 0;
6d5786a3 1429 unsigned int xid;
4b4de76e 1430 struct cifsFileInfo *cfile = file->private_data;
92fc65a7
PS
1431 struct cifs_tcon *tcon;
1432 struct TCP_Server_Info *server;
1433 char *buf;
1da177e4 1434
f96637be 1435 cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);
1da177e4 1436
92fc65a7
PS
1437 if (cfile == NULL)
1438 return rc;
1439
6d5786a3 1440 xid = get_xid();
92fc65a7
PS
1441 tcon = tlink_tcon(cfile->tlink);
1442 server = tcon->ses->server;
1da177e4 1443
f96637be 1444 cifs_dbg(FYI, "Freeing private data in close dir\n");
3afca265 1445 spin_lock(&cfile->file_info_lock);
52755808 1446 if (server->ops->dir_needs_close(cfile)) {
92fc65a7 1447 cfile->invalidHandle = true;
3afca265 1448 spin_unlock(&cfile->file_info_lock);
92fc65a7
PS
1449 if (server->ops->close_dir)
1450 rc = server->ops->close_dir(xid, tcon, &cfile->fid);
1451 else
1452 rc = -ENOSYS;
f96637be 1453 cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
92fc65a7
PS
1454 /* not much we can do if it fails anyway, ignore rc */
1455 rc = 0;
1456 } else
3afca265 1457 spin_unlock(&cfile->file_info_lock);
92fc65a7
PS
1458
1459 buf = cfile->srch_inf.ntwrk_buf_start;
1460 if (buf) {
f96637be 1461 cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
92fc65a7
PS
1462 cfile->srch_inf.ntwrk_buf_start = NULL;
1463 if (cfile->srch_inf.smallBuf)
1464 cifs_small_buf_release(buf);
1465 else
1466 cifs_buf_release(buf);
1da177e4 1467 }
92fc65a7
PS
1468
1469 cifs_put_tlink(cfile->tlink);
1470 kfree(file->private_data);
1471 file->private_data = NULL;
1da177e4 1472 /* BB can we lock the filestruct while this is going on? */
6d5786a3 1473 free_xid(xid);
1da177e4
LT
1474 return rc;
1475}
1476
85160e03 1477static struct cifsLockInfo *
9645759c 1478cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
7ee1af76 1479{
a88b4707 1480 struct cifsLockInfo *lock =
fb8c4b14 1481 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
a88b4707
PS
1482 if (!lock)
1483 return lock;
1484 lock->offset = offset;
1485 lock->length = length;
1486 lock->type = type;
a88b4707 1487 lock->pid = current->tgid;
9645759c 1488 lock->flags = flags;
a88b4707
PS
1489 INIT_LIST_HEAD(&lock->blist);
1490 init_waitqueue_head(&lock->block_q);
1491 return lock;
85160e03
PS
1492}
1493
f7ba7fe6 1494void
85160e03
PS
1495cifs_del_lock_waiters(struct cifsLockInfo *lock)
1496{
1497 struct cifsLockInfo *li, *tmp;
1498 list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
1499 list_del_init(&li->blist);
1500 wake_up(&li->block_q);
1501 }
1502}
1503
081c0414
PS
1504#define CIFS_LOCK_OP 0
1505#define CIFS_READ_OP 1
1506#define CIFS_WRITE_OP 2
1507
1508/* @rw_check : 0 - no op, 1 - read, 2 - write */
85160e03 1509static bool
f45d3416 1510cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
9645759c
RS
1511 __u64 length, __u8 type, __u16 flags,
1512 struct cifsFileInfo *cfile,
081c0414 1513 struct cifsLockInfo **conf_lock, int rw_check)
85160e03 1514{
fbd35aca 1515 struct cifsLockInfo *li;
f45d3416 1516 struct cifsFileInfo *cur_cfile = fdlocks->cfile;
106dc538 1517 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03 1518
f45d3416 1519 list_for_each_entry(li, &fdlocks->locks, llist) {
85160e03
PS
1520 if (offset + length <= li->offset ||
1521 offset >= li->offset + li->length)
1522 continue;
081c0414
PS
1523 if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
1524 server->ops->compare_fids(cfile, cur_cfile)) {
1525 /* shared lock prevents write op through the same fid */
1526 if (!(li->type & server->vals->shared_lock_type) ||
1527 rw_check != CIFS_WRITE_OP)
1528 continue;
1529 }
f45d3416
PS
1530 if ((type & server->vals->shared_lock_type) &&
1531 ((server->ops->compare_fids(cfile, cur_cfile) &&
1532 current->tgid == li->pid) || type == li->type))
85160e03 1533 continue;
9645759c
RS
1534 if (rw_check == CIFS_LOCK_OP &&
1535 (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
1536 server->ops->compare_fids(cfile, cur_cfile))
1537 continue;
579f9053
PS
1538 if (conf_lock)
1539 *conf_lock = li;
f45d3416 1540 return true;
85160e03
PS
1541 }
1542 return false;
1543}
1544
579f9053 1545bool
55157dfb 1546cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
9645759c
RS
1547 __u8 type, __u16 flags,
1548 struct cifsLockInfo **conf_lock, int rw_check)
161ebf9f 1549{
fbd35aca 1550 bool rc = false;
f45d3416 1551 struct cifs_fid_locks *cur;
2b0143b5 1552 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
fbd35aca 1553
f45d3416
PS
1554 list_for_each_entry(cur, &cinode->llist, llist) {
1555 rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
9645759c
RS
1556 flags, cfile, conf_lock,
1557 rw_check);
fbd35aca
PS
1558 if (rc)
1559 break;
1560 }
fbd35aca
PS
1561
1562 return rc;
161ebf9f
PS
1563}
1564
9a5101c8
PS
1565/*
1566 * Check if there is another lock that prevents us to set the lock (mandatory
1567 * style). If such a lock exists, update the flock structure with its
1568 * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks
1569 * or leave it the same if we can't. Returns 0 if we don't need to request to
1570 * the server or 1 otherwise.
1571 */
85160e03 1572static int
fbd35aca
PS
1573cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
1574 __u8 type, struct file_lock *flock)
85160e03
PS
1575{
1576 int rc = 0;
1577 struct cifsLockInfo *conf_lock;
2b0143b5 1578 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
106dc538 1579 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
85160e03
PS
1580 bool exist;
1581
1b4b55a1 1582 down_read(&cinode->lock_sem);
85160e03 1583
55157dfb 1584 exist = cifs_find_lock_conflict(cfile, offset, length, type,
84e286c1 1585 flock->c.flc_flags, &conf_lock,
9645759c 1586 CIFS_LOCK_OP);
85160e03
PS
1587 if (exist) {
1588 flock->fl_start = conf_lock->offset;
1589 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
84e286c1 1590 flock->c.flc_pid = conf_lock->pid;
106dc538 1591 if (conf_lock->type & server->vals->shared_lock_type)
84e286c1 1592 flock->c.flc_type = F_RDLCK;
85160e03 1593 else
84e286c1 1594 flock->c.flc_type = F_WRLCK;
85160e03
PS
1595 } else if (!cinode->can_cache_brlcks)
1596 rc = 1;
1597 else
84e286c1 1598 flock->c.flc_type = F_UNLCK;
85160e03 1599
1b4b55a1 1600 up_read(&cinode->lock_sem);
85160e03
PS
1601 return rc;
1602}
1603
161ebf9f 1604static void
fbd35aca 1605cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
85160e03 1606{
2b0143b5 1607 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
d46b0da7 1608 cifs_down_write(&cinode->lock_sem);
f45d3416 1609 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 1610 up_write(&cinode->lock_sem);
7ee1af76
JA
1611}
1612
9a5101c8
PS
1613/*
1614 * Set the byte-range lock (mandatory style). Returns:
1615 * 1) 0, if we set the lock and don't need to request the server;
1616 * 2) 1, if no locks prevent us but we need to request the server;
413d6100 1617 * 3) -EACCES, if there is a lock that prevents us and wait is false.
9a5101c8 1618 */
85160e03 1619static int
fbd35aca 1620cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
161ebf9f 1621 bool wait)
85160e03 1622{
161ebf9f 1623 struct cifsLockInfo *conf_lock;
2b0143b5 1624 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
85160e03
PS
1625 bool exist;
1626 int rc = 0;
1627
85160e03
PS
1628try_again:
1629 exist = false;
d46b0da7 1630 cifs_down_write(&cinode->lock_sem);
85160e03 1631
55157dfb 1632 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
9645759c
RS
1633 lock->type, lock->flags, &conf_lock,
1634 CIFS_LOCK_OP);
85160e03 1635 if (!exist && cinode->can_cache_brlcks) {
f45d3416 1636 list_add_tail(&lock->llist, &cfile->llist->locks);
1b4b55a1 1637 up_write(&cinode->lock_sem);
85160e03
PS
1638 return rc;
1639 }
1640
1641 if (!exist)
1642 rc = 1;
1643 else if (!wait)
1644 rc = -EACCES;
1645 else {
1646 list_add_tail(&lock->blist, &conf_lock->blist);
1b4b55a1 1647 up_write(&cinode->lock_sem);
85160e03
PS
1648 rc = wait_event_interruptible(lock->block_q,
1649 (lock->blist.prev == &lock->blist) &&
1650 (lock->blist.next == &lock->blist));
1651 if (!rc)
1652 goto try_again;
d46b0da7 1653 cifs_down_write(&cinode->lock_sem);
a88b4707 1654 list_del_init(&lock->blist);
85160e03
PS
1655 }
1656
1b4b55a1 1657 up_write(&cinode->lock_sem);
85160e03
PS
1658 return rc;
1659}
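The wait parameter above is what separates a non-blocking request from one that sleeps on conf_lock->blist until the conflicting lock is released. The userspace-visible counterpart is F_SETLK versus F_SETLKW; a minimal sketch, with a hypothetical path and helper name (illustrative only, not part of this file):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int lock_first_page(int fd, int cmd)
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 4096;
	return fcntl(fd, cmd, &fl);
}

int main(void)
{
	int fd = open("/mnt/cifs/demo.dat", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* F_SETLK fails at once on conflict, like wait == false above */
	if (lock_first_page(fd, F_SETLK) < 0 &&
	    (errno == EACCES || errno == EAGAIN)) {
		puts("conflict; retrying with a blocking request");
		/* F_SETLKW sleeps until the conflicting lock is dropped */
		if (lock_first_page(fd, F_SETLKW) < 0)
			perror("F_SETLKW");
	}
	close(fd);
	return 0;
}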
1660
fb157ed2 1661#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
9a5101c8
PS
1662/*
1663 * Check if there is another lock that prevents us from setting the lock
1664 * (posix style). If such a lock exists, update the flock structure with its
1665 * properties. Otherwise, set the flock type to F_UNLCK if we can cache
1666 * brlocks or leave it the same if we can't. Returns 0 if we don't need to
1667 * request the server, or 1 otherwise.
1668 */
85160e03 1669static int
4f6bcec9
PS
1670cifs_posix_lock_test(struct file *file, struct file_lock *flock)
1671{
1672 int rc = 0;
496ad9aa 1673 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
84e286c1 1674 unsigned char saved_type = flock->c.flc_type;
4f6bcec9 1675
84e286c1 1676 if ((flock->c.flc_flags & FL_POSIX) == 0)
50792760
PS
1677 return 1;
1678
1b4b55a1 1679 down_read(&cinode->lock_sem);
4f6bcec9
PS
1680 posix_test_lock(file, flock);
1681
2cd11429 1682 if (lock_is_unlock(flock) && !cinode->can_cache_brlcks) {
84e286c1 1683 flock->c.flc_type = saved_type;
4f6bcec9
PS
1684 rc = 1;
1685 }
1686
1b4b55a1 1687 up_read(&cinode->lock_sem);
4f6bcec9
PS
1688 return rc;
1689}
1690
9a5101c8
PS
1691/*
1692 * Set the byte-range lock (posix style). Returns:
2e98c018 1693 * 1) <0, if an error occurs while setting the lock;
1694 * 2) 0, if we set the lock and don't need to request the server;
1695 * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock;
1696 * 4) FILE_LOCK_DEFERRED + 1, if we need to request the server.
9a5101c8 1697 */
4f6bcec9
PS
1698static int
1699cifs_posix_lock_set(struct file *file, struct file_lock *flock)
1700{
496ad9aa 1701 struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
2e98c018 1702 int rc = FILE_LOCK_DEFERRED + 1;
50792760 1703
84e286c1 1704 if ((flock->c.flc_flags & FL_POSIX) == 0)
50792760 1705 return rc;
4f6bcec9 1706
d46b0da7 1707 cifs_down_write(&cinode->lock_sem);
4f6bcec9 1708 if (!cinode->can_cache_brlcks) {
1b4b55a1 1709 up_write(&cinode->lock_sem);
50792760 1710 return rc;
4f6bcec9 1711 }
66189be7
PS
1712
1713 rc = posix_lock_file(file, flock, NULL);
1b4b55a1 1714 up_write(&cinode->lock_sem);
9ebb389d 1715 return rc;
4f6bcec9
PS
1716}
1717
d39a4f71 1718int
4f6bcec9 1719cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
85160e03 1720{
6d5786a3
PS
1721 unsigned int xid;
1722 int rc = 0, stored_rc;
85160e03
PS
1723 struct cifsLockInfo *li, *tmp;
1724 struct cifs_tcon *tcon;
0013fb4c 1725 unsigned int num, max_num, max_buf;
32b9aaf1 1726 LOCKING_ANDX_RANGE *buf, *cur;
4d61eda8
CIK
1727 static const int types[] = {
1728 LOCKING_ANDX_LARGE_FILES,
1729 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
1730 };
32b9aaf1 1731 int i;
85160e03 1732
6d5786a3 1733 xid = get_xid();
85160e03
PS
1734 tcon = tlink_tcon(cfile->tlink);
1735
0013fb4c
PS
1736 /*
1737 * Accessing maxBuf is racy with cifs_reconnect - need to store value
b9a74cde 1738 * and check it before using.
0013fb4c
PS
1739 */
1740 max_buf = tcon->ses->server->maxBuf;
b9a74cde 1741 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
6d5786a3 1742 free_xid(xid);
0013fb4c
PS
1743 return -EINVAL;
1744 }
1745
92a8109e
RL
1746 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
1747 PAGE_SIZE);
1748 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
1749 PAGE_SIZE);
0013fb4c
PS
1750 max_num = (max_buf - sizeof(struct smb_hdr)) /
1751 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 1752 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
32b9aaf1 1753 if (!buf) {
6d5786a3 1754 free_xid(xid);
e2f2886a 1755 return -ENOMEM;
32b9aaf1
PS
1756 }
1757
1758 for (i = 0; i < 2; i++) {
1759 cur = buf;
1760 num = 0;
f45d3416 1761 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
32b9aaf1
PS
1762 if (li->type != types[i])
1763 continue;
1764 cur->Pid = cpu_to_le16(li->pid);
1765 cur->LengthLow = cpu_to_le32((u32)li->length);
1766 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1767 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1768 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1769 if (++num == max_num) {
4b4de76e
PS
1770 stored_rc = cifs_lockv(xid, tcon,
1771 cfile->fid.netfid,
04a6aa8a
PS
1772 (__u8)li->type, 0, num,
1773 buf);
32b9aaf1
PS
1774 if (stored_rc)
1775 rc = stored_rc;
1776 cur = buf;
1777 num = 0;
1778 } else
1779 cur++;
1780 }
1781
1782 if (num) {
4b4de76e 1783 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
04a6aa8a 1784 (__u8)types[i], 0, num, buf);
32b9aaf1
PS
1785 if (stored_rc)
1786 rc = stored_rc;
1787 }
85160e03
PS
1788 }
1789
32b9aaf1 1790 kfree(buf);
6d5786a3 1791 free_xid(xid);
85160e03
PS
1792 return rc;
1793}
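The push loop above batches cached locks into LOCKING_ANDX_RANGE arrays bounded by the negotiated buffer size. A worked sketch of that arithmetic with placeholder sizes (the constants below are assumptions for illustration only, not the real structure sizes):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;	/* PAGE_SIZE on a typical build */
	unsigned int hdr = 32;		/* assumed sizeof(struct smb_hdr) */
	unsigned int range = 20;	/* assumed sizeof(LOCKING_ANDX_RANGE) */
	unsigned int max_buf = 16644;	/* hypothetical server maxBuf */

	/* mirrors: max_buf = min(max_buf - hdr, PAGE_SIZE) */
	max_buf -= hdr;
	if (max_buf > page_size)
		max_buf = page_size;
	/* mirrors: max_num = (max_buf - hdr) / sizeof(LOCKING_ANDX_RANGE) */
	printf("max_num = %u ranges per request\n", (max_buf - hdr) / range);
	return 0;
}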
1794
3d22462a
JL
1795static __u32
1796hash_lockowner(fl_owner_t owner)
1797{
1798 return cifs_lock_secret ^ hash32_ptr((const void *)owner);
1799}
fb157ed2 1800#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
3d22462a 1801
d5751469
PS
1802struct lock_to_push {
1803 struct list_head llist;
1804 __u64 offset;
1805 __u64 length;
1806 __u32 pid;
1807 __u16 netfid;
1808 __u8 type;
1809};
1810
fb157ed2 1811#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4f6bcec9 1812static int
b8db928b 1813cifs_push_posix_locks(struct cifsFileInfo *cfile)
4f6bcec9 1814{
2b0143b5 1815 struct inode *inode = d_inode(cfile->dentry);
4f6bcec9 1816 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
bd61e0a9 1817 struct file_lock *flock;
a1fde8ee 1818 struct file_lock_context *flctx = locks_inode_context(inode);
e084c1bd 1819 unsigned int count = 0, i;
4f6bcec9 1820 int rc = 0, xid, type;
d5751469
PS
1821 struct list_head locks_to_send, *el;
1822 struct lock_to_push *lck, *tmp;
4f6bcec9 1823 __u64 length;
4f6bcec9 1824
6d5786a3 1825 xid = get_xid();
4f6bcec9 1826
bd61e0a9
JL
1827 if (!flctx)
1828 goto out;
d5751469 1829
e084c1bd
JL
1830 spin_lock(&flctx->flc_lock);
1831 list_for_each(el, &flctx->flc_posix) {
1832 count++;
1833 }
1834 spin_unlock(&flctx->flc_lock);
1835
4f6bcec9
PS
1836 INIT_LIST_HEAD(&locks_to_send);
1837
d5751469 1838 /*
e084c1bd
JL
1839 * Allocating count locks is enough because no FL_POSIX locks can be
1840 * added to the list while we are holding cinode->lock_sem, which
ce85852b 1841 * protects locking operations on this inode.
d5751469 1842 */
e084c1bd 1843 for (i = 0; i < count; i++) {
d5751469
PS
1844 lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
1845 if (!lck) {
1846 rc = -ENOMEM;
1847 goto err_out;
1848 }
1849 list_add_tail(&lck->llist, &locks_to_send);
1850 }
1851
d5751469 1852 el = locks_to_send.next;
6109c850 1853 spin_lock(&flctx->flc_lock);
2cd11429 1854 for_each_file_lock(flock, &flctx->flc_posix) {
84e286c1
JL
1855 unsigned char ftype = flock->c.flc_type;
1856
d5751469 1857 if (el == &locks_to_send) {
ce85852b
PS
1858 /*
1859 * The list ended. We don't have enough allocated
1860 * structures - something is really wrong.
1861 */
f96637be 1862 cifs_dbg(VFS, "Can't push all brlocks!\n");
d5751469
PS
1863 break;
1864 }
d80c6984 1865 length = cifs_flock_len(flock);
84e286c1 1866 if (ftype == F_RDLCK || ftype == F_SHLCK)
4f6bcec9
PS
1867 type = CIFS_RDLCK;
1868 else
1869 type = CIFS_WRLCK;
d5751469 1870 lck = list_entry(el, struct lock_to_push, llist);
84e286c1 1871 lck->pid = hash_lockowner(flock->c.flc_owner);
4b4de76e 1872 lck->netfid = cfile->fid.netfid;
d5751469
PS
1873 lck->length = length;
1874 lck->type = type;
1875 lck->offset = flock->fl_start;
4f6bcec9 1876 }
6109c850 1877 spin_unlock(&flctx->flc_lock);
4f6bcec9
PS
1878
1879 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
4f6bcec9
PS
1880 int stored_rc;
1881
4f6bcec9 1882 stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
c5fd363d 1883 lck->offset, lck->length, NULL,
4f6bcec9
PS
1884 lck->type, 0);
1885 if (stored_rc)
1886 rc = stored_rc;
1887 list_del(&lck->llist);
1888 kfree(lck);
1889 }
1890
d5751469 1891out:
6d5786a3 1892 free_xid(xid);
4f6bcec9 1893 return rc;
d5751469
PS
1894err_out:
1895 list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
1896 list_del(&lck->llist);
1897 kfree(lck);
1898 }
1899 goto out;
4f6bcec9 1900}
fb157ed2 1901#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
4f6bcec9 1902
9ec3c882 1903static int
b8db928b 1904cifs_push_locks(struct cifsFileInfo *cfile)
9ec3c882 1905{
2b0143b5 1906 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
b8db928b 1907 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
9ec3c882 1908 int rc = 0;
fb157ed2
SF
1909#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
1910 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
1911#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
9ec3c882
PS
1912
1913 /* we are going to update can_cache_brlcks here - need a write access */
d46b0da7 1914 cifs_down_write(&cinode->lock_sem);
9ec3c882
PS
1915 if (!cinode->can_cache_brlcks) {
1916 up_write(&cinode->lock_sem);
1917 return rc;
1918 }
4f6bcec9 1919
fb157ed2 1920#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
29e20f9c 1921 if (cap_unix(tcon->ses) &&
4f6bcec9
PS
1922 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
1923 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
b8db928b
PS
1924 rc = cifs_push_posix_locks(cfile);
1925 else
fb157ed2 1926#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
b8db928b 1927 rc = tcon->ses->server->ops->push_mand_locks(cfile);
4f6bcec9 1928
b8db928b
PS
1929 cinode->can_cache_brlcks = false;
1930 up_write(&cinode->lock_sem);
1931 return rc;
4f6bcec9
PS
1932}
1933
03776f45 1934static void
04a6aa8a 1935cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
106dc538 1936 bool *wait_flag, struct TCP_Server_Info *server)
1da177e4 1937{
84e286c1 1938 if (flock->c.flc_flags & FL_POSIX)
f96637be 1939 cifs_dbg(FYI, "Posix\n");
84e286c1 1940 if (flock->c.flc_flags & FL_FLOCK)
f96637be 1941 cifs_dbg(FYI, "Flock\n");
84e286c1 1942 if (flock->c.flc_flags & FL_SLEEP) {
f96637be 1943 cifs_dbg(FYI, "Blocking lock\n");
03776f45 1944 *wait_flag = true;
1da177e4 1945 }
84e286c1 1946 if (flock->c.flc_flags & FL_ACCESS)
f96637be 1947 cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
84e286c1 1948 if (flock->c.flc_flags & FL_LEASE)
f96637be 1949 cifs_dbg(FYI, "Lease on file - not implemented yet\n");
84e286c1 1950 if (flock->c.flc_flags &
3d6d854a 1951 (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
9645759c 1952 FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
84e286c1
JL
1953 cifs_dbg(FYI, "Unknown lock flags 0x%x\n",
1954 flock->c.flc_flags);
1da177e4 1955
106dc538 1956 *type = server->vals->large_lock_type;
2cd11429 1957 if (lock_is_write(flock)) {
f96637be 1958 cifs_dbg(FYI, "F_WRLCK\n");
106dc538 1959 *type |= server->vals->exclusive_lock_type;
03776f45 1960 *lock = 1;
2cd11429 1961 } else if (lock_is_unlock(flock)) {
f96637be 1962 cifs_dbg(FYI, "F_UNLCK\n");
106dc538 1963 *type |= server->vals->unlock_lock_type;
03776f45
PS
1964 *unlock = 1;
1965 /* Check if unlock includes more than one lock range */
2cd11429 1966 } else if (lock_is_read(flock)) {
f96637be 1967 cifs_dbg(FYI, "F_RDLCK\n");
106dc538 1968 *type |= server->vals->shared_lock_type;
03776f45 1969 *lock = 1;
84e286c1 1970 } else if (flock->c.flc_type == F_EXLCK) {
f96637be 1971 cifs_dbg(FYI, "F_EXLCK\n");
106dc538 1972 *type |= server->vals->exclusive_lock_type;
03776f45 1973 *lock = 1;
84e286c1 1974 } else if (flock->c.flc_type == F_SHLCK) {
f96637be 1975 cifs_dbg(FYI, "F_SHLCK\n");
106dc538 1976 *type |= server->vals->shared_lock_type;
03776f45 1977 *lock = 1;
1da177e4 1978 } else
f96637be 1979 cifs_dbg(FYI, "Unknown type of lock\n");
03776f45 1980}
1da177e4 1981
03776f45 1982static int
04a6aa8a 1983cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3 1984 bool wait_flag, bool posix_lck, unsigned int xid)
03776f45
PS
1985{
1986 int rc = 0;
d80c6984 1987 __u64 length = cifs_flock_len(flock);
4f6bcec9
PS
1988 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1989 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 1990 struct TCP_Server_Info *server = tcon->ses->server;
fb157ed2 1991#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
4b4de76e 1992 __u16 netfid = cfile->fid.netfid;
f05337c6 1993
03776f45
PS
1994 if (posix_lck) {
1995 int posix_lock_type;
4f6bcec9
PS
1996
1997 rc = cifs_posix_lock_test(file, flock);
1998 if (!rc)
1999 return rc;
2000
106dc538 2001 if (type & server->vals->shared_lock_type)
03776f45
PS
2002 posix_lock_type = CIFS_RDLCK;
2003 else
2004 posix_lock_type = CIFS_WRLCK;
3d22462a 2005 rc = CIFSSMBPosixLock(xid, tcon, netfid,
84e286c1 2006 hash_lockowner(flock->c.flc_owner),
c5fd363d 2007 flock->fl_start, length, flock,
4f6bcec9 2008 posix_lock_type, wait_flag);
03776f45
PS
2009 return rc;
2010 }
fb157ed2 2011#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
1da177e4 2012
fbd35aca 2013 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
85160e03
PS
2014 if (!rc)
2015 return rc;
2016
03776f45 2017 /* BB we could chain these into one lock request BB */
d39a4f71
PS
2018 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
2019 1, 0, false);
03776f45 2020 if (rc == 0) {
d39a4f71
PS
2021 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2022 type, 0, 1, false);
84e286c1 2023 flock->c.flc_type = F_UNLCK;
03776f45 2024 if (rc != 0)
f96637be
JP
2025 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2026 rc);
a88b4707 2027 return 0;
1da177e4 2028 }
7ee1af76 2029
106dc538 2030 if (type & server->vals->shared_lock_type) {
84e286c1 2031 flock->c.flc_type = F_WRLCK;
a88b4707 2032 return 0;
7ee1af76
JA
2033 }
2034
d39a4f71
PS
2035 type &= ~server->vals->exclusive_lock_type;
2036
2037 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2038 type | server->vals->shared_lock_type,
2039 1, 0, false);
03776f45 2040 if (rc == 0) {
d39a4f71
PS
2041 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2042 type | server->vals->shared_lock_type, 0, 1, false);
84e286c1 2043 flock->c.flc_type = F_RDLCK;
03776f45 2044 if (rc != 0)
f96637be
JP
2045 cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
2046 rc);
03776f45 2047 } else
84e286c1 2048 flock->c.flc_type = F_WRLCK;
03776f45 2049
a88b4707 2050 return 0;
03776f45
PS
2051}
2052
f7ba7fe6 2053void
9ee305b7
PS
2054cifs_move_llist(struct list_head *source, struct list_head *dest)
2055{
2056 struct list_head *li, *tmp;
2057 list_for_each_safe(li, tmp, source)
2058 list_move(li, dest);
2059}
2060
f7ba7fe6 2061void
9ee305b7
PS
2062cifs_free_llist(struct list_head *llist)
2063{
2064 struct cifsLockInfo *li, *tmp;
2065 list_for_each_entry_safe(li, tmp, llist, llist) {
2066 cifs_del_lock_waiters(li);
2067 list_del(&li->llist);
2068 kfree(li);
2069 }
2070}
2071
fb157ed2 2072#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
d39a4f71 2073int
6d5786a3
PS
2074cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
2075 unsigned int xid)
9ee305b7
PS
2076{
2077 int rc = 0, stored_rc;
4d61eda8
CIK
2078 static const int types[] = {
2079 LOCKING_ANDX_LARGE_FILES,
2080 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES
2081 };
9ee305b7 2082 unsigned int i;
0013fb4c 2083 unsigned int max_num, num, max_buf;
9ee305b7
PS
2084 LOCKING_ANDX_RANGE *buf, *cur;
2085 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2b0143b5 2086 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
9ee305b7 2087 struct cifsLockInfo *li, *tmp;
d80c6984 2088 __u64 length = cifs_flock_len(flock);
9ee305b7
PS
2089 struct list_head tmp_llist;
2090
2091 INIT_LIST_HEAD(&tmp_llist);
2092
0013fb4c
PS
2093 /*
2094 * Accessing maxBuf is racy with cifs_reconnect - need to store value
b9a74cde 2095 * and check it before using.
0013fb4c
PS
2096 */
2097 max_buf = tcon->ses->server->maxBuf;
b9a74cde 2098 if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
0013fb4c
PS
2099 return -EINVAL;
2100
92a8109e
RL
2101 BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
2102 PAGE_SIZE);
2103 max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
2104 PAGE_SIZE);
0013fb4c
PS
2105 max_num = (max_buf - sizeof(struct smb_hdr)) /
2106 sizeof(LOCKING_ANDX_RANGE);
4b99d39b 2107 buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
9ee305b7
PS
2108 if (!buf)
2109 return -ENOMEM;
2110
d46b0da7 2111 cifs_down_write(&cinode->lock_sem);
9ee305b7
PS
2112 for (i = 0; i < 2; i++) {
2113 cur = buf;
2114 num = 0;
f45d3416 2115 list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
9ee305b7
PS
2116 if (flock->fl_start > li->offset ||
2117 (flock->fl_start + length) <
2118 (li->offset + li->length))
2119 continue;
2120 if (current->tgid != li->pid)
2121 continue;
9ee305b7
PS
2122 if (types[i] != li->type)
2123 continue;
ea319d57 2124 if (cinode->can_cache_brlcks) {
9ee305b7
PS
2125 /*
2126 * We can cache brlock requests - simply remove
fbd35aca 2127 * a lock from the file's list.
9ee305b7
PS
2128 */
2129 list_del(&li->llist);
2130 cifs_del_lock_waiters(li);
2131 kfree(li);
ea319d57 2132 continue;
9ee305b7 2133 }
ea319d57
PS
2134 cur->Pid = cpu_to_le16(li->pid);
2135 cur->LengthLow = cpu_to_le32((u32)li->length);
2136 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
2137 cur->OffsetLow = cpu_to_le32((u32)li->offset);
2138 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
2139 /*
2140 * We need to save a lock here to let us add it again to
2141 * the file's list if the unlock range request fails on
2142 * the server.
2143 */
2144 list_move(&li->llist, &tmp_llist);
2145 if (++num == max_num) {
4b4de76e
PS
2146 stored_rc = cifs_lockv(xid, tcon,
2147 cfile->fid.netfid,
ea319d57
PS
2148 li->type, num, 0, buf);
2149 if (stored_rc) {
2150 /*
2151 * We failed on the unlock range
2152 * request - add all locks from the tmp
2153 * list to the head of the file's list.
2154 */
2155 cifs_move_llist(&tmp_llist,
f45d3416 2156 &cfile->llist->locks);
ea319d57
PS
2157 rc = stored_rc;
2158 } else
2159 /*
2160 * The unlock range request succeeded -
2161 * free the tmp list.
2162 */
2163 cifs_free_llist(&tmp_llist);
2164 cur = buf;
2165 num = 0;
2166 } else
2167 cur++;
9ee305b7
PS
2168 }
2169 if (num) {
4b4de76e 2170 stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
9ee305b7
PS
2171 types[i], num, 0, buf);
2172 if (stored_rc) {
f45d3416
PS
2173 cifs_move_llist(&tmp_llist,
2174 &cfile->llist->locks);
9ee305b7
PS
2175 rc = stored_rc;
2176 } else
2177 cifs_free_llist(&tmp_llist);
2178 }
2179 }
2180
1b4b55a1 2181 up_write(&cinode->lock_sem);
9ee305b7
PS
2182 kfree(buf);
2183 return rc;
2184}
fb157ed2 2185#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
9ee305b7 2186
03776f45 2187static int
f45d3416 2188cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
6d5786a3
PS
2189 bool wait_flag, bool posix_lck, int lock, int unlock,
2190 unsigned int xid)
03776f45
PS
2191{
2192 int rc = 0;
d80c6984 2193 __u64 length = cifs_flock_len(flock);
03776f45
PS
2194 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2195 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
106dc538 2196 struct TCP_Server_Info *server = tcon->ses->server;
2b0143b5 2197 struct inode *inode = d_inode(cfile->dentry);
03776f45 2198
fb157ed2 2199#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
03776f45 2200 if (posix_lck) {
08547b03 2201 int posix_lock_type;
4f6bcec9
PS
2202
2203 rc = cifs_posix_lock_set(file, flock);
2e98c018 2204 if (rc <= FILE_LOCK_DEFERRED)
4f6bcec9
PS
2205 return rc;
2206
106dc538 2207 if (type & server->vals->shared_lock_type)
08547b03
SF
2208 posix_lock_type = CIFS_RDLCK;
2209 else
2210 posix_lock_type = CIFS_WRLCK;
50c2f753 2211
03776f45 2212 if (unlock == 1)
beb84dc8 2213 posix_lock_type = CIFS_UNLCK;
7ee1af76 2214
f45d3416 2215 rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
84e286c1 2216 hash_lockowner(flock->c.flc_owner),
3d22462a 2217 flock->fl_start, length,
f45d3416 2218 NULL, posix_lock_type, wait_flag);
03776f45
PS
2219 goto out;
2220 }
fb157ed2 2221#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
03776f45 2222 if (lock) {
161ebf9f
PS
2223 struct cifsLockInfo *lock;
2224
9645759c 2225 lock = cifs_lock_init(flock->fl_start, length, type,
84e286c1 2226 flock->c.flc_flags);
161ebf9f
PS
2227 if (!lock)
2228 return -ENOMEM;
2229
fbd35aca 2230 rc = cifs_lock_add_if(cfile, lock, wait_flag);
21cb2d90 2231 if (rc < 0) {
161ebf9f 2232 kfree(lock);
21cb2d90
PS
2233 return rc;
2234 }
2235 if (!rc)
85160e03
PS
2236 goto out;
2237
63b7d3a4
PS
2238 /*
2239 * Windows 7 server can delay breaking lease from read to None
2240 * if we set a byte-range lock on a file - break it explicitly
2241 * before sending the lock to the server to be sure the next
2242 * read won't conflict with non-overlapping locks due to
2243 * page reads.
2244 */
18cceb6a
PS
2245 if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
2246 CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 2247 cifs_zap_mapping(inode);
f96637be
JP
2248 cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
2249 inode);
18cceb6a 2250 CIFS_I(inode)->oplock = 0;
63b7d3a4
PS
2251 }
2252
d39a4f71
PS
2253 rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
2254 type, 1, 0, wait_flag);
161ebf9f
PS
2255 if (rc) {
2256 kfree(lock);
21cb2d90 2257 return rc;
03776f45 2258 }
161ebf9f 2259
fbd35aca 2260 cifs_lock_add(cfile, lock);
9ee305b7 2261 } else if (unlock)
d39a4f71 2262 rc = server->ops->mand_unlock_range(cfile, flock, xid);
03776f45 2263
03776f45 2264out:
84e286c1 2265 if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) {
bc31d0cd
AA
2266 /*
2267 * If this is a request to remove all locks because we
2268 * are closing the file, it doesn't matter if the
2269 * unlocking failed as both cifs.ko and the SMB server
2270 * remove the lock on file close
2271 */
2272 if (rc) {
2273 cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
84e286c1 2274 if (!(flock->c.flc_flags & FL_CLOSE))
bc31d0cd
AA
2275 return rc;
2276 }
4f656367 2277 rc = locks_lock_file_wait(file, flock);
bc31d0cd 2278 }
03776f45
PS
2279 return rc;
2280}
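The FL_CLOSE special case above tolerates an unlock failure because both the client and the server drop byte-range locks when the file is closed anyway. The userspace-visible cousin of that rule is that classic (non-OFD) POSIX record locks are released when the process closes any descriptor for the file; a hedged sketch with a hypothetical path (illustrative only, not part of this file):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct flock fl;
	int fd1 = open("/mnt/cifs/demo.dat", O_RDWR);
	int fd2 = open("/mnt/cifs/demo.dat", O_RDWR);

	if (fd1 < 0 || fd2 < 0) {
		perror("open");
		return 1;
	}
	memset(&fl, 0, sizeof(fl));
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start = 0;
	fl.l_len = 4096;
	if (fcntl(fd1, F_SETLK, &fl) < 0)
		perror("F_SETLK");
	/* closing *any* descriptor for the file drops the lock taken via fd1 */
	close(fd2);
	close(fd1);
	return 0;
}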
2281
d0677992
SF
2282int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
2283{
2284 int rc, xid;
2285 int lock = 0, unlock = 0;
2286 bool wait_flag = false;
2287 bool posix_lck = false;
2288 struct cifs_sb_info *cifs_sb;
2289 struct cifs_tcon *tcon;
d0677992 2290 struct cifsFileInfo *cfile;
d0677992
SF
2291 __u32 type;
2292
d0677992
SF
2293 xid = get_xid();
2294
84e286c1 2295 if (!(fl->c.flc_flags & FL_FLOCK)) {
575e079c
ZX
2296 rc = -ENOLCK;
2297 free_xid(xid);
2298 return rc;
2299 }
d0677992
SF
2300
2301 cfile = (struct cifsFileInfo *)file->private_data;
2302 tcon = tlink_tcon(cfile->tlink);
2303
2304 cifs_read_flock(fl, &type, &lock, &unlock, &wait_flag,
2305 tcon->ses->server);
2306 cifs_sb = CIFS_FILE_SB(file);
d0677992
SF
2307
2308 if (cap_unix(tcon->ses) &&
2309 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2310 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2311 posix_lck = true;
2312
2313 if (!lock && !unlock) {
2314 /*
2315 * if neither lock nor unlock is requested then there is nothing
2316 * to do, since we do not know what the operation is
2317 */
575e079c 2318 rc = -EOPNOTSUPP;
d0677992 2319 free_xid(xid);
575e079c 2320 return rc;
d0677992
SF
2321 }
2322
2323 rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
2324 xid);
2325 free_xid(xid);
2326 return rc;
2327
2328
2329}
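cifs_flock() services the flock(2) entry point, where locks cover the whole file and LOCK_NB selects the non-blocking behaviour that maps onto wait_flag above. A minimal sketch with a hypothetical path (illustrative only, not part of this file):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cifs/demo.dat", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
		if (errno == EWOULDBLOCK)
			puts("another holder has the file locked");
		else
			perror("flock");
	} else {
		puts("exclusive flock taken on the whole file");
		flock(fd, LOCK_UN);
	}
	close(fd);
	return 0;
}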
2330
03776f45
PS
2331int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
2332{
2333 int rc, xid;
2334 int lock = 0, unlock = 0;
2335 bool wait_flag = false;
2336 bool posix_lck = false;
2337 struct cifs_sb_info *cifs_sb;
2338 struct cifs_tcon *tcon;
03776f45 2339 struct cifsFileInfo *cfile;
04a6aa8a 2340 __u32 type;
03776f45
PS
2341
2342 rc = -EACCES;
6d5786a3 2343 xid = get_xid();
03776f45 2344
773891ff 2345 cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd,
84e286c1
JL
2346 flock->c.flc_flags, flock->c.flc_type,
2347 (long long)flock->fl_start,
773891ff 2348 (long long)flock->fl_end);
03776f45 2349
03776f45
PS
2350 cfile = (struct cifsFileInfo *)file->private_data;
2351 tcon = tlink_tcon(cfile->tlink);
106dc538
PS
2352
2353 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
2354 tcon->ses->server);
7119e220 2355 cifs_sb = CIFS_FILE_SB(file);
35866f3f 2356 set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
03776f45 2357
29e20f9c 2358 if (cap_unix(tcon->ses) &&
03776f45
PS
2359 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2360 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
2361 posix_lck = true;
2362 /*
2363 * BB add code here to normalize offset and length to account for
2364 * negative length, which we cannot accept over the wire.
2365 */
2366 if (IS_GETLK(cmd)) {
4f6bcec9 2367 rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
6d5786a3 2368 free_xid(xid);
03776f45
PS
2369 return rc;
2370 }
2371
2372 if (!lock && !unlock) {
2373 /*
2374 * if neither lock nor unlock is requested then there is nothing
2375 * to do, since we do not know what the operation is
2376 */
6d5786a3 2377 free_xid(xid);
03776f45 2378 return -EOPNOTSUPP;
7ee1af76
JA
2379 }
2380
03776f45
PS
2381 rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
2382 xid);
6d5786a3 2383 free_xid(xid);
1da177e4
LT
2384 return rc;
2385}
2386
3ee1a1fc
DH
2387void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
2388 bool was_async)
fbec9ab9 2389{
3ee1a1fc 2390 struct netfs_io_request *wreq = wdata->rreq;
61ea6b3a
DH
2391 struct netfs_inode *ictx = netfs_inode(wreq->inode);
2392 loff_t wrend;
fbec9ab9 2393
3ee1a1fc 2394 if (result > 0) {
61ea6b3a
DH
2395 wrend = wdata->subreq.start + wdata->subreq.transferred + result;
2396
2397 if (wrend > ictx->zero_point &&
2398 (wdata->rreq->origin == NETFS_UNBUFFERED_WRITE ||
2399 wdata->rreq->origin == NETFS_DIO_WRITE))
2400 ictx->zero_point = wrend;
2401 if (wrend > ictx->remote_i_size)
2402 netfs_resize_file(ictx, wrend, true);
3ee1a1fc
DH
2403 }
2404
2405 netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
fbec9ab9
JL
2406}
2407
6508d904
JL
2408struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
2409 bool fsuid_only)
630f3f0c
SF
2410{
2411 struct cifsFileInfo *open_file = NULL;
874c8ca1 2412 struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
6508d904
JL
2413
2414 /* only filter by fsuid on multiuser mounts */
2415 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2416 fsuid_only = false;
630f3f0c 2417
cb248819 2418 spin_lock(&cifs_inode->open_file_lock);
630f3f0c
SF
2419 /* we could simply get the first_list_entry since write-only entries
2420 are always at the end of the list but since the first entry might
2421 have a close pending, we go through the whole list */
2422 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
fef59fd7 2423 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
6508d904 2424 continue;
2e396b83 2425 if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
860b69a9 2426 if ((!open_file->invalidHandle)) {
630f3f0c
SF
2427 /* found a good file */
2428 /* lock it so it will not be closed on us */
3afca265 2429 cifsFileInfo_get(open_file);
cb248819 2430 spin_unlock(&cifs_inode->open_file_lock);
630f3f0c
SF
2431 return open_file;
2432 } /* else might as well continue, and look for
2433 another, or simply have the caller reopen it
2434 again rather than trying to fix this handle */
2435 } else /* write only file */
2436 break; /* write only files are last so must be done */
2437 }
cb248819 2438 spin_unlock(&cifs_inode->open_file_lock);
630f3f0c
SF
2439 return NULL;
2440}
630f3f0c 2441
fe768d51
PS
2442/* Return -EBADF if no handle is found and general rc otherwise */
2443int
86f740f2 2444cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
fe768d51 2445 struct cifsFileInfo **ret_file)
6148a742 2446{
2c0c2a08 2447 struct cifsFileInfo *open_file, *inv_file = NULL;
d3892294 2448 struct cifs_sb_info *cifs_sb;
2846d386 2449 bool any_available = false;
fe768d51 2450 int rc = -EBADF;
2c0c2a08 2451 unsigned int refind = 0;
86f740f2
AA
2452 bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
2453 bool with_delete = flags & FIND_WR_WITH_DELETE;
fe768d51
PS
2454 *ret_file = NULL;
2455
2456 /*
2457 * Having a null inode here (because mapping->host was set to zero by
2458 * the VFS or MM) should not happen but we had reports of an oops (due
2459 * to it being zero) during stress testcases so we need to check for it
2460 */
60808233 2461
fb8c4b14 2462 if (cifs_inode == NULL) {
f96637be 2463 cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
60808233 2464 dump_stack();
fe768d51 2465 return rc;
60808233
SF
2466 }
2467
874c8ca1 2468 cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb);
d3892294 2469
6508d904
JL
2470 /* only filter by fsuid on multiuser mounts */
2471 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
2472 fsuid_only = false;
2473
cb248819 2474 spin_lock(&cifs_inode->open_file_lock);
9b22b0b7 2475refind_writable:
2c0c2a08 2476 if (refind > MAX_REOPEN_ATT) {
cb248819 2477 spin_unlock(&cifs_inode->open_file_lock);
fe768d51 2478 return rc;
2c0c2a08 2479 }
6148a742 2480 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
6508d904
JL
2481 if (!any_available && open_file->pid != current->tgid)
2482 continue;
fef59fd7 2483 if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
6148a742 2484 continue;
86f740f2
AA
2485 if (with_delete && !(open_file->fid.access & DELETE))
2486 continue;
2e396b83 2487 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
9b22b0b7
SF
2488 if (!open_file->invalidHandle) {
2489 /* found a good writable file */
3afca265 2490 cifsFileInfo_get(open_file);
cb248819 2491 spin_unlock(&cifs_inode->open_file_lock);
fe768d51
PS
2492 *ret_file = open_file;
2493 return 0;
2c0c2a08
SP
2494 } else {
2495 if (!inv_file)
2496 inv_file = open_file;
9b22b0b7 2497 }
6148a742
SF
2498 }
2499 }
2846d386
JL
2500 /* couldn't find usable FH with same pid, try any available */
2501 if (!any_available) {
2502 any_available = true;
2503 goto refind_writable;
2504 }
2c0c2a08
SP
2505
2506 if (inv_file) {
2507 any_available = false;
3afca265 2508 cifsFileInfo_get(inv_file);
2c0c2a08
SP
2509 }
2510
cb248819 2511 spin_unlock(&cifs_inode->open_file_lock);
2c0c2a08
SP
2512
2513 if (inv_file) {
2514 rc = cifs_reopen_file(inv_file, false);
fe768d51
PS
2515 if (!rc) {
2516 *ret_file = inv_file;
2517 return 0;
2c0c2a08 2518 }
fe768d51 2519
487317c9 2520 spin_lock(&cifs_inode->open_file_lock);
fe768d51 2521 list_move_tail(&inv_file->flist, &cifs_inode->openFileList);
487317c9 2522 spin_unlock(&cifs_inode->open_file_lock);
fe768d51
PS
2523 cifsFileInfo_put(inv_file);
2524 ++refind;
2525 inv_file = NULL;
cb248819 2526 spin_lock(&cifs_inode->open_file_lock);
fe768d51 2527 goto refind_writable;
2c0c2a08
SP
2528 }
2529
fe768d51
PS
2530 return rc;
2531}
2532
2533struct cifsFileInfo *
86f740f2 2534find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
fe768d51
PS
2535{
2536 struct cifsFileInfo *cfile;
2537 int rc;
2538
86f740f2 2539 rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
fe768d51 2540 if (rc)
a0a3036b 2541 cifs_dbg(FYI, "Couldn't find writable handle rc=%d\n", rc);
fe768d51
PS
2542
2543 return cfile;
6148a742
SF
2544}
2545
8de9e86c
RS
2546int
2547cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
86f740f2 2548 int flags,
8de9e86c
RS
2549 struct cifsFileInfo **ret_file)
2550{
8de9e86c 2551 struct cifsFileInfo *cfile;
f6a9bc33 2552 void *page = alloc_dentry_path();
8de9e86c
RS
2553
2554 *ret_file = NULL;
2555
2556 spin_lock(&tcon->open_file_lock);
f6a9bc33
AV
2557 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2558 struct cifsInodeInfo *cinode;
2559 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2560 if (IS_ERR(full_path)) {
8de9e86c 2561 spin_unlock(&tcon->open_file_lock);
f6a9bc33
AV
2562 free_dentry_path(page);
2563 return PTR_ERR(full_path);
8de9e86c 2564 }
f6a9bc33 2565 if (strcmp(full_path, name))
8de9e86c 2566 continue;
8de9e86c 2567
8de9e86c
RS
2568 cinode = CIFS_I(d_inode(cfile->dentry));
2569 spin_unlock(&tcon->open_file_lock);
f6a9bc33 2570 free_dentry_path(page);
86f740f2 2571 return cifs_get_writable_file(cinode, flags, ret_file);
8de9e86c
RS
2572 }
2573
2574 spin_unlock(&tcon->open_file_lock);
f6a9bc33 2575 free_dentry_path(page);
8de9e86c
RS
2576 return -ENOENT;
2577}
2578
496902dc
RS
2579int
2580cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
2581 struct cifsFileInfo **ret_file)
2582{
496902dc 2583 struct cifsFileInfo *cfile;
f6a9bc33 2584 void *page = alloc_dentry_path();
496902dc
RS
2585
2586 *ret_file = NULL;
2587
2588 spin_lock(&tcon->open_file_lock);
f6a9bc33
AV
2589 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
2590 struct cifsInodeInfo *cinode;
2591 const char *full_path = build_path_from_dentry(cfile->dentry, page);
2592 if (IS_ERR(full_path)) {
496902dc 2593 spin_unlock(&tcon->open_file_lock);
f6a9bc33
AV
2594 free_dentry_path(page);
2595 return PTR_ERR(full_path);
496902dc 2596 }
f6a9bc33 2597 if (strcmp(full_path, name))
496902dc 2598 continue;
496902dc 2599
496902dc
RS
2600 cinode = CIFS_I(d_inode(cfile->dentry));
2601 spin_unlock(&tcon->open_file_lock);
f6a9bc33 2602 free_dentry_path(page);
496902dc
RS
2603 *ret_file = find_readable_file(cinode, 0);
2604 return *ret_file ? 0 : -ENOENT;
2605 }
2606
2607 spin_unlock(&tcon->open_file_lock);
f6a9bc33 2608 free_dentry_path(page);
496902dc
RS
2609 return -ENOENT;
2610}
2611
3ee1a1fc
DH
2612/*
2613 * Flush data on a strict file.
2614 */
02c24a82
JB
2615int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2616 int datasync)
1da177e4 2617{
6d5786a3 2618 unsigned int xid;
1da177e4 2619 int rc = 0;
96daf2b0 2620 struct cifs_tcon *tcon;
1d8c4c00 2621 struct TCP_Server_Info *server;
c21dfb69 2622 struct cifsFileInfo *smbfile = file->private_data;
496ad9aa 2623 struct inode *inode = file_inode(file);
8be7e6ba 2624 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1da177e4 2625
3b49c9a1 2626 rc = file_write_and_wait_range(file, start, end);
2391ca41
SF
2627 if (rc) {
2628 trace_cifs_fsync_err(inode->i_ino, rc);
02c24a82 2629 return rc;
2391ca41 2630 }
02c24a82 2631
6d5786a3 2632 xid = get_xid();
1da177e4 2633
35c265e0
AV
2634 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2635 file, datasync);
50c2f753 2636
18cceb6a 2637 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
4f73c7d3 2638 rc = cifs_zap_mapping(inode);
6feb9891 2639 if (rc) {
f96637be 2640 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
6feb9891
PS
2641 rc = 0; /* don't care about it in fsync */
2642 }
2643 }
eb4b756b 2644
8be7e6ba 2645 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2646 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2647 server = tcon->ses->server;
71e6864e 2648 if (server->ops->flush == NULL) {
1d8c4c00 2649 rc = -ENOSYS;
71e6864e
SF
2650 goto strict_fsync_exit;
2651 }
2652
2653 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2654 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2655 if (smbfile) {
2656 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2657 cifsFileInfo_put(smbfile);
2658 } else
2659 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2660 } else
2661 rc = server->ops->flush(xid, tcon, &smbfile->fid);
1d8c4c00 2662 }
8be7e6ba 2663
71e6864e 2664strict_fsync_exit:
6d5786a3 2665 free_xid(xid);
8be7e6ba
PS
2666 return rc;
2667}
2668
3ee1a1fc
DH
2669/*
2670 * Flush data on a non-strict file.
2671 */
02c24a82 2672int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
8be7e6ba 2673{
6d5786a3 2674 unsigned int xid;
8be7e6ba 2675 int rc = 0;
96daf2b0 2676 struct cifs_tcon *tcon;
1d8c4c00 2677 struct TCP_Server_Info *server;
8be7e6ba 2678 struct cifsFileInfo *smbfile = file->private_data;
71e6864e 2679 struct inode *inode = file_inode(file);
7119e220 2680 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
02c24a82 2681
3b49c9a1 2682 rc = file_write_and_wait_range(file, start, end);
f2bf09e9
SF
2683 if (rc) {
2684 trace_cifs_fsync_err(file_inode(file)->i_ino, rc);
02c24a82 2685 return rc;
f2bf09e9 2686 }
8be7e6ba 2687
6d5786a3 2688 xid = get_xid();
8be7e6ba 2689
35c265e0
AV
2690 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2691 file, datasync);
8be7e6ba
PS
2692
2693 tcon = tlink_tcon(smbfile->tlink);
1d8c4c00
PS
2694 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2695 server = tcon->ses->server;
71e6864e 2696 if (server->ops->flush == NULL) {
1d8c4c00 2697 rc = -ENOSYS;
71e6864e
SF
2698 goto fsync_exit;
2699 }
2700
2701 if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
2702 smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
2703 if (smbfile) {
2704 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2705 cifsFileInfo_put(smbfile);
2706 } else
2707 cifs_dbg(FYI, "ignore fsync for file not open for write\n");
2708 } else
2709 rc = server->ops->flush(xid, tcon, &smbfile->fid);
1d8c4c00 2710 }
b298f223 2711
71e6864e 2712fsync_exit:
6d5786a3 2713 free_xid(xid);
1da177e4
LT
2714 return rc;
2715}
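Both fsync entry points above end in a server-side flush of the open handle. From userspace the distinction between fsync(2) and fdatasync(2) is only whether non-essential metadata must also reach stable storage; a small sketch with a hypothetical path (illustrative only, not part of this file):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "durable bytes\n";
	int fd = open("/mnt/cifs/demo.dat", O_WRONLY | O_CREAT | O_TRUNC, 0600);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	if (fdatasync(fd) < 0)	/* data (and the size change) made durable */
		perror("fdatasync");
	if (fsync(fd) < 0)	/* data plus the remaining metadata */
		perror("fsync");
	close(fd);
	return 0;
}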
2716
1da177e4
LT
2717/*
2718 * As file closes, flush all cached write data for this inode checking
2719 * for write behind errors.
2720 */
75e1fcc0 2721int cifs_flush(struct file *file, fl_owner_t id)
1da177e4 2722{
496ad9aa 2723 struct inode *inode = file_inode(file);
1da177e4
LT
2724 int rc = 0;
2725
eb4b756b 2726 if (file->f_mode & FMODE_WRITE)
d3f1322a 2727 rc = filemap_write_and_wait(inode->i_mapping);
50c2f753 2728
f96637be 2729 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2b058ace
C
2730 if (rc) {
2731 /* get more nuanced writeback errors */
2732 rc = filemap_check_wb_err(file->f_mapping, 0);
f2bf09e9 2733 trace_cifs_flush_err(inode->i_ino, rc);
2b058ace 2734 }
1da177e4
LT
2735 return rc;
2736}
2737
579f9053 2738static ssize_t
3dae8750 2739cifs_writev(struct kiocb *iocb, struct iov_iter *from)
72432ffc 2740{
579f9053
PS
2741 struct file *file = iocb->ki_filp;
2742 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2743 struct inode *inode = file->f_mapping->host;
2744 struct cifsInodeInfo *cinode = CIFS_I(inode);
2745 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
5f380c7f 2746 ssize_t rc;
72432ffc 2747
3ee1a1fc
DH
2748 rc = netfs_start_io_write(inode);
2749 if (rc < 0)
2750 return rc;
2751
579f9053
PS
2752 /*
2753 * We need to hold the sem to be sure nobody modifies lock list
2754 * with a brlock that prevents writing.
2755 */
2756 down_read(&cinode->lock_sem);
5f380c7f 2757
3309dd04
AV
2758 rc = generic_write_checks(iocb, from);
2759 if (rc <= 0)
5f380c7f
AV
2760 goto out;
2761
5f380c7f 2762 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
9645759c
RS
2763 server->vals->exclusive_lock_type, 0,
2764 NULL, CIFS_WRITE_OP))
3ee1a1fc 2765 rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
5f380c7f
AV
2766 else
2767 rc = -EACCES;
2768out:
966681c9 2769 up_read(&cinode->lock_sem);
3ee1a1fc 2770 netfs_end_io_write(inode);
e2592217
CH
2771 if (rc > 0)
2772 rc = generic_write_sync(iocb, rc);
579f9053
PS
2773 return rc;
2774}
2775
2776ssize_t
3dae8750 2777cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
579f9053 2778{
496ad9aa 2779 struct inode *inode = file_inode(iocb->ki_filp);
579f9053
PS
2780 struct cifsInodeInfo *cinode = CIFS_I(inode);
2781 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2782 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2783 iocb->ki_filp->private_data;
2784 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
88cf75aa 2785 ssize_t written;
ca8aa29c 2786
c11f1df5
SP
2787 written = cifs_get_writer(cinode);
2788 if (written)
2789 return written;
2790
18cceb6a 2791 if (CIFS_CACHE_WRITE(cinode)) {
88cf75aa 2792 if (cap_unix(tcon->ses) &&
3ee1a1fc
DH
2793 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
2794 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2795 written = netfs_file_write_iter(iocb, from);
c11f1df5
SP
2796 goto out;
2797 }
3dae8750 2798 written = cifs_writev(iocb, from);
c11f1df5 2799 goto out;
25078105 2800 }
25078105 2801 /*
ca8aa29c
PS
2802 * For non-oplocked files in strict cache mode we need to write the data
2803 * to the server exactly from the pos to pos+len-1 rather than flush all
2804 * affected pages because it may cause an error with mandatory locks on
2805 * these pages but not on the region from pos to pos+len-1.
72432ffc 2806 */
3ee1a1fc 2807 written = netfs_file_write_iter(iocb, from);
6dfbd846 2808 if (CIFS_CACHE_READ(cinode)) {
88cf75aa 2809 /*
6dfbd846
PS
2810 * We have read level caching and we have just sent a write
2811 * request to the server thus making data in the cache stale.
2812 * Zap the cache and set oplock/lease level to NONE to avoid
2813 * reading stale data from the cache. All subsequent read
2814 * operations will read new data from the server.
88cf75aa 2815 */
4f73c7d3 2816 cifs_zap_mapping(inode);
6dfbd846 2817 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
f96637be 2818 inode);
18cceb6a 2819 cinode->oplock = 0;
88cf75aa 2820 }
c11f1df5
SP
2821out:
2822 cifs_put_writer(cinode);
88cf75aa 2823 return written;
72432ffc
PS
2824}
2825
1a5b4edd
DH
2826ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2827{
2828 ssize_t rc;
2829 struct inode *inode = file_inode(iocb->ki_filp);
2830
2831 if (iocb->ki_flags & IOCB_DIRECT)
3ee1a1fc 2832 return netfs_unbuffered_read_iter(iocb, iter);
1a5b4edd
DH
2833
2834 rc = cifs_revalidate_mapping(inode);
2835 if (rc)
2836 return rc;
2837
3ee1a1fc 2838 return netfs_file_read_iter(iocb, iter);
1a5b4edd
DH
2839}
2840
2841ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
2842{
2843 struct inode *inode = file_inode(iocb->ki_filp);
2844 struct cifsInodeInfo *cinode = CIFS_I(inode);
2845 ssize_t written;
2846 int rc;
2847
2848 if (iocb->ki_filp->f_flags & O_DIRECT) {
3ee1a1fc 2849 written = netfs_unbuffered_write_iter(iocb, from);
1a5b4edd
DH
2850 if (written > 0 && CIFS_CACHE_READ(cinode)) {
2851 cifs_zap_mapping(inode);
2852 cifs_dbg(FYI,
2853 "Set no oplock for inode=%p after a write operation\n",
2854 inode);
2855 cinode->oplock = 0;
2856 }
2857 return written;
2858 }
2859
2860 written = cifs_get_writer(cinode);
2861 if (written)
2862 return written;
2863
3ee1a1fc 2864 written = netfs_file_write_iter(iocb, from);
1a5b4edd 2865
3ee1a1fc
DH
2866 if (!CIFS_CACHE_WRITE(CIFS_I(inode))) {
2867 rc = filemap_fdatawrite(inode->i_mapping);
2868 if (rc)
2869 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
2870 rc, inode);
2871 }
1a5b4edd 2872
1a5b4edd
DH
2873 cifs_put_writer(cinode);
2874 return written;
2875}
2876
579f9053 2877ssize_t
e6a7bcb4 2878cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
a70307ee 2879{
496ad9aa 2880 struct inode *inode = file_inode(iocb->ki_filp);
579f9053
PS
2881 struct cifsInodeInfo *cinode = CIFS_I(inode);
2882 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2883 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2884 iocb->ki_filp->private_data;
2885 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2886 int rc = -EACCES;
a70307ee
PS
2887
2888 /*
2889 * In strict cache mode we need to read from the server all the time
2890 * if we don't have level II oplock because the server can delay mtime
2891 * change - so we can't make a decision about invalidating the inode.
2892 * And page reads can also fail if there are mandatory locks
2893 * on pages affected by this read but not on the region from pos to
2894 * pos+len-1.
2895 */
18cceb6a 2896 if (!CIFS_CACHE_READ(cinode))
3ee1a1fc 2897 return netfs_unbuffered_read_iter(iocb, to);
a70307ee 2898
579f9053
PS
2899 if (cap_unix(tcon->ses) &&
2900 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3ee1a1fc
DH
2901 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2902 if (iocb->ki_flags & IOCB_DIRECT)
2903 return netfs_unbuffered_read_iter(iocb, to);
2904 return netfs_buffered_read_iter(iocb, to);
2905 }
579f9053
PS
2906
2907 /*
2908 * We need to hold the sem to be sure nobody modifies lock list
2909 * with a brlock that prevents reading.
2910 */
14b1cd25
SF
2911 if (iocb->ki_flags & IOCB_DIRECT) {
2912 rc = netfs_start_io_direct(inode);
2913 if (rc < 0)
2914 goto out;
d2c5eb57 2915 rc = -EACCES;
14b1cd25
SF
2916 down_read(&cinode->lock_sem);
2917 if (!cifs_find_lock_conflict(
2918 cfile, iocb->ki_pos, iov_iter_count(to),
2919 tcon->ses->server->vals->shared_lock_type,
2920 0, NULL, CIFS_READ_OP))
2921 rc = netfs_unbuffered_read_iter_locked(iocb, to);
2922 up_read(&cinode->lock_sem);
2923 netfs_end_io_direct(inode);
2924 } else {
2925 rc = netfs_start_io_read(inode);
2926 if (rc < 0)
2927 goto out;
d2c5eb57 2928 rc = -EACCES;
14b1cd25
SF
2929 down_read(&cinode->lock_sem);
2930 if (!cifs_find_lock_conflict(
2931 cfile, iocb->ki_pos, iov_iter_count(to),
2932 tcon->ses->server->vals->shared_lock_type,
2933 0, NULL, CIFS_READ_OP))
2934 rc = filemap_read(iocb, to, 0);
2935 up_read(&cinode->lock_sem);
2936 netfs_end_io_read(inode);
3ee1a1fc 2937 }
14b1cd25 2938out:
579f9053 2939 return rc;
a70307ee 2940}
1da177e4 2941
c191bc07 2942static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf)
ca83ce3d 2943{
3ee1a1fc 2944 return netfs_page_mkwrite(vmf, NULL);
ca83ce3d
JL
2945}
2946
7cbea8dc 2947static const struct vm_operations_struct cifs_file_vm_ops = {
ca83ce3d 2948 .fault = filemap_fault,
f1820361 2949 .map_pages = filemap_map_pages,
ca83ce3d
JL
2950 .page_mkwrite = cifs_page_mkwrite,
2951};
2952
7a6a19b1
PS
2953int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
2954{
f04a703c 2955 int xid, rc = 0;
496ad9aa 2956 struct inode *inode = file_inode(file);
7a6a19b1 2957
6d5786a3 2958 xid = get_xid();
7a6a19b1 2959
f04a703c 2960 if (!CIFS_CACHE_READ(CIFS_I(inode)))
4f73c7d3 2961 rc = cifs_zap_mapping(inode);
f04a703c
MW
2962 if (!rc)
2963 rc = generic_file_mmap(file, vma);
2964 if (!rc)
ca83ce3d 2965 vma->vm_ops = &cifs_file_vm_ops;
f04a703c 2966
6d5786a3 2967 free_xid(xid);
7a6a19b1
PS
2968 return rc;
2969}
2970
1da177e4
LT
2971int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2972{
1da177e4
LT
2973 int rc, xid;
2974
6d5786a3 2975 xid = get_xid();
f04a703c 2976
abab095d 2977 rc = cifs_revalidate_file(file);
f04a703c 2978 if (rc)
f96637be
JP
2979 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
2980 rc);
f04a703c
MW
2981 if (!rc)
2982 rc = generic_file_mmap(file, vma);
2983 if (!rc)
ca83ce3d 2984 vma->vm_ops = &cifs_file_vm_ops;
f04a703c 2985
6d5786a3 2986 free_xid(xid);
1da177e4
LT
2987 return rc;
2988}
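Both mmap entry points above revalidate or zap the page cache and then fall back to generic_file_mmap(), installing cifs_file_vm_ops so faults are serviced by filemap_fault. A minimal read-only mapping from userspace, with a hypothetical path (illustrative only, not part of this file):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	char *p;
	int fd = open("/mnt/cifs/demo.dat", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0) {
		perror("open/fstat");
		return 1;
	}
	p = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("first byte: 0x%02x\n", (unsigned char)p[0]);	/* faulted in via the page cache */
	munmap(p, st.st_size);
	close(fd);
	return 0;
}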
2989
a403a0a3
SF
2990static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
2991{
2992 struct cifsFileInfo *open_file;
2993
cb248819 2994 spin_lock(&cifs_inode->open_file_lock);
a403a0a3 2995 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
2e396b83 2996 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
cb248819 2997 spin_unlock(&cifs_inode->open_file_lock);
a403a0a3
SF
2998 return 1;
2999 }
3000 }
cb248819 3001 spin_unlock(&cifs_inode->open_file_lock);
a403a0a3
SF
3002 return 0;
3003}
3004
1da177e4
LT
3005/* We do not want to update the file size from the server for inodes
3006 open for write, to avoid races with writepage extending
3007 the file. In the future we could consider allowing
fb8c4b14 3008 refreshing the inode only on increases in the file size,
1da177e4
LT
3009 but this is tricky to do without racing with writebehind
3010 page caching in the current Linux kernel design */
e4b61f3b
BS
3011bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file,
3012 bool from_readdir)
1da177e4 3013{
a403a0a3 3014 if (!cifsInode)
4b18f2a9 3015 return true;
50c2f753 3016
e4b61f3b
BS
3017 if (is_inode_writable(cifsInode) ||
3018 ((cifsInode->oplock & CIFS_CACHE_RW_FLG) != 0 && from_readdir)) {
a403a0a3 3019 /* This inode is open for write at least once */
c32a0b68
SF
3020 struct cifs_sb_info *cifs_sb;
3021
874c8ca1 3022 cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb);
ad7a2926 3023 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
fb8c4b14 3024 /* since no page cache to corrupt on directio
c32a0b68 3025 we can change size safely */
4b18f2a9 3026 return true;
c32a0b68
SF
3027 }
3028
874c8ca1 3029 if (i_size_read(&cifsInode->netfs.inode) < end_of_file)
4b18f2a9 3030 return true;
7ba52631 3031
4b18f2a9 3032 return false;
23e7dd7d 3033 } else
4b18f2a9 3034 return true;
1da177e4
LT
3035}
3036
9b646972 3037void cifs_oplock_break(struct work_struct *work)
3bc303c2
JL
3038{
3039 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3040 oplock_break);
2b0143b5 3041 struct inode *inode = d_inode(cfile->dentry);
e8f5f849 3042 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3bc303c2 3043 struct cifsInodeInfo *cinode = CIFS_I(inode);
e8f5f849
SF
3044 struct cifs_tcon *tcon;
3045 struct TCP_Server_Info *server;
3046 struct tcon_link *tlink;
eb4b756b 3047 int rc = 0;
59a556ae
BS
3048 bool purge_cache = false, oplock_break_cancelled;
3049 __u64 persistent_fid, volatile_fid;
3050 __u16 net_fid;
3bc303c2 3051
c11f1df5 3052 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
74316201 3053 TASK_UNINTERRUPTIBLE);
c11f1df5 3054
e8f5f849
SF
3055 tlink = cifs_sb_tlink(cifs_sb);
3056 if (IS_ERR(tlink))
3057 goto out;
3058 tcon = tlink_tcon(tlink);
3059 server = tcon->ses->server;
3060
9bd45408
PS
3061 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3062 cfile->oplock_epoch, &purge_cache);
c11f1df5 3063
18cceb6a 3064 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
63b7d3a4 3065 cifs_has_mand_locks(cinode)) {
f96637be
JP
3066 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3067 inode);
18cceb6a 3068 cinode->oplock = 0;
63b7d3a4
PS
3069 }
3070
3bc303c2 3071 if (inode && S_ISREG(inode->i_mode)) {
18cceb6a 3072 if (CIFS_CACHE_READ(cinode))
8737c930 3073 break_lease(inode, O_RDONLY);
d54ff732 3074 else
8737c930 3075 break_lease(inode, O_WRONLY);
3bc303c2 3076 rc = filemap_fdatawrite(inode->i_mapping);
9bd45408 3077 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
eb4b756b
JL
3078 rc = filemap_fdatawait(inode->i_mapping);
3079 mapping_set_error(inode->i_mapping, rc);
4f73c7d3 3080 cifs_zap_mapping(inode);
3bc303c2 3081 }
f96637be 3082 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
9bd45408
PS
3083 if (CIFS_CACHE_WRITE(cinode))
3084 goto oplock_break_ack;
3bc303c2
JL
3085 }
3086
85160e03
PS
3087 rc = cifs_push_locks(cfile);
3088 if (rc)
f96637be 3089 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
85160e03 3090
9bd45408 3091oplock_break_ack:
d906be3f
BS
3092 /*
3093 * When oplock break is received and there are no active
3094 * file handles but cached, then schedule deferred close immediately.
3095 * So, new open will not use cached handle.
3096 */
d906be3f 3097
47592fa8 3098 if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
d906be3f 3099 cifs_close_deferred_file(cinode);
d906be3f 3100
59a556ae
BS
3101 persistent_fid = cfile->fid.persistent_fid;
3102 volatile_fid = cfile->fid.volatile_fid;
3103 net_fid = cfile->fid.netfid;
3104 oplock_break_cancelled = cfile->oplock_break_cancelled;
3105
3106 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
9e992755 3107 /*
da787d5b
BS
3108 * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
3109 * an acknowledgment to be sent when the file has already been closed.
9e992755 3110 */
da787d5b 3111 spin_lock(&cinode->open_file_lock);
e8f5f849
SF
3112 /* check list empty since can race with kill_sb calling tree disconnect */
3113 if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
da787d5b 3114 spin_unlock(&cinode->open_file_lock);
e8f5f849
SF
3115 rc = server->ops->oplock_response(tcon, persistent_fid,
3116 volatile_fid, net_fid, cinode);
da787d5b
BS
3117 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3118 } else
3119 spin_unlock(&cinode->open_file_lock);
9e31678f 3120
e8f5f849
SF
3121 cifs_put_tlink(tlink);
3122out:
c11f1df5 3123 cifs_done_oplock_break(cinode);
3bc303c2
JL
3124}
3125
4e8aea30
SF
3126static int cifs_swap_activate(struct swap_info_struct *sis,
3127 struct file *swap_file, sector_t *span)
3128{
3129 struct cifsFileInfo *cfile = swap_file->private_data;
3130 struct inode *inode = swap_file->f_mapping->host;
3131 unsigned long blocks;
3132 long long isize;
3133
3134 cifs_dbg(FYI, "swap activate\n");
3135
e1209d3a
N
3136 if (!swap_file->f_mapping->a_ops->swap_rw)
3137 /* Cannot support swap */
3138 return -EINVAL;
3139
4e8aea30
SF
3140 spin_lock(&inode->i_lock);
3141 blocks = inode->i_blocks;
3142 isize = inode->i_size;
3143 spin_unlock(&inode->i_lock);
3144 if (blocks*512 < isize) {
3145 pr_warn("swap activate: swapfile has holes\n");
3146 return -EINVAL;
3147 }
3148 *span = sis->pages;
3149
a0a3036b 3150 pr_warn_once("Swap support over SMB3 is experimental\n");
4e8aea30
SF
3151
3152 /*
3153 * TODO: consider adding ACL (or documenting how) to prevent other
3154 * users (on this or other systems) from reading it
3155 */
3156
3157
3158 /* TODO: add sk_set_memalloc(inet) or similar */
3159
3160 if (cfile)
3161 cfile->swapfile = true;
3162 /*
3163 * TODO: Since file already open, we can't open with DENY_ALL here
3164 * but we could add call to grab a byte range lock to prevent others
3165 * from reading or writing the file
3166 */
3167
4b60c0ff
N
3168 sis->flags |= SWP_FS_OPS;
3169 return add_swap_extent(sis, 0, sis->max, 0);
4e8aea30
SF
3170}
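cifs_swap_activate() refuses a swapfile with holes by comparing i_blocks*512 against i_size. The same shape of check can be run from userspace with fstat(2) before handing a file to mkswap/swapon; a sketch with a hypothetical path (illustrative only, not part of this file):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat st;
	int fd = open("/mnt/cifs/swapfile", O_RDONLY);

	if (fd < 0 || fstat(fd, &st) < 0) {
		perror("open/fstat");
		return 1;
	}
	/* same shape as the kernel check: allocated blocks must cover the size */
	if ((long long)st.st_blocks * 512 < (long long)st.st_size)
		puts("file has holes - swap activation would be refused");
	else
		puts("fully allocated - passes the hole check");
	close(fd);
	return 0;
}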
3171
3172static void cifs_swap_deactivate(struct file *file)
3173{
3174 struct cifsFileInfo *cfile = file->private_data;
3175
3176 cifs_dbg(FYI, "swap deactivate\n");
3177
3178 /* TODO: undo sk_set_memalloc(inet) will eventually be needed */
3179
3180 if (cfile)
3181 cfile->swapfile = false;
3182
3183 /* do we need to unpin (or unlock) the file */
3184}
dca69288 3185
16e00683
SF
3186/**
3187 * cifs_swap_rw - SMB3 address space operation for swap I/O
3188 * @iocb: target I/O control block
3189 * @iter: I/O buffer
3190 *
3191 * Perform IO to the swap-file. This is much like direct IO.
3192 */
3193static int cifs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
3194{
3195 ssize_t ret;
3196
16e00683
SF
3197 if (iov_iter_rw(iter) == READ)
3198 ret = netfs_unbuffered_read_iter_locked(iocb, iter);
3199 else
3200 ret = netfs_unbuffered_write_iter_locked(iocb, iter, NULL);
3201 if (ret < 0)
3202 return ret;
3203 return 0;
3204}
3205
f5e54d6e 3206const struct address_space_operations cifs_addr_ops = {
3ee1a1fc
DH
3207 .read_folio = netfs_read_folio,
3208 .readahead = netfs_readahead,
3209 .writepages = netfs_writepages,
3210 .dirty_folio = netfs_dirty_folio,
3211 .release_folio = netfs_release_folio,
3212 .direct_IO = noop_direct_IO,
3213 .invalidate_folio = netfs_invalidate_folio,
3214 .migrate_folio = filemap_migrate_folio,
4e8aea30 3215 /*
9381666e
CH
3216 * TODO: investigate and if useful we could add an is_dirty_writeback
3217 * helper if needed
4e8aea30 3218 */
3ee1a1fc 3219 .swap_activate = cifs_swap_activate,
4e8aea30 3220 .swap_deactivate = cifs_swap_deactivate,
16e00683 3221 .swap_rw = cifs_swap_rw,
1da177e4 3222};
273d81d6
DK
3223
3224/*
ce3bb0d2 3225 * cifs_readahead requires the server to support a buffer large enough to
273d81d6 3226 * contain the header plus one complete page of data. Otherwise, we need
ce3bb0d2 3227 * to leave cifs_readahead out of the address space operations.
273d81d6 3228 */
f5e54d6e 3229const struct address_space_operations cifs_addr_ops_smallbuf = {
3ee1a1fc
DH
3230 .read_folio = netfs_read_folio,
3231 .writepages = netfs_writepages,
3232 .dirty_folio = netfs_dirty_folio,
3233 .release_folio = netfs_release_folio,
3234 .invalidate_folio = netfs_invalidate_folio,
3235 .migrate_folio = filemap_migrate_folio,
273d81d6 3236};