// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright (C) International Business Machines Corp., 2002,2008
 */
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/*
 * The xid serves as a useful identifier for each incoming vfs request,
 * similar to the way the mid is used to track each SMB sent. CurrentXid
 * also provides a running counter (although it will eventually wrap past
 * zero) of the total vfs operations handled since the cifs fs was mounted.
 */
	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);

_free_xid(unsigned int xid)
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
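
/*
 * Usage sketch (illustrative, not from this file; "some_op" is a
 * placeholder, not a real member): callers bracket each VFS operation
 * with get_xid()/free_xid() so the active-request count stays balanced:
 *
 *	unsigned int xid = get_xid();
 *	rc = server->ops->some_op(xid, ...);
 *	free_xid(xid);
 */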
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;

sesInfoFree(struct cifs_ses *buf_to_free)
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	unload_nls(buf_to_free->local_nls);
	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
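	/* kfree_sensitive() zeroes the buffer before freeing it, so that
	 * credentials and key material do not linger in freed memory
	 */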
	kfree_sensitive(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
	struct cifs_tcon *ret_buf;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;
	ret_buf->cfids = init_cached_dirs();
	if (!ret_buf->cfids) {
		kfree(ret_buf);
		return NULL;
	}

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif

	return ret_buf;

tconInfoFree(struct cifs_tcon *tcon)
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	kfree_sensitive(tcon->password);
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_put_root_smb_sessions(&tcon->dfs_ses_list);
#endif
	kfree(tcon->origin_fullpath);
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so it is no problem
	 * to clear a few more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc the same size,
	 * albeit slightly larger than necessary, and maxbuffersize
	 * defaults to this and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */
cifs_buf_release(void *buf_to_free)
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&buf_alloc_count);

cifs_small_buf_get(void)
	struct smb_hdr *ret_buf = NULL;

	/*
	 * We could use the negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc the same size,
	 * albeit slightly larger than necessary, and maxbuffersize
	 * defaults to this and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

cifs_small_buf_release(void *buf_to_free)
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&small_buf_alloc_count);
free_rsp_buf(int resp_buftype, void *rsp)
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
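
/*
 * Usage sketch (illustrative; "some_send_recv" is a placeholder, not a
 * real helper): response buffers come back from the transport tagged
 * with a buffer type, which the caller hands back here so the buffer is
 * returned to the matching mempool:
 *
 *	rc = some_send_recv(xid, ..., &rsp_iov, &resp_buftype);
 *	...
 *	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */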

/* NB: MID cannot be set if treeCon is not passed in; in that
   case it is the responsibility of the caller to set the mid */
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */) ;
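	/*
	 * Worked example (sketch, assuming sizeof(struct smb_hdr) includes
	 * the 4-byte RFC 1001 length prefix): for word_count = 0 this
	 * computes 32 (header) + 1 (wct byte) + 2 (bcc field) = 35 bytes
	 * following the length prefix.
	 */
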
	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			if (treeCon->ses->server)
				buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sign)
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

	/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
check_smb_hdr(struct smb_hdr *smb)
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */

	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
		 total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
		} else {
			cifs_dbg(VFS, "Length less than smb header size\n");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
			 rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
				 rfclen, mid);
			return -EIO;
		}
	}
	return 0;
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct TCP_Server_Info *pserver;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);

		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);
			if (data_offset >
			    len - sizeof(struct file_notify_information)) {
				cifs_dbg(FYI, "Invalid data_offset %u\n",
					 data_offset);
				return true;
			}
			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ", buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle during an
		   oplock break - a harmless race between a close request
		   and an oplock break response is expected from time to
		   time when writing out large dirty files cached on the
		   client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "Invalid handle on oplock break\n");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on a valid oplock break we get a "request" */
		}
	}

	if (pSMB->hdr.WordCount != 8)
		return false;

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* If server is a channel, select the primary channel */
	pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each_entry(netfile, &tcon->openFileList, tlist) {
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
					&pCifsInode->flags);

				netfile->oplock_epoch = 0;
				netfile->oplock_level = pSMB->OplockLevel;
				netfile->oplock_break_cancelled = false;
				cifs_queue_oplock_break(netfile);

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
	return true;
dump_smb(void *buf, int smb_buf_length)
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);

cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
	}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;

void cifs_put_writer(struct cifsInodeInfo *cinode)
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
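
/*
 * Usage sketch (illustrative, not from this file): write paths bracket
 * their work with the writer count so oplock-break processing can wait
 * for in-flight writes to drain:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... issue the write ...
 *	cifs_put_writer(cinode);
 */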

/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
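
/*
 * Lifecycle note (not enforced here): the queued oplock break handler
 * is expected to call cifs_done_oplock_break() when it finishes, which
 * clears the pending bit and wakes writers blocked in cifs_get_writer().
 */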

backup_cred(struct cifs_sb_info *cifs_sb)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;

cifs_del_pending_open(struct cifs_pending_open *open)
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);

cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);

cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
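
/*
 * Usage sketch (illustrative, not from this file; the open call is a
 * placeholder for whatever operation the caller issues): a pending open
 * is registered before the open goes on the wire, and deleted once the
 * open completes or fails:
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(&fid, tlink, &open);
 *	rc = ...issue the open on the wire...;
 *	cifs_del_pending_open(&open);
 */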

/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
		    (dclose->persistent_fid == cfile->fid.persistent_fid) &&
		    (dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;

/*
 * Critical section which runs after acquiring deferred_lock.
 */
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);

/*
 * Critical section which runs after acquiring deferred_lock.
 */
cifs_del_deferred_close(struct cifsFileInfo *cfile)
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);

cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	if (cifs_inode == NULL)
		return;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&cifs_inode->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&cifs_inode->deferred_lock);
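
				/*
				 * GFP_ATOMIC below because open_file_lock
				 * (a spinlock) is still held on this path
				 */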
				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, false, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}

cifs_close_all_deferred_files(struct cifs_tcon *tcon)
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;

	INIT_LIST_HEAD(&file_head);
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}

cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	struct list_head file_head;
	void *page;
	const char *full_path;

	INIT_LIST_HEAD(&file_head);
	page = alloc_dentry_path();
	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		full_path = build_path_from_dentry(cfile->dentry, page);
		if (strstr(full_path, path)) {
			if (delayed_work_pending(&cfile->deferred)) {
				if (cancel_delayed_work(&cfile->deferred)) {
					spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
					cifs_del_deferred_close(cfile);
					spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

					tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
					if (tmp_list == NULL)
						break;
					tmp_list->cfile = cfile;
					list_add_tail(&tmp_list->list, &file_head);
				}
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
	free_dentry_path(page);

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals must be at least 1, but got %d\n",
			 *num_of_nodes);
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
	}
	return rc;

struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
	struct cifs_aio_ctx *ctx;

	/*
	 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
	 * to false so that we know when we have to unreference pages within
	 * cifs_aio_ctx_release()
	 */
	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);
	return ctx;

cifs_aio_ctx_release(struct kref *refcount)
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);

	/*
	 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
	 * which means that iov_iter_extract_pages() was a success and thus
	 * that we may have references or pins on pages that we need to
	 * release.
	 */
	if (ctx->should_dirty || ctx->bv_need_unpin) {
		unsigned int i;

		for (i = 0; i < ctx->nr_pinned_pages; i++) {
			struct page *page = ctx->bv[i].bv_page;

			if (ctx->should_dirty)
				set_page_dirty(page);
			if (ctx->bv_need_unpin)
				unpin_user_page(page);
		}
	}
	kvfree(ctx->bv);
	kfree(ctx);

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
	int rc = 0;
	struct crypto_shash *alg = NULL;

	if (*sdesc)
		return 0;

	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
		*sdesc = NULL;
		return rc;
	}

	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
		return -ENOMEM;
	}

	(*sdesc)->tfm = alg;
	return 0;

/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
cifs_free_hash(struct shash_desc **sdesc)
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

	kfree_sensitive(*sdesc);
	*sdesc = NULL;
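
/*
 * Usage sketch (illustrative, not from this file): allocate once, run
 * the usual shash sequence, then free:
 *
 *	struct shash_desc *sdesc = NULL;
 *
 *	rc = cifs_alloc_hash("md5", &sdesc);
 *	if (rc)
 *		return rc;
 *	crypto_shash_init(sdesc);
 *	crypto_shash_update(sdesc, data, len);
 *	crypto_shash_final(sdesc, digest);
 *	cifs_free_hash(&sdesc);
 */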

void extract_unc_hostname(const char *unc, const char **h, size_t *len)
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;

/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * Returns the number of bytes written (including the trailing nul).
 */
int copy_path_name(char *dst, const char *src)
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
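
/*
 * Example (sketch; "pSMB->FileName" is illustrative of a typical caller):
 * because the trailing nul is counted, the result can feed directly into
 * a wire length computation:
 *
 *	name_len = copy_path_name(pSMB->FileName, path);
 *	... add name_len to the request's byte count ...
 */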

struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcon_super_cb(struct super_block *sb, void *arg)
	struct super_cb_data *sd = arg;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *t1 = sd->data, *t2;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	t2 = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&t2->tc_lock);
	if (t1->ses == t2->ses &&
	    t1->ses->server == t2->ses->server &&
	    t2->origin_fullpath &&
	    dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
		sd->sb = sb;
	spin_unlock(&t2->tc_lock);

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
	struct super_cb_data sd = {
		.data = data,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent
			 * automounts (DFS links) from expiring and then
			 * freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	pr_warn_once("%s: could not find dfs superblock\n", __func__);
	return ERR_PTR(-EINVAL);

static void __cifs_put_super(struct super_block *sb)
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);

struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return ERR_PTR(-ENOENT);
	}
	spin_unlock(&tcon->tc_lock);
	return __cifs_get_super(tcon_super_cb, tcon);

void cifs_put_tcp_super(struct super_block *sb)
	__cifs_put_super(sb);

#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
		    const char *share, size_t share_len,
		    bool *result)
	int rc;
	char *target;
	struct sockaddr_storage ss;

	*result = false;

	target = kzalloc(share_len + 3, GFP_KERNEL);
	if (!target)
		return -ENOMEM;

	scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);

	rc = dns_resolve_server_name_to_ip(target, (struct sockaddr *)&ss, NULL);
	kfree(target);
	if (rc < 0)
		return rc;

	spin_lock(&server->srv_lock);
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	spin_unlock(&server->srv_lock);
	cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
	return 0;

int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
	int rc = 0;

	kfree(cifs_sb->prepath);
	cifs_sb->prepath = NULL;

	if (prefix && *prefix) {
		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
		if (IS_ERR(cifs_sb->prepath)) {
			rc = PTR_ERR(cifs_sb->prepath);
			cifs_sb->prepath = NULL;
			goto out;
		}
		if (cifs_sb->prepath)
			convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	}

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
out:
	return rc;
#endif

/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon))
		return 0;

	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return 0;
	}
	spin_unlock(&tcon->tc_lock);

	/*
	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
	 * to get a referral to figure out whether it is a DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end up filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		ses = CIFS_DFS_ROOT_SES(ses);
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;

int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
	int timeout = 10;
	int rc;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- should be greater than the cifs socket
	 * timeout, which is 7 seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until
	 * the process is killed or the server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);