4 * Copyright (C) International Business Machines Corp., 2002,2008
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (i.e. something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/uuid.h>
41 #include <linux/xattr.h>
45 #define DECLARE_GLOBALS_HERE
47 #include "cifsproto.h"
48 #include "cifs_debug.h"
49 #include "cifs_fs_sb.h"
51 #include <linux/key-type.h>
52 #include "cifs_spnego.h"
55 #ifdef CONFIG_CIFS_DFS_UPCALL
56 #include "dfs_cache.h"
60 * DOS dates from 1980/1/1 through 2107/12/31
61 * Protocol specifications indicate the range should be to 119, which
62 * limits maximum year to 2099. But this range has not been checked.
64 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
65 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
66 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
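/*
 * Informational note (added for clarity): a DOS date packs (year - 1980)
 * in bits 15-9, month in bits 8-5 and day in bits 4-0, while a DOS time
 * packs hours in bits 15-11, minutes in bits 10-5 and seconds/2 in bits
 * 4-0.  So SMB_DATE_MAX (127<<9 | 12<<5 | 31) decodes to 2107/12/31 and
 * SMB_TIME_MAX (23<<11 | 59<<5 | 29) decodes to 23:59:58, the latest
 * time representable at the 2 second resolution.
 */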
70 bool enable_oplocks = true;
71 bool linuxExtEnabled = true;
72 bool lookupCacheEnabled = true;
73 bool disable_legacy_dialects; /* false by default */
74 unsigned int global_secflags = CIFSSEC_DEF;
75 /* unsigned int ntlmv2_support = 0; */
76 unsigned int sign_CIFS_PDUs = 1;
77 static const struct super_operations cifs_super_ops;
78 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
79 module_param(CIFSMaxBufSize, uint, 0444);
80 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
82 "Default: 16384 Range: 8192 to 130048");
83 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
84 module_param(cifs_min_rcv, uint, 0444);
85 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
87 unsigned int cifs_min_small = 30;
88 module_param(cifs_min_small, uint, 0444);
89 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
91 unsigned int cifs_max_pending = CIFS_MAX_REQ;
92 module_param(cifs_max_pending, uint, 0444);
93 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
94 "CIFS/SMB1 dialect (N/A for SMB3) "
95 "Default: 32767 Range: 2 to 32767.");
96 #ifdef CONFIG_CIFS_STATS2
97 unsigned int slow_rsp_threshold = 1;
98 module_param(slow_rsp_threshold, uint, 0644);
99 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
100 "before logging that a response is delayed. "
101 "Default: 1 (if set to 0 disables msg).");
104 module_param(enable_oplocks, bool, 0644);
105 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
107 module_param(disable_legacy_dialects, bool, 0644);
108 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
109 "helpful to restrict the ability to "
110 "override the default dialects (SMB2.1, "
111 "SMB3 and SMB3.02) on mount with old "
112 "dialects (CIFS/SMB1 and SMB2) since "
113 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
114 " and less secure. Default: n/N/0");
116 extern mempool_t *cifs_sm_req_poolp;
117 extern mempool_t *cifs_req_poolp;
118 extern mempool_t *cifs_mid_poolp;
120 struct workqueue_struct *cifsiod_wq;
121 struct workqueue_struct *decrypt_wq;
122 struct workqueue_struct *fileinfo_put_wq;
123 struct workqueue_struct *cifsoplockd_wq;
124 __u32 cifs_lock_secret;
127 * Bumps refcount for cifs super block.
128 * Note that it should only be called if a reference to the VFS super block is
129 * already held, e.g. in open-type syscalls context. Otherwise it can race with
130 * atomic_dec_and_test in deactivate_locked_super.
133 cifs_sb_active(struct super_block *sb)
135 struct cifs_sb_info *server = CIFS_SB(sb);
137 if (atomic_inc_return(&server->active) == 1)
138 atomic_inc(&sb->s_active);
142 cifs_sb_deactive(struct super_block *sb)
144 struct cifs_sb_info *server = CIFS_SB(sb);
146 if (atomic_dec_and_test(&server->active))
147 deactivate_super(sb);
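/*
 * Descriptive note: these two helpers are meant to be used in pairs,
 * e.g. bumping the count when a file on this mount is opened and
 * dropping it again when the last reference to that file goes away, so
 * that the superblock is not torn down while such references remain.
 */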
151 cifs_read_super(struct super_block *sb)
154 struct cifs_sb_info *cifs_sb;
155 struct cifs_tcon *tcon;
156 struct timespec64 ts;
159 cifs_sb = CIFS_SB(sb);
160 tcon = cifs_sb_master_tcon(cifs_sb);
162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
163 sb->s_flags |= SB_POSIXACL;
165 if (tcon->snapshot_time)
166 sb->s_flags |= SB_RDONLY;
168 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
169 sb->s_maxbytes = MAX_LFS_FILESIZE;
171 sb->s_maxbytes = MAX_NON_LFS;
174 * Some very old servers like DOS and OS/2 used 2 second granularity
175 * (while all current servers use 100ns granularity - see MS-DTYP)
176 * but 1 second is the maximum allowed granularity for the VFS
177 * so for old servers set time granularity to 1 second while for
178 * everything else (current servers) set it to 100ns.
180 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
181 ((tcon->ses->capabilities &
182 tcon->ses->server->vals->cap_nt_find) == 0) &&
184 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
185 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
186 sb->s_time_min = ts.tv_sec;
187 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
188 cpu_to_le16(SMB_TIME_MAX), 0);
189 sb->s_time_max = ts.tv_sec;
192 * Almost every server, including all SMB2+, uses DCE TIME
193 * i.e. 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
195 sb->s_time_gran = 100;
196 ts = cifs_NTtimeToUnix(0);
197 sb->s_time_min = ts.tv_sec;
198 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
199 sb->s_time_max = ts.tv_sec;
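/*
 * Worked example (informational): NT/DCE time counts 100 ns units from
 * 1601-01-01, which is 11644473600 seconds before the Unix epoch, so
 * cifs_NTtimeToUnix(0) yields an s_time_min of -11644473600 (the year
 * 1601) and converting S64_MAX (about 2^63 / 10^7 seconds) pushes
 * s_time_max out to roughly the year 30828.
 */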
202 sb->s_magic = CIFS_MAGIC_NUMBER;
203 sb->s_op = &cifs_super_ops;
204 sb->s_xattr = cifs_xattr_handlers;
205 rc = super_setup_bdi(sb);
208 /* tune readahead according to rsize */
209 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
211 sb->s_blocksize = CIFS_MAX_MSGSIZE;
212 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
213 inode = cifs_root_iget(sb);
221 sb->s_d_op = &cifs_ci_dentry_ops;
223 sb->s_d_op = &cifs_dentry_ops;
225 sb->s_root = d_make_root(inode);
231 #ifdef CONFIG_CIFS_NFSD_EXPORT
232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
233 cifs_dbg(FYI, "export ops supported\n");
234 sb->s_export_op = &cifs_export_ops;
236 #endif /* CONFIG_CIFS_NFSD_EXPORT */
241 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
245 static void cifs_kill_sb(struct super_block *sb)
247 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
249 cifs_umount(cifs_sb);
253 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
255 struct super_block *sb = dentry->d_sb;
256 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
257 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
258 struct TCP_Server_Info *server = tcon->ses->server;
264 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
266 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
268 buf->f_namelen = PATH_MAX;
270 buf->f_fsid.val[0] = tcon->vol_serial_number;
271 /* we are using part of the create time for extra randomness, see man statfs */
272 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
274 buf->f_files = 0; /* undefined */
275 buf->f_ffree = 0; /* unlimited */
277 if (server->ops->queryfs)
278 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
284 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
286 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
287 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
288 struct TCP_Server_Info *server = tcon->ses->server;
290 if (server->ops->fallocate)
291 return server->ops->fallocate(file, tcon, mode, off, len);
296 static int cifs_permission(struct inode *inode, int mask)
298 struct cifs_sb_info *cifs_sb;
300 cifs_sb = CIFS_SB(inode->i_sb);
302 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
303 if ((mask & MAY_EXEC) && !execute_ok(inode))
307 } else /* file mode might have been restricted at mount time
308 on the client (above and beyond ACL on servers) for
309 servers which do not support setting and viewing mode bits,
310 so allowing client to check permissions is useful */
311 return generic_permission(inode, mask);
314 static struct kmem_cache *cifs_inode_cachep;
315 static struct kmem_cache *cifs_req_cachep;
316 static struct kmem_cache *cifs_mid_cachep;
317 static struct kmem_cache *cifs_sm_req_cachep;
318 mempool_t *cifs_sm_req_poolp;
319 mempool_t *cifs_req_poolp;
320 mempool_t *cifs_mid_poolp;
322 static struct inode *
323 cifs_alloc_inode(struct super_block *sb)
325 struct cifsInodeInfo *cifs_inode;
326 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
329 cifs_inode->cifsAttrs = 0x20; /* default */
330 cifs_inode->time = 0;
332 * Until the file is open and we have gotten oplock info back from the
333 * server, can not assume caching of file data or metadata.
335 cifs_set_oplock_level(cifs_inode, 0);
336 cifs_inode->flags = 0;
337 spin_lock_init(&cifs_inode->writers_lock);
338 cifs_inode->writers = 0;
339 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
340 cifs_inode->server_eof = 0;
341 cifs_inode->uniqueid = 0;
342 cifs_inode->createtime = 0;
343 cifs_inode->epoch = 0;
344 spin_lock_init(&cifs_inode->open_file_lock);
345 generate_random_uuid(cifs_inode->lease_key);
348 * Can not set i_flags here - they get immediately overwritten to zero
351 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
352 INIT_LIST_HEAD(&cifs_inode->openFileList);
353 INIT_LIST_HEAD(&cifs_inode->llist);
354 return &cifs_inode->vfs_inode;
358 cifs_free_inode(struct inode *inode)
360 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
364 cifs_evict_inode(struct inode *inode)
366 truncate_inode_pages_final(&inode->i_data);
368 cifs_fscache_release_inode_cookie(inode);
372 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
374 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
375 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
377 seq_puts(s, ",addr=");
379 switch (server->dstaddr.ss_family) {
381 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
384 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
385 if (sa6->sin6_scope_id)
386 seq_printf(s, "%%%u", sa6->sin6_scope_id);
389 seq_puts(s, "(unknown)");
392 seq_puts(s, ",rdma");
396 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
398 if (ses->sectype == Unspecified) {
399 if (ses->user_name == NULL)
400 seq_puts(s, ",sec=none");
404 seq_puts(s, ",sec=");
406 switch (ses->sectype) {
408 seq_puts(s, "lanman");
411 seq_puts(s, "ntlmv2");
420 seq_puts(s, "ntlmssp");
423 /* shouldn't ever happen */
424 seq_puts(s, "unknown");
431 if (ses->sectype == Kerberos)
432 seq_printf(s, ",cruid=%u",
433 from_kuid_munged(&init_user_ns, ses->cred_uid));
437 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
439 seq_puts(s, ",cache=");
441 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
442 seq_puts(s, "strict");
443 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
445 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
446 seq_puts(s, "singleclient"); /* assume only one client access */
447 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
448 seq_puts(s, "ro"); /* read only caching assumed */
450 seq_puts(s, "loose");
454 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
456 struct nls_table *def;
458 /* Display iocharset= option if it's not default charset */
459 def = load_nls_default();
461 seq_printf(s, ",iocharset=%s", cur->charset);
466 * cifs_show_options() is for displaying mount options in /proc/mounts.
467 * Not all settable options are displayed but most of the important
471 cifs_show_options(struct seq_file *s, struct dentry *root)
473 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
474 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
475 struct sockaddr *srcaddr;
476 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
478 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
479 cifs_show_security(s, tcon->ses);
480 cifs_show_cache_flavor(s, cifs_sb);
483 seq_puts(s, ",nolease");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
485 seq_puts(s, ",multiuser");
486 else if (tcon->ses->user_name)
487 seq_show_option(s, "username", tcon->ses->user_name);
489 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
490 seq_show_option(s, "domain", tcon->ses->domainName);
492 if (srcaddr->sa_family != AF_UNSPEC) {
493 struct sockaddr_in *saddr4;
494 struct sockaddr_in6 *saddr6;
495 saddr4 = (struct sockaddr_in *)srcaddr;
496 saddr6 = (struct sockaddr_in6 *)srcaddr;
497 if (srcaddr->sa_family == AF_INET6)
498 seq_printf(s, ",srcaddr=%pI6c",
500 else if (srcaddr->sa_family == AF_INET)
501 seq_printf(s, ",srcaddr=%pI4",
502 &saddr4->sin_addr.s_addr);
504 seq_printf(s, ",srcaddr=BAD-AF:%i",
505 (int)(srcaddr->sa_family));
508 seq_printf(s, ",uid=%u",
509 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
510 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
511 seq_puts(s, ",forceuid");
513 seq_puts(s, ",noforceuid");
515 seq_printf(s, ",gid=%u",
516 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
517 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
518 seq_puts(s, ",forcegid");
520 seq_puts(s, ",noforcegid");
522 cifs_show_address(s, tcon->ses->server);
525 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
526 cifs_sb->mnt_file_mode,
527 cifs_sb->mnt_dir_mode);
529 cifs_show_nls(s, cifs_sb->local_nls);
532 seq_puts(s, ",seal");
533 else if (tcon->ses->server->ignore_signature)
534 seq_puts(s, ",signloosely");
536 seq_puts(s, ",nocase");
537 if (tcon->local_lease)
538 seq_puts(s, ",locallease");
540 seq_puts(s, ",hard");
542 seq_puts(s, ",soft");
543 if (tcon->use_persistent)
544 seq_puts(s, ",persistenthandles");
545 else if (tcon->use_resilient)
546 seq_puts(s, ",resilienthandles");
547 if (tcon->posix_extensions)
548 seq_puts(s, ",posix");
549 else if (tcon->unix_ext)
550 seq_puts(s, ",unix");
552 seq_puts(s, ",nounix");
553 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
554 seq_puts(s, ",nodfs");
555 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
556 seq_puts(s, ",posixpaths");
557 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
558 seq_puts(s, ",setuids");
559 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
560 seq_puts(s, ",idsfromsid");
561 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
562 seq_puts(s, ",serverino");
563 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
564 seq_puts(s, ",rwpidforward");
565 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
566 seq_puts(s, ",forcemand");
567 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
568 seq_puts(s, ",nouser_xattr");
569 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
570 seq_puts(s, ",mapchars");
571 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
572 seq_puts(s, ",mapposix");
573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
576 seq_puts(s, ",nobrl");
577 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
578 seq_puts(s, ",nohandlecache");
579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
580 seq_puts(s, ",modefromsid");
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
582 seq_puts(s, ",cifsacl");
583 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
584 seq_puts(s, ",dynperm");
585 if (root->d_sb->s_flags & SB_POSIXACL)
587 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
588 seq_puts(s, ",mfsymlinks");
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
591 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
592 seq_puts(s, ",nostrictsync");
593 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
594 seq_puts(s, ",noperm");
595 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
596 seq_printf(s, ",backupuid=%u",
597 from_kuid_munged(&init_user_ns,
598 cifs_sb->mnt_backupuid));
599 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
600 seq_printf(s, ",backupgid=%u",
601 from_kgid_munged(&init_user_ns,
602 cifs_sb->mnt_backupgid));
604 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
605 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
606 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
607 if (tcon->ses->server->min_offload)
608 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
609 seq_printf(s, ",echo_interval=%lu",
610 tcon->ses->server->echo_interval / HZ);
612 /* Only display max_credits if it was overridden on mount */
613 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
614 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
616 if (tcon->snapshot_time)
617 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
618 if (tcon->handle_timeout)
619 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
620 /* convert actimeo and display it in seconds */
621 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
623 if (tcon->ses->chan_max > 1)
624 seq_printf(s, ",multichannel,max_channel=%zu",
625 tcon->ses->chan_max);
630 static void cifs_umount_begin(struct super_block *sb)
632 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
633 struct cifs_tcon *tcon;
638 tcon = cifs_sb_master_tcon(cifs_sb);
640 spin_lock(&cifs_tcp_ses_lock);
641 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
642 /* we have other mounts to same share or we have
643 already tried to force umount this and woken up
644 all waiting network requests, nothing to do */
645 spin_unlock(&cifs_tcp_ses_lock);
647 } else if (tcon->tc_count == 1)
648 tcon->tidStatus = CifsExiting;
649 spin_unlock(&cifs_tcp_ses_lock);
651 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
652 /* cancel_notify_requests(tcon); */
653 if (tcon->ses && tcon->ses->server) {
654 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
655 wake_up_all(&tcon->ses->server->request_q);
656 wake_up_all(&tcon->ses->server->response_q);
657 msleep(1); /* yield */
658 /* we have to kick the requests once more */
659 wake_up_all(&tcon->ses->server->response_q);
666 #ifdef CONFIG_CIFS_STATS2
667 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
674 static int cifs_remount(struct super_block *sb, int *flags, char *data)
677 *flags |= SB_NODIRATIME;
681 static int cifs_drop_inode(struct inode *inode)
683 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
685 /* no serverino => unconditional eviction */
686 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
687 generic_drop_inode(inode);
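/*
 * Descriptive note: without serverino the inode numbers are generated
 * on the client and are not stable across lookups, so it is safer to
 * evict such inodes as soon as the last reference is dropped than to
 * keep them cached.
 */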
690 static const struct super_operations cifs_super_ops = {
691 .statfs = cifs_statfs,
692 .alloc_inode = cifs_alloc_inode,
693 .free_inode = cifs_free_inode,
694 .drop_inode = cifs_drop_inode,
695 .evict_inode = cifs_evict_inode,
696 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
697 function unless later we add lazy close of inodes or unless the
698 kernel forgets to call us with the same number of releases (closes)
700 .show_options = cifs_show_options,
701 .umount_begin = cifs_umount_begin,
702 .remount_fs = cifs_remount,
703 #ifdef CONFIG_CIFS_STATS2
704 .show_stats = cifs_show_stats,
709 * Get root dentry from the superblock according to the prefix path mount option.
710 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
712 static struct dentry *
713 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
715 struct dentry *dentry;
716 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
717 char *full_path = NULL;
721 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
722 return dget(sb->s_root);
724 full_path = cifs_build_path_to_root(vol, cifs_sb,
725 cifs_sb_master_tcon(cifs_sb), 0);
726 if (full_path == NULL)
727 return ERR_PTR(-ENOMEM);
729 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
731 sep = CIFS_DIR_SEP(cifs_sb);
732 dentry = dget(sb->s_root);
736 struct inode *dir = d_inode(dentry);
737 struct dentry *child;
739 if (!S_ISDIR(dir->i_mode)) {
741 dentry = ERR_PTR(-ENOTDIR);
745 /* skip separators */
752 while (*s && *s != sep)
755 child = lookup_positive_unlocked(p, dentry, s - p);
758 } while (!IS_ERR(dentry));
763 static int cifs_set_super(struct super_block *sb, void *data)
765 struct cifs_mnt_data *mnt_data = data;
766 sb->s_fs_info = mnt_data->cifs_sb;
767 return set_anon_super(sb, NULL);
770 static struct dentry *
771 cifs_smb3_do_mount(struct file_system_type *fs_type,
772 int flags, const char *dev_name, void *data, bool is_smb3)
775 struct super_block *sb;
776 struct cifs_sb_info *cifs_sb;
777 struct smb_vol *volume_info;
778 struct cifs_mnt_data mnt_data;
782 * Prints in Kernel / CIFS log the attempted mount operation
783 * If CIFS_DEBUG && cifs_FYI
786 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
788 cifs_info("Attempting to mount %s\n", dev_name);
790 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
791 if (IS_ERR(volume_info))
792 return ERR_CAST(volume_info);
794 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
795 if (cifs_sb == NULL) {
796 root = ERR_PTR(-ENOMEM);
800 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
801 if (cifs_sb->mountdata == NULL) {
802 root = ERR_PTR(-ENOMEM);
806 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
812 rc = cifs_mount(cifs_sb, volume_info);
814 if (!(flags & SB_SILENT))
815 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
821 mnt_data.vol = volume_info;
822 mnt_data.cifs_sb = cifs_sb;
823 mnt_data.flags = flags;
825 /* BB should we make this contingent on mount parm? */
826 flags |= SB_NODIRATIME | SB_NOATIME;
828 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
831 cifs_umount(cifs_sb);
836 cifs_dbg(FYI, "Use existing superblock\n");
837 cifs_umount(cifs_sb);
839 rc = cifs_read_super(sb);
845 sb->s_flags |= SB_ACTIVE;
848 root = cifs_get_root(volume_info, sb);
852 cifs_dbg(FYI, "dentry root is: %p\n", root);
856 deactivate_locked_super(sb);
858 cifs_cleanup_volume_info(volume_info);
862 kfree(cifs_sb->prepath);
863 kfree(cifs_sb->mountdata);
866 unload_nls(volume_info->local_nls);
870 static struct dentry *
871 smb3_do_mount(struct file_system_type *fs_type,
872 int flags, const char *dev_name, void *data)
874 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
877 static struct dentry *
878 cifs_do_mount(struct file_system_type *fs_type,
879 int flags, const char *dev_name, void *data)
881 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
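/*
 * Usage sketch (illustrative only - exact options depend on the server):
 * both registered filesystem types end up in cifs_smb3_do_mount(), e.g.
 *   mount -t cifs //server/share /mnt -o username=user,vers=3.0
 *   mount -t smb3 //server/share /mnt -o username=user
 * with the smb3 type additionally refusing the old SMB1/CIFS dialect.
 */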
885 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
888 struct inode *inode = file_inode(iocb->ki_filp);
890 if (iocb->ki_filp->f_flags & O_DIRECT)
891 return cifs_user_readv(iocb, iter);
893 rc = cifs_revalidate_mapping(inode);
897 return generic_file_read_iter(iocb, iter);
900 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
902 struct inode *inode = file_inode(iocb->ki_filp);
903 struct cifsInodeInfo *cinode = CIFS_I(inode);
907 if (iocb->ki_filp->f_flags & O_DIRECT) {
908 written = cifs_user_writev(iocb, from);
909 if (written > 0 && CIFS_CACHE_READ(cinode)) {
910 cifs_zap_mapping(inode);
912 "Set no oplock for inode=%p after a write operation\n",
919 written = cifs_get_writer(cinode);
923 written = generic_file_write_iter(iocb, from);
925 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
928 rc = filemap_fdatawrite(inode->i_mapping);
930 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
934 cifs_put_writer(cinode);
938 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
940 struct cifsFileInfo *cfile = file->private_data;
941 struct cifs_tcon *tcon;
944 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
945 * the cached file length
947 if (whence != SEEK_SET && whence != SEEK_CUR) {
949 struct inode *inode = file_inode(file);
952 * We need to be sure that all dirty pages are written and the
953 * server has the newest file length.
955 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
956 inode->i_mapping->nrpages != 0) {
957 rc = filemap_fdatawait(inode->i_mapping);
959 mapping_set_error(inode->i_mapping, rc);
964 * Some applications poll for the file length in this strange
965 * way so we must seek to end on non-oplocked files by
966 * setting the revalidate time to zero.
968 CIFS_I(inode)->time = 0;
970 rc = cifs_revalidate_file_attr(file);
974 if (cfile && cfile->tlink) {
975 tcon = tlink_tcon(cfile->tlink);
976 if (tcon->ses->server->ops->llseek)
977 return tcon->ses->server->ops->llseek(file, tcon,
980 return generic_file_llseek(file, offset, whence);
984 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
987 * Note that this is called by vfs setlease with i_lock held to
988 * protect *lease from going away.
990 struct inode *inode = file_inode(file);
991 struct cifsFileInfo *cfile = file->private_data;
993 if (!(S_ISREG(inode->i_mode)))
996 /* Check if the file is oplocked if this is a request for a new lease */
997 if (arg == F_UNLCK ||
998 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
999 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1000 return generic_setlease(file, arg, lease, priv);
1001 else if (tlink_tcon(cfile->tlink)->local_lease &&
1002 !CIFS_CACHE_READ(CIFS_I(inode)))
1004 * If the server claims to support oplock on this file, then we
1005 * still need to check oplock even if the local_lease mount
1006 * option is set, but there are servers which do not support
1007 * oplock for which this mount option may be useful if the user
1008 * knows that the file won't be changed on the server by anyone
1011 return generic_setlease(file, arg, lease, priv);
1016 struct file_system_type cifs_fs_type = {
1017 .owner = THIS_MODULE,
1019 .mount = cifs_do_mount,
1020 .kill_sb = cifs_kill_sb,
1021 .fs_flags = FS_RENAME_DOES_D_MOVE,
1023 MODULE_ALIAS_FS("cifs");
1025 static struct file_system_type smb3_fs_type = {
1026 .owner = THIS_MODULE,
1028 .mount = smb3_do_mount,
1029 .kill_sb = cifs_kill_sb,
1030 .fs_flags = FS_RENAME_DOES_D_MOVE,
1032 MODULE_ALIAS_FS("smb3");
1033 MODULE_ALIAS("smb3");
1035 const struct inode_operations cifs_dir_inode_ops = {
1036 .create = cifs_create,
1037 .atomic_open = cifs_atomic_open,
1038 .lookup = cifs_lookup,
1039 .getattr = cifs_getattr,
1040 .unlink = cifs_unlink,
1041 .link = cifs_hardlink,
1042 .mkdir = cifs_mkdir,
1043 .rmdir = cifs_rmdir,
1044 .rename = cifs_rename2,
1045 .permission = cifs_permission,
1046 .setattr = cifs_setattr,
1047 .symlink = cifs_symlink,
1048 .mknod = cifs_mknod,
1049 .listxattr = cifs_listxattr,
1052 const struct inode_operations cifs_file_inode_ops = {
1053 .setattr = cifs_setattr,
1054 .getattr = cifs_getattr,
1055 .permission = cifs_permission,
1056 .listxattr = cifs_listxattr,
1057 .fiemap = cifs_fiemap,
1060 const struct inode_operations cifs_symlink_inode_ops = {
1061 .get_link = cifs_get_link,
1062 .permission = cifs_permission,
1063 .listxattr = cifs_listxattr,
1066 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1067 struct file *dst_file, loff_t destoff, loff_t len,
1068 unsigned int remap_flags)
1070 struct inode *src_inode = file_inode(src_file);
1071 struct inode *target_inode = file_inode(dst_file);
1072 struct cifsFileInfo *smb_file_src = src_file->private_data;
1073 struct cifsFileInfo *smb_file_target;
1074 struct cifs_tcon *target_tcon;
1078 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1081 cifs_dbg(FYI, "clone range\n");
1085 if (!src_file->private_data || !dst_file->private_data) {
1087 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1091 smb_file_target = dst_file->private_data;
1092 target_tcon = tlink_tcon(smb_file_target->tlink);
1095 * Note: the cifs case is easier than btrfs since the server is responsible
1096 * for checking for proper open modes and file type, and if it wants to,
1097 * the server could even support a copy range where source = target
1099 lock_two_nondirectories(target_inode, src_inode);
1102 len = src_inode->i_size - off;
1104 cifs_dbg(FYI, "about to flush pages\n");
1105 /* should we flush the first and last pages first? */
1106 truncate_inode_pages_range(&target_inode->i_data, destoff,
1107 PAGE_ALIGN(destoff + len)-1);
1109 if (target_tcon->ses->server->ops->duplicate_extents)
1110 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1111 smb_file_src, smb_file_target, off, len, destoff);
1115 /* force revalidate of size and timestamps of target file now
1116 that target is updated on the server */
1117 CIFS_I(target_inode)->time = 0;
1118 /* although unlocking in the reverse order from locking is not
1119 strictly necessary here it is a little cleaner to be consistent */
1120 unlock_two_nondirectories(src_inode, target_inode);
1123 return rc < 0 ? rc : len;
1126 ssize_t cifs_file_copychunk_range(unsigned int xid,
1127 struct file *src_file, loff_t off,
1128 struct file *dst_file, loff_t destoff,
1129 size_t len, unsigned int flags)
1131 struct inode *src_inode = file_inode(src_file);
1132 struct inode *target_inode = file_inode(dst_file);
1133 struct cifsFileInfo *smb_file_src;
1134 struct cifsFileInfo *smb_file_target;
1135 struct cifs_tcon *src_tcon;
1136 struct cifs_tcon *target_tcon;
1139 cifs_dbg(FYI, "copychunk range\n");
1141 if (!src_file->private_data || !dst_file->private_data) {
1143 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1148 smb_file_target = dst_file->private_data;
1149 smb_file_src = src_file->private_data;
1150 src_tcon = tlink_tcon(smb_file_src->tlink);
1151 target_tcon = tlink_tcon(smb_file_target->tlink);
1153 if (src_tcon->ses != target_tcon->ses) {
1154 cifs_dbg(VFS, "source and target of copy not on same server\n");
1159 if (!target_tcon->ses->server->ops->copychunk_range)
1163 * Note: the cifs case is easier than btrfs since the server is responsible
1164 * for checking for proper open modes and file type, and if it wants to,
1165 * the server could even support a copy range where source = target
1167 lock_two_nondirectories(target_inode, src_inode);
1169 cifs_dbg(FYI, "about to flush pages\n");
1170 /* should we flush the first and last pages first? */
1171 truncate_inode_pages(&target_inode->i_data, 0);
1173 rc = file_modified(dst_file);
1175 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1176 smb_file_src, smb_file_target, off, len, destoff);
1178 file_accessed(src_file);
1180 /* force revalidate of size and timestamps of target file now
1181 * that target is updated on the server
1183 CIFS_I(target_inode)->time = 0;
1184 /* although unlocking in the reverse order from locking is not
1185 * strictly necessary here it is a little cleaner to be consistent
1187 unlock_two_nondirectories(src_inode, target_inode);
1194 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1195 * is a dummy operation.
1197 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1199 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1205 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1206 struct file *dst_file, loff_t destoff,
1207 size_t len, unsigned int flags)
1209 unsigned int xid = get_xid();
1211 struct cifsFileInfo *cfile = dst_file->private_data;
1213 if (cfile->swapfile)
1216 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1220 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1221 rc = generic_copy_file_range(src_file, off, dst_file,
1222 destoff, len, flags);
1226 const struct file_operations cifs_file_ops = {
1227 .read_iter = cifs_loose_read_iter,
1228 .write_iter = cifs_file_write_iter,
1230 .release = cifs_close,
1232 .flock = cifs_flock,
1233 .fsync = cifs_fsync,
1234 .flush = cifs_flush,
1235 .mmap = cifs_file_mmap,
1236 .splice_read = generic_file_splice_read,
1237 .splice_write = iter_file_splice_write,
1238 .llseek = cifs_llseek,
1239 .unlocked_ioctl = cifs_ioctl,
1240 .copy_file_range = cifs_copy_file_range,
1241 .remap_file_range = cifs_remap_file_range,
1242 .setlease = cifs_setlease,
1243 .fallocate = cifs_fallocate,
1246 const struct file_operations cifs_file_strict_ops = {
1247 .read_iter = cifs_strict_readv,
1248 .write_iter = cifs_strict_writev,
1250 .release = cifs_close,
1252 .flock = cifs_flock,
1253 .fsync = cifs_strict_fsync,
1254 .flush = cifs_flush,
1255 .mmap = cifs_file_strict_mmap,
1256 .splice_read = generic_file_splice_read,
1257 .splice_write = iter_file_splice_write,
1258 .llseek = cifs_llseek,
1259 .unlocked_ioctl = cifs_ioctl,
1260 .copy_file_range = cifs_copy_file_range,
1261 .remap_file_range = cifs_remap_file_range,
1262 .setlease = cifs_setlease,
1263 .fallocate = cifs_fallocate,
1266 const struct file_operations cifs_file_direct_ops = {
1267 .read_iter = cifs_direct_readv,
1268 .write_iter = cifs_direct_writev,
1270 .release = cifs_close,
1272 .flock = cifs_flock,
1273 .fsync = cifs_fsync,
1274 .flush = cifs_flush,
1275 .mmap = cifs_file_mmap,
1276 .splice_read = generic_file_splice_read,
1277 .splice_write = iter_file_splice_write,
1278 .unlocked_ioctl = cifs_ioctl,
1279 .copy_file_range = cifs_copy_file_range,
1280 .remap_file_range = cifs_remap_file_range,
1281 .llseek = cifs_llseek,
1282 .setlease = cifs_setlease,
1283 .fallocate = cifs_fallocate,
1286 const struct file_operations cifs_file_nobrl_ops = {
1287 .read_iter = cifs_loose_read_iter,
1288 .write_iter = cifs_file_write_iter,
1290 .release = cifs_close,
1291 .fsync = cifs_fsync,
1292 .flush = cifs_flush,
1293 .mmap = cifs_file_mmap,
1294 .splice_read = generic_file_splice_read,
1295 .splice_write = iter_file_splice_write,
1296 .llseek = cifs_llseek,
1297 .unlocked_ioctl = cifs_ioctl,
1298 .copy_file_range = cifs_copy_file_range,
1299 .remap_file_range = cifs_remap_file_range,
1300 .setlease = cifs_setlease,
1301 .fallocate = cifs_fallocate,
1304 const struct file_operations cifs_file_strict_nobrl_ops = {
1305 .read_iter = cifs_strict_readv,
1306 .write_iter = cifs_strict_writev,
1308 .release = cifs_close,
1309 .fsync = cifs_strict_fsync,
1310 .flush = cifs_flush,
1311 .mmap = cifs_file_strict_mmap,
1312 .splice_read = generic_file_splice_read,
1313 .splice_write = iter_file_splice_write,
1314 .llseek = cifs_llseek,
1315 .unlocked_ioctl = cifs_ioctl,
1316 .copy_file_range = cifs_copy_file_range,
1317 .remap_file_range = cifs_remap_file_range,
1318 .setlease = cifs_setlease,
1319 .fallocate = cifs_fallocate,
1322 const struct file_operations cifs_file_direct_nobrl_ops = {
1323 .read_iter = cifs_direct_readv,
1324 .write_iter = cifs_direct_writev,
1326 .release = cifs_close,
1327 .fsync = cifs_fsync,
1328 .flush = cifs_flush,
1329 .mmap = cifs_file_mmap,
1330 .splice_read = generic_file_splice_read,
1331 .splice_write = iter_file_splice_write,
1332 .unlocked_ioctl = cifs_ioctl,
1333 .copy_file_range = cifs_copy_file_range,
1334 .remap_file_range = cifs_remap_file_range,
1335 .llseek = cifs_llseek,
1336 .setlease = cifs_setlease,
1337 .fallocate = cifs_fallocate,
1340 const struct file_operations cifs_dir_ops = {
1341 .iterate_shared = cifs_readdir,
1342 .release = cifs_closedir,
1343 .read = generic_read_dir,
1344 .unlocked_ioctl = cifs_ioctl,
1345 .copy_file_range = cifs_copy_file_range,
1346 .remap_file_range = cifs_remap_file_range,
1347 .llseek = generic_file_llseek,
1348 .fsync = cifs_dir_fsync,
1352 cifs_init_once(void *inode)
1354 struct cifsInodeInfo *cifsi = inode;
1356 inode_init_once(&cifsi->vfs_inode);
1357 init_rwsem(&cifsi->lock_sem);
1361 cifs_init_inodecache(void)
1363 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1364 sizeof(struct cifsInodeInfo),
1365 0, (SLAB_RECLAIM_ACCOUNT|
1366 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1368 if (cifs_inode_cachep == NULL)
1375 cifs_destroy_inodecache(void)
1378 * Make sure all delayed rcu free inodes are flushed before we
1382 kmem_cache_destroy(cifs_inode_cachep);
1386 cifs_init_request_bufs(void)
1389 * The SMB2 maximum header size is bigger than the CIFS one, so there is
1390 * no problem allocating a few more bytes for CIFS.
1392 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1394 if (CIFSMaxBufSize < 8192) {
1395 /* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
1396 Unicode path name has to fit in any SMB/CIFS path based frame */
1397 CIFSMaxBufSize = 8192;
1398 } else if (CIFSMaxBufSize > 1024*127) {
1399 CIFSMaxBufSize = 1024 * 127;
1401 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
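/*
 * Worked example (informational): a load-time value of
 * CIFSMaxBufSize=10000 passes the 8192..130048 range check above and is
 * then rounded down to a multiple of 512 by the 0x1FE00 mask, i.e.
 * 10000 & 0x1FE00 = 9728.
 */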
1404 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1405 CIFSMaxBufSize, CIFSMaxBufSize);
1407 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1408 CIFSMaxBufSize + max_hdr_size, 0,
1409 SLAB_HWCACHE_ALIGN, 0,
1410 CIFSMaxBufSize + max_hdr_size,
1412 if (cifs_req_cachep == NULL)
1415 if (cifs_min_rcv < 1)
1417 else if (cifs_min_rcv > 64) {
1419 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1422 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1425 if (cifs_req_poolp == NULL) {
1426 kmem_cache_destroy(cifs_req_cachep);
1429 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1430 almost all handle based requests (but not for write responses, nor is it
1431 sufficient for path based requests). A smaller size would have
1432 been more efficient (compacting multiple slab items on one 4k page)
1433 for the case in which debugging is on, but this larger size allows
1434 more SMBs to use the small buffer alloc and it is still much more
1435 efficient to alloc 1 per page off the slab than to do a 17K (5 page)
1436 alloc of large cifs buffers even when page debugging is on */
1437 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1438 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1439 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1440 if (cifs_sm_req_cachep == NULL) {
1441 mempool_destroy(cifs_req_poolp);
1442 kmem_cache_destroy(cifs_req_cachep);
1446 if (cifs_min_small < 2)
1448 else if (cifs_min_small > 256) {
1449 cifs_min_small = 256;
1450 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1453 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1454 cifs_sm_req_cachep);
1456 if (cifs_sm_req_poolp == NULL) {
1457 mempool_destroy(cifs_req_poolp);
1458 kmem_cache_destroy(cifs_req_cachep);
1459 kmem_cache_destroy(cifs_sm_req_cachep);
1467 cifs_destroy_request_bufs(void)
1469 mempool_destroy(cifs_req_poolp);
1470 kmem_cache_destroy(cifs_req_cachep);
1471 mempool_destroy(cifs_sm_req_poolp);
1472 kmem_cache_destroy(cifs_sm_req_cachep);
1476 cifs_init_mids(void)
1478 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1479 sizeof(struct mid_q_entry), 0,
1480 SLAB_HWCACHE_ALIGN, NULL);
1481 if (cifs_mid_cachep == NULL)
1484 /* 3 is a reasonable minimum number of simultaneous operations */
1485 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1486 if (cifs_mid_poolp == NULL) {
1487 kmem_cache_destroy(cifs_mid_cachep);
1495 cifs_destroy_mids(void)
1497 mempool_destroy(cifs_mid_poolp);
1498 kmem_cache_destroy(cifs_mid_cachep);
1506 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1507 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1508 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1509 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1510 #endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
1512 * Initialize Global counters
1514 atomic_set(&sesInfoAllocCount, 0);
1515 atomic_set(&tconInfoAllocCount, 0);
1516 atomic_set(&tcpSesAllocCount, 0);
1517 atomic_set(&tcpSesReconnectCount, 0);
1518 atomic_set(&tconInfoReconnectCount, 0);
1520 atomic_set(&bufAllocCount, 0);
1521 atomic_set(&smBufAllocCount, 0);
1522 #ifdef CONFIG_CIFS_STATS2
1523 atomic_set(&totBufAllocCount, 0);
1524 atomic_set(&totSmBufAllocCount, 0);
1525 if (slow_rsp_threshold < 1)
1526 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1527 else if (slow_rsp_threshold > 32767)
1529 "slow response threshold set higher than recommended (0 to 32767)\n");
1530 #endif /* CONFIG_CIFS_STATS2 */
1532 atomic_set(&midCount, 0);
1533 GlobalCurrentXid = 0;
1534 GlobalTotalActiveXid = 0;
1535 GlobalMaxActiveXid = 0;
1536 spin_lock_init(&cifs_tcp_ses_lock);
1537 spin_lock_init(&GlobalMid_Lock);
1539 cifs_lock_secret = get_random_u32();
1541 if (cifs_max_pending < 2) {
1542 cifs_max_pending = 2;
1543 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1544 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1545 cifs_max_pending = CIFS_MAX_REQ;
1546 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1550 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1553 goto out_clean_proc;
1557 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1558 * so that we don't launch too many worker threads but
1559 * Documentation/core-api/workqueue.rst recommends setting it to 0
1562 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1563 decrypt_wq = alloc_workqueue("smb3decryptd",
1564 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1567 goto out_destroy_cifsiod_wq;
1570 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1571 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1572 if (!fileinfo_put_wq) {
1574 goto out_destroy_decrypt_wq;
1577 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1578 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1579 if (!cifsoplockd_wq) {
1581 goto out_destroy_fileinfo_put_wq;
1584 rc = cifs_fscache_register();
1586 goto out_destroy_cifsoplockd_wq;
1588 rc = cifs_init_inodecache();
1590 goto out_unreg_fscache;
1592 rc = cifs_init_mids();
1594 goto out_destroy_inodecache;
1596 rc = cifs_init_request_bufs();
1598 goto out_destroy_mids;
1600 #ifdef CONFIG_CIFS_DFS_UPCALL
1601 rc = dfs_cache_init();
1603 goto out_destroy_request_bufs;
1604 #endif /* CONFIG_CIFS_DFS_UPCALL */
1605 #ifdef CONFIG_CIFS_UPCALL
1606 rc = init_cifs_spnego();
1608 goto out_destroy_dfs_cache;
1609 #endif /* CONFIG_CIFS_UPCALL */
1611 rc = init_cifs_idmap();
1613 goto out_register_key_type;
1615 rc = register_filesystem(&cifs_fs_type);
1617 goto out_init_cifs_idmap;
1619 rc = register_filesystem(&smb3_fs_type);
1621 unregister_filesystem(&cifs_fs_type);
1622 goto out_init_cifs_idmap;
1627 out_init_cifs_idmap:
1629 out_register_key_type:
1630 #ifdef CONFIG_CIFS_UPCALL
1632 out_destroy_dfs_cache:
1634 #ifdef CONFIG_CIFS_DFS_UPCALL
1635 dfs_cache_destroy();
1636 out_destroy_request_bufs:
1638 cifs_destroy_request_bufs();
1640 cifs_destroy_mids();
1641 out_destroy_inodecache:
1642 cifs_destroy_inodecache();
1644 cifs_fscache_unregister();
1645 out_destroy_cifsoplockd_wq:
1646 destroy_workqueue(cifsoplockd_wq);
1647 out_destroy_fileinfo_put_wq:
1648 destroy_workqueue(fileinfo_put_wq);
1649 out_destroy_decrypt_wq:
1650 destroy_workqueue(decrypt_wq);
1651 out_destroy_cifsiod_wq:
1652 destroy_workqueue(cifsiod_wq);
1661 cifs_dbg(NOISY, "exit_smb3\n");
1662 unregister_filesystem(&cifs_fs_type);
1663 unregister_filesystem(&smb3_fs_type);
1664 cifs_dfs_release_automount_timer();
1666 #ifdef CONFIG_CIFS_UPCALL
1669 #ifdef CONFIG_CIFS_DFS_UPCALL
1670 dfs_cache_destroy();
1672 cifs_destroy_request_bufs();
1673 cifs_destroy_mids();
1674 cifs_destroy_inodecache();
1675 cifs_fscache_unregister();
1676 destroy_workqueue(cifsoplockd_wq);
1677 destroy_workqueue(decrypt_wq);
1678 destroy_workqueue(fileinfo_put_wq);
1679 destroy_workqueue(cifsiod_wq);
1683 MODULE_AUTHOR("Steve French");
1684 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1686 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1687 "also older servers complying with the SNIA CIFS Specification)");
1688 MODULE_VERSION(CIFS_VERSION);
1689 MODULE_SOFTDEP("ecb");
1690 MODULE_SOFTDEP("hmac");
1691 MODULE_SOFTDEP("md4");
1692 MODULE_SOFTDEP("md5");
1693 MODULE_SOFTDEP("nls");
1694 MODULE_SOFTDEP("aes");
1695 MODULE_SOFTDEP("cmac");
1696 MODULE_SOFTDEP("sha256");
1697 MODULE_SOFTDEP("sha512");
1698 MODULE_SOFTDEP("aead2");
1699 MODULE_SOFTDEP("ccm");
1700 MODULE_SOFTDEP("gcm");
1701 module_init(init_cifs)
1702 module_exit(exit_cifs)