4 * Copyright (C) International Business Machines Corp., 2002,2008
7 * Common Internet FileSystem (CIFS) client
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 /* Note that BB means BUGBUG (ie something to fix eventually) */
26 #include <linux/module.h>
28 #include <linux/mount.h>
29 #include <linux/slab.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/seq_file.h>
33 #include <linux/vfs.h>
34 #include <linux/mempool.h>
35 #include <linux/delay.h>
36 #include <linux/kthread.h>
37 #include <linux/freezer.h>
38 #include <linux/namei.h>
39 #include <linux/random.h>
40 #include <linux/uuid.h>
41 #include <linux/xattr.h>
45 #define DECLARE_GLOBALS_HERE
47 #include "cifsproto.h"
48 #include "cifs_debug.h"
49 #include "cifs_fs_sb.h"
51 #include <linux/key-type.h>
52 #include "cifs_spnego.h"
55 #ifdef CONFIG_CIFS_DFS_UPCALL
56 #include "dfs_cache.h"
60 * DOS dates from 1980/1/1 through 2107/12/31
61 * Protocol specifications indicate the range should be to 119, which
62 * limits maximum year to 2099. But this range has not been checked.
64 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
65 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
66 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
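/*
 * For reference: a DOS date packs (year - 1980) << 9 | month << 5 | day and a
 * DOS time packs hours << 11 | minutes << 5 | (seconds / 2), which is why
 * SMB_TIME_MAX ends in 29 (58-59 seconds) and the 7-bit year field nominally
 * tops out at 127 (year 2107).
 */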
70 bool enable_oplocks = true;
71 bool linuxExtEnabled = true;
72 bool lookupCacheEnabled = true;
73 bool disable_legacy_dialects; /* false by default */
74 unsigned int global_secflags = CIFSSEC_DEF;
75 /* unsigned int ntlmv2_support = 0; */
76 unsigned int sign_CIFS_PDUs = 1;
77 static const struct super_operations cifs_super_ops;
78 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
79 module_param(CIFSMaxBufSize, uint, 0444);
80 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
82 "Default: 16384 Range: 8192 to 130048");
83 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
84 module_param(cifs_min_rcv, uint, 0444);
85 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
87 unsigned int cifs_min_small = 30;
88 module_param(cifs_min_small, uint, 0444);
89 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
91 unsigned int cifs_max_pending = CIFS_MAX_REQ;
92 module_param(cifs_max_pending, uint, 0444);
93 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
94 "CIFS/SMB1 dialect (N/A for SMB3) "
95 "Default: 32767 Range: 2 to 32767.");
96 #ifdef CONFIG_CIFS_STATS2
97 unsigned int slow_rsp_threshold = 1;
98 module_param(slow_rsp_threshold, uint, 0644);
99 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
100 "before logging that a response is delayed. "
101 "Default: 1 (if set to 0 disables msg).");
104 module_param(enable_oplocks, bool, 0644);
105 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
107 module_param(disable_legacy_dialects, bool, 0644);
108 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
109 "helpful to restrict the ability to "
110 "override the default dialects (SMB2.1, "
111 "SMB3 and SMB3.02) on mount with old "
112 "dialects (CIFS/SMB1 and SMB2) since "
113 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
114 " and less secure. Default: n/N/0");
116 extern mempool_t *cifs_sm_req_poolp;
117 extern mempool_t *cifs_req_poolp;
118 extern mempool_t *cifs_mid_poolp;
120 struct workqueue_struct *cifsiod_wq;
121 struct workqueue_struct *decrypt_wq;
122 struct workqueue_struct *fileinfo_put_wq;
123 struct workqueue_struct *cifsoplockd_wq;
124 __u32 cifs_lock_secret;
127 * Bumps refcount for cifs super block.
128 * Note that it should only be called if a reference to the VFS super block is
129 * already held, e.g. in the context of open-type syscalls. Otherwise it can race with
130 * atomic_dec_and_test in deactivate_locked_super.
133 cifs_sb_active(struct super_block *sb)
135 struct cifs_sb_info *server = CIFS_SB(sb);
137 if (atomic_inc_return(&server->active) == 1)
138 atomic_inc(&sb->s_active);
142 cifs_sb_deactive(struct super_block *sb)
144 struct cifs_sb_info *server = CIFS_SB(sb);
146 if (atomic_dec_and_test(&server->active))
147 deactivate_super(sb);
151 cifs_read_super(struct super_block *sb)
154 struct cifs_sb_info *cifs_sb;
155 struct cifs_tcon *tcon;
156 struct timespec64 ts;
159 cifs_sb = CIFS_SB(sb);
160 tcon = cifs_sb_master_tcon(cifs_sb);
162 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
163 sb->s_flags |= SB_POSIXACL;
165 if (tcon->snapshot_time)
166 sb->s_flags |= SB_RDONLY;
168 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
169 sb->s_maxbytes = MAX_LFS_FILESIZE;
171 sb->s_maxbytes = MAX_NON_LFS;
174 * Some very old servers like DOS and OS/2 used 2 second granularity
175 * (while all current servers use 100ns granularity - see MS-DTYP)
176 * but 1 second is the maximum allowed granularity for the VFS
177 * so for old servers set time granularity to 1 second while for
178 * everything else (current servers) set it to 100ns.
180 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
181 ((tcon->ses->capabilities &
182 tcon->ses->server->vals->cap_nt_find) == 0) &&
184 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
185 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
186 sb->s_time_min = ts.tv_sec;
187 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
188 cpu_to_le16(SMB_TIME_MAX), 0);
189 sb->s_time_max = ts.tv_sec;
192 * Almost every server, including all SMB2+, uses DCE TIME
193 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
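* A rough conversion: unix_seconds = nttime / 10000000 - 11644473600, where
* 11644473600 is the number of seconds between 1601-01-01 and the Unix epoch.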
195 sb->s_time_gran = 100;
196 ts = cifs_NTtimeToUnix(0);
197 sb->s_time_min = ts.tv_sec;
198 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
199 sb->s_time_max = ts.tv_sec;
202 sb->s_magic = CIFS_MAGIC_NUMBER;
203 sb->s_op = &cifs_super_ops;
204 sb->s_xattr = cifs_xattr_handlers;
205 rc = super_setup_bdi(sb);
208 /* tune readahead according to rsize */
209 sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
211 sb->s_blocksize = CIFS_MAX_MSGSIZE;
212 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
213 inode = cifs_root_iget(sb);
221 sb->s_d_op = &cifs_ci_dentry_ops;
223 sb->s_d_op = &cifs_dentry_ops;
225 sb->s_root = d_make_root(inode);
231 #ifdef CONFIG_CIFS_NFSD_EXPORT
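/*
 * Exporting over NFS relies on stable, server-provided inode numbers (the
 * serverino mount option), hence the CIFS_MOUNT_SERVER_INUM check below.
 */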
232 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
233 cifs_dbg(FYI, "export ops supported\n");
234 sb->s_export_op = &cifs_export_ops;
236 #endif /* CONFIG_CIFS_NFSD_EXPORT */
241 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
245 static void cifs_kill_sb(struct super_block *sb)
247 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
249 cifs_umount(cifs_sb);
253 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
255 struct super_block *sb = dentry->d_sb;
256 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
257 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
258 struct TCP_Server_Info *server = tcon->ses->server;
264 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
266 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
268 buf->f_namelen = PATH_MAX;
270 buf->f_fsid.val[0] = tcon->vol_serial_number;
271 /* we use part of the volume create time for extra randomness, see man statfs */
272 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
274 buf->f_files = 0; /* undefined */
275 buf->f_ffree = 0; /* unlimited */
277 if (server->ops->queryfs)
278 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
284 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
286 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
287 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
288 struct TCP_Server_Info *server = tcon->ses->server;
290 if (server->ops->fallocate)
291 return server->ops->fallocate(file, tcon, mode, off, len);
296 static int cifs_permission(struct inode *inode, int mask)
298 struct cifs_sb_info *cifs_sb;
300 cifs_sb = CIFS_SB(inode->i_sb);
302 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
303 if ((mask & MAY_EXEC) && !execute_ok(inode))
307 } else /* file mode might have been restricted at mount time
308 on the client (above and beyond ACL on servers) for
309 servers which do not support setting and viewing mode bits,
310 so allowing client to check permissions is useful */
311 return generic_permission(inode, mask);
314 static struct kmem_cache *cifs_inode_cachep;
315 static struct kmem_cache *cifs_req_cachep;
316 static struct kmem_cache *cifs_mid_cachep;
317 static struct kmem_cache *cifs_sm_req_cachep;
318 mempool_t *cifs_sm_req_poolp;
319 mempool_t *cifs_req_poolp;
320 mempool_t *cifs_mid_poolp;
322 static struct inode *
323 cifs_alloc_inode(struct super_block *sb)
325 struct cifsInodeInfo *cifs_inode;
326 cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
329 cifs_inode->cifsAttrs = 0x20; /* default: ATTR_ARCHIVE */
330 cifs_inode->time = 0;
332 * Until the file is open and we have gotten oplock info back from the
333 * server, can not assume caching of file data or metadata.
335 cifs_set_oplock_level(cifs_inode, 0);
336 cifs_inode->flags = 0;
337 spin_lock_init(&cifs_inode->writers_lock);
338 cifs_inode->writers = 0;
339 cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
340 cifs_inode->server_eof = 0;
341 cifs_inode->uniqueid = 0;
342 cifs_inode->createtime = 0;
343 cifs_inode->epoch = 0;
344 spin_lock_init(&cifs_inode->open_file_lock);
345 generate_random_uuid(cifs_inode->lease_key);
348 * Can not set i_flags here - they get immediately overwritten to zero
351 /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
352 INIT_LIST_HEAD(&cifs_inode->openFileList);
353 INIT_LIST_HEAD(&cifs_inode->llist);
354 return &cifs_inode->vfs_inode;
358 cifs_free_inode(struct inode *inode)
360 kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
364 cifs_evict_inode(struct inode *inode)
366 truncate_inode_pages_final(&inode->i_data);
368 cifs_fscache_release_inode_cookie(inode);
372 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
374 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
375 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
377 seq_puts(s, ",addr=");
379 switch (server->dstaddr.ss_family) {
381 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
384 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
385 if (sa6->sin6_scope_id)
386 seq_printf(s, "%%%u", sa6->sin6_scope_id);
389 seq_puts(s, "(unknown)");
392 seq_puts(s, ",rdma");
396 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
398 if (ses->sectype == Unspecified) {
399 if (ses->user_name == NULL)
400 seq_puts(s, ",sec=none");
404 seq_puts(s, ",sec=");
406 switch (ses->sectype) {
408 seq_puts(s, "lanman");
411 seq_puts(s, "ntlmv2");
420 seq_puts(s, "ntlmssp");
423 /* shouldn't ever happen */
424 seq_puts(s, "unknown");
431 if (ses->sectype == Kerberos)
432 seq_printf(s, ",cruid=%u",
433 from_kuid_munged(&init_user_ns, ses->cred_uid));
437 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
439 seq_puts(s, ",cache=");
441 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
442 seq_puts(s, "strict");
443 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
445 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
446 seq_puts(s, "singleclient"); /* assume only one client access */
447 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
448 seq_puts(s, "ro"); /* read only caching assumed */
450 seq_puts(s, "loose");
454 cifs_show_nls(struct seq_file *s, struct nls_table *cur)
456 struct nls_table *def;
458 /* Display iocharset= option if it's not default charset */
459 def = load_nls_default();
461 seq_printf(s, ",iocharset=%s", cur->charset);
466 * cifs_show_options() is for displaying mount options in /proc/mounts.
467 * Not all settable options are displayed but most of the important
471 cifs_show_options(struct seq_file *s, struct dentry *root)
473 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
474 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
475 struct sockaddr *srcaddr;
476 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
478 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
479 cifs_show_security(s, tcon->ses);
480 cifs_show_cache_flavor(s, cifs_sb);
483 seq_puts(s, ",nolease");
484 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
485 seq_puts(s, ",multiuser");
486 else if (tcon->ses->user_name)
487 seq_show_option(s, "username", tcon->ses->user_name);
489 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
490 seq_show_option(s, "domain", tcon->ses->domainName);
492 if (srcaddr->sa_family != AF_UNSPEC) {
493 struct sockaddr_in *saddr4;
494 struct sockaddr_in6 *saddr6;
495 saddr4 = (struct sockaddr_in *)srcaddr;
496 saddr6 = (struct sockaddr_in6 *)srcaddr;
497 if (srcaddr->sa_family == AF_INET6)
498 seq_printf(s, ",srcaddr=%pI6c",
500 else if (srcaddr->sa_family == AF_INET)
501 seq_printf(s, ",srcaddr=%pI4",
502 &saddr4->sin_addr.s_addr);
504 seq_printf(s, ",srcaddr=BAD-AF:%i",
505 (int)(srcaddr->sa_family));
508 seq_printf(s, ",uid=%u",
509 from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
510 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
511 seq_puts(s, ",forceuid");
513 seq_puts(s, ",noforceuid");
515 seq_printf(s, ",gid=%u",
516 from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
517 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
518 seq_puts(s, ",forcegid");
520 seq_puts(s, ",noforcegid");
522 cifs_show_address(s, tcon->ses->server);
525 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
526 cifs_sb->mnt_file_mode,
527 cifs_sb->mnt_dir_mode);
529 cifs_show_nls(s, cifs_sb->local_nls);
532 seq_puts(s, ",seal");
534 seq_puts(s, ",nocase");
535 if (tcon->local_lease)
536 seq_puts(s, ",locallease");
538 seq_puts(s, ",hard");
540 seq_puts(s, ",soft");
541 if (tcon->use_persistent)
542 seq_puts(s, ",persistenthandles");
543 else if (tcon->use_resilient)
544 seq_puts(s, ",resilienthandles");
545 if (tcon->posix_extensions)
546 seq_puts(s, ",posix");
547 else if (tcon->unix_ext)
548 seq_puts(s, ",unix");
550 seq_puts(s, ",nounix");
551 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
552 seq_puts(s, ",nodfs");
553 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
554 seq_puts(s, ",posixpaths");
555 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
556 seq_puts(s, ",setuids");
557 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
558 seq_puts(s, ",idsfromsid");
559 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
560 seq_puts(s, ",serverino");
561 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
562 seq_puts(s, ",rwpidforward");
563 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
564 seq_puts(s, ",forcemand");
565 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
566 seq_puts(s, ",nouser_xattr");
567 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
568 seq_puts(s, ",mapchars");
569 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
570 seq_puts(s, ",mapposix");
571 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
573 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
574 seq_puts(s, ",nobrl");
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
576 seq_puts(s, ",nohandlecache");
577 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
578 seq_puts(s, ",modefromsid");
579 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
580 seq_puts(s, ",cifsacl");
581 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
582 seq_puts(s, ",dynperm");
583 if (root->d_sb->s_flags & SB_POSIXACL)
585 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
586 seq_puts(s, ",mfsymlinks");
587 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
590 seq_puts(s, ",nostrictsync");
591 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
592 seq_puts(s, ",noperm");
593 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
594 seq_printf(s, ",backupuid=%u",
595 from_kuid_munged(&init_user_ns,
596 cifs_sb->mnt_backupuid));
597 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
598 seq_printf(s, ",backupgid=%u",
599 from_kgid_munged(&init_user_ns,
600 cifs_sb->mnt_backupgid));
602 seq_printf(s, ",rsize=%u", cifs_sb->rsize);
603 seq_printf(s, ",wsize=%u", cifs_sb->wsize);
604 seq_printf(s, ",bsize=%u", cifs_sb->bsize);
605 if (tcon->ses->server->min_offload)
606 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
607 seq_printf(s, ",echo_interval=%lu",
608 tcon->ses->server->echo_interval / HZ);
610 /* Only display max_credits if it was overridden on mount */
611 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
612 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
614 if (tcon->snapshot_time)
615 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
616 if (tcon->handle_timeout)
617 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
618 /* convert actimeo and display it in seconds */
619 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
621 if (tcon->ses->chan_max > 1)
622 seq_printf(s, ",multichannel,max_channels=%zu",
623 tcon->ses->chan_max);
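/*
 * Illustrative only: with the options above, a /proc/mounts line might look
 * roughly like
 *   //server/share /mnt cifs rw,vers=3.1.1,sec=ntlmssp,cache=strict,
 *   username=user,uid=0,forceuid,gid=0,noforcegid,addr=192.0.2.1,
 *   rsize=4194304,wsize=4194304,bsize=1048576,echo_interval=60,actimeo=1 0 0
 */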
628 static void cifs_umount_begin(struct super_block *sb)
630 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
631 struct cifs_tcon *tcon;
636 tcon = cifs_sb_master_tcon(cifs_sb);
638 spin_lock(&cifs_tcp_ses_lock);
639 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
640 /* we have other mounts to same share or we have
641 already tried to force umount this and woken up
642 all waiting network requests, nothing to do */
643 spin_unlock(&cifs_tcp_ses_lock);
645 } else if (tcon->tc_count == 1)
646 tcon->tidStatus = CifsExiting;
647 spin_unlock(&cifs_tcp_ses_lock);
649 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
650 /* cancel_notify_requests(tcon); */
651 if (tcon->ses && tcon->ses->server) {
652 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
653 wake_up_all(&tcon->ses->server->request_q);
654 wake_up_all(&tcon->ses->server->response_q);
655 msleep(1); /* yield */
656 /* we have to kick the requests once more */
657 wake_up_all(&tcon->ses->server->response_q);
664 #ifdef CONFIG_CIFS_STATS2
665 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
672 static int cifs_remount(struct super_block *sb, int *flags, char *data)
675 *flags |= SB_NODIRATIME;
679 static int cifs_drop_inode(struct inode *inode)
681 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
683 /* no serverino => unconditional eviction */
684 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
685 generic_drop_inode(inode);
688 static const struct super_operations cifs_super_ops = {
689 .statfs = cifs_statfs,
690 .alloc_inode = cifs_alloc_inode,
691 .free_inode = cifs_free_inode,
692 .drop_inode = cifs_drop_inode,
693 .evict_inode = cifs_evict_inode,
694 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
695 function unless later we add lazy close of inodes or unless the
696 kernel forgets to call us with the same number of releases (closes)
698 .show_options = cifs_show_options,
699 .umount_begin = cifs_umount_begin,
700 .remount_fs = cifs_remount,
701 #ifdef CONFIG_CIFS_STATS2
702 .show_stats = cifs_show_stats,
707 * Get root dentry from superblock according to prefix path mount option.
708 * Return dentry with refcount + 1 on success and ERR_PTR on failure.
710 static struct dentry *
711 cifs_get_root(struct smb_vol *vol, struct super_block *sb)
713 struct dentry *dentry;
714 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
715 char *full_path = NULL;
719 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
720 return dget(sb->s_root);
722 full_path = cifs_build_path_to_root(vol, cifs_sb,
723 cifs_sb_master_tcon(cifs_sb), 0);
724 if (full_path == NULL)
725 return ERR_PTR(-ENOMEM);
727 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
729 sep = CIFS_DIR_SEP(cifs_sb);
730 dentry = dget(sb->s_root);
734 struct inode *dir = d_inode(dentry);
735 struct dentry *child;
737 if (!S_ISDIR(dir->i_mode)) {
739 dentry = ERR_PTR(-ENOTDIR);
743 /* skip separators */
750 while (*s && *s != sep)
753 child = lookup_positive_unlocked(p, dentry, s - p);
756 } while (!IS_ERR(dentry));
761 static int cifs_set_super(struct super_block *sb, void *data)
763 struct cifs_mnt_data *mnt_data = data;
764 sb->s_fs_info = mnt_data->cifs_sb;
765 return set_anon_super(sb, NULL);
768 static struct dentry *
769 cifs_smb3_do_mount(struct file_system_type *fs_type,
770 int flags, const char *dev_name, void *data, bool is_smb3)
773 struct super_block *sb;
774 struct cifs_sb_info *cifs_sb;
775 struct smb_vol *volume_info;
776 struct cifs_mnt_data mnt_data;
780 * Prints in Kernel / CIFS log the attempted mount operation
781 * If CIFS_DEBUG && cifs_FYI
784 cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
786 cifs_info("Attempting to mount %s\n", dev_name);
788 volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
789 if (IS_ERR(volume_info))
790 return ERR_CAST(volume_info);
792 cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
793 if (cifs_sb == NULL) {
794 root = ERR_PTR(-ENOMEM);
798 cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
799 if (cifs_sb->mountdata == NULL) {
800 root = ERR_PTR(-ENOMEM);
804 rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
810 rc = cifs_mount(cifs_sb, volume_info);
812 if (!(flags & SB_SILENT))
813 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
819 mnt_data.vol = volume_info;
820 mnt_data.cifs_sb = cifs_sb;
821 mnt_data.flags = flags;
823 /* BB should we make this contingent on mount parm? */
824 flags |= SB_NODIRATIME | SB_NOATIME;
826 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
829 cifs_umount(cifs_sb);
834 cifs_dbg(FYI, "Use existing superblock\n");
835 cifs_umount(cifs_sb);
837 rc = cifs_read_super(sb);
843 sb->s_flags |= SB_ACTIVE;
846 root = cifs_get_root(volume_info, sb);
850 cifs_dbg(FYI, "dentry root is: %p\n", root);
854 deactivate_locked_super(sb);
856 cifs_cleanup_volume_info(volume_info);
860 kfree(cifs_sb->prepath);
861 kfree(cifs_sb->mountdata);
864 unload_nls(volume_info->local_nls);
868 static struct dentry *
869 smb3_do_mount(struct file_system_type *fs_type,
870 int flags, const char *dev_name, void *data)
872 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
875 static struct dentry *
876 cifs_do_mount(struct file_system_type *fs_type,
877 int flags, const char *dev_name, void *data)
879 return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
883 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
886 struct inode *inode = file_inode(iocb->ki_filp);
888 if (iocb->ki_filp->f_flags & O_DIRECT)
889 return cifs_user_readv(iocb, iter);
891 rc = cifs_revalidate_mapping(inode);
895 return generic_file_read_iter(iocb, iter);
898 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
900 struct inode *inode = file_inode(iocb->ki_filp);
901 struct cifsInodeInfo *cinode = CIFS_I(inode);
905 if (iocb->ki_filp->f_flags & O_DIRECT) {
906 written = cifs_user_writev(iocb, from);
907 if (written > 0 && CIFS_CACHE_READ(cinode)) {
908 cifs_zap_mapping(inode);
910 "Set no oplock for inode=%p after a write operation\n",
917 written = cifs_get_writer(cinode);
921 written = generic_file_write_iter(iocb, from);
923 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
926 rc = filemap_fdatawrite(inode->i_mapping);
928 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
932 cifs_put_writer(cinode);
936 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
938 struct cifsFileInfo *cfile = file->private_data;
939 struct cifs_tcon *tcon;
942 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
943 * the cached file length
945 if (whence != SEEK_SET && whence != SEEK_CUR) {
947 struct inode *inode = file_inode(file);
950 * We need to be sure that all dirty pages are written and the
951 * server has the newest file length.
953 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
954 inode->i_mapping->nrpages != 0) {
955 rc = filemap_fdatawait(inode->i_mapping);
957 mapping_set_error(inode->i_mapping, rc);
962 * Some applications poll for the file length in this strange
963 * way so we must seek to end on non-oplocked files by
964 * setting the revalidate time to zero.
966 CIFS_I(inode)->time = 0;
968 rc = cifs_revalidate_file_attr(file);
972 if (cfile && cfile->tlink) {
973 tcon = tlink_tcon(cfile->tlink);
974 if (tcon->ses->server->ops->llseek)
975 return tcon->ses->server->ops->llseek(file, tcon,
978 return generic_file_llseek(file, offset, whence);
982 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
985 * Note that this is called by vfs setlease with i_lock held to
986 * protect *lease from going away.
988 struct inode *inode = file_inode(file);
989 struct cifsFileInfo *cfile = file->private_data;
991 if (!(S_ISREG(inode->i_mode)))
994 /* Check if file is oplocked if this is request for new lease */
995 if (arg == F_UNLCK ||
996 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
997 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
998 return generic_setlease(file, arg, lease, priv);
999 else if (tlink_tcon(cfile->tlink)->local_lease &&
1000 !CIFS_CACHE_READ(CIFS_I(inode)))
1002 * If the server claims to support oplock on this file, then we
1003 * still need to check oplock even if the local_lease mount
1004 * option is set, but there are servers which do not support
1005 * oplock for which this mount option may be useful if the user
1006 * knows that the file won't be changed on the server by anyone
1009 return generic_setlease(file, arg, lease, priv);
1014 struct file_system_type cifs_fs_type = {
1015 .owner = THIS_MODULE,
1017 .mount = cifs_do_mount,
1018 .kill_sb = cifs_kill_sb,
1021 MODULE_ALIAS_FS("cifs");
1023 static struct file_system_type smb3_fs_type = {
1024 .owner = THIS_MODULE,
1026 .mount = smb3_do_mount,
1027 .kill_sb = cifs_kill_sb,
1030 MODULE_ALIAS_FS("smb3");
1031 MODULE_ALIAS("smb3");
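/*
 * Mounting with "-t smb3", e.g. "mount -t smb3 //server/share /mnt -o ...",
 * goes through smb3_do_mount() and is meant to refuse the legacy SMB1/CIFS
 * dialect, while "-t cifs" accepts any dialect allowed by the vers= option.
 */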
1033 const struct inode_operations cifs_dir_inode_ops = {
1034 .create = cifs_create,
1035 .atomic_open = cifs_atomic_open,
1036 .lookup = cifs_lookup,
1037 .getattr = cifs_getattr,
1038 .unlink = cifs_unlink,
1039 .link = cifs_hardlink,
1040 .mkdir = cifs_mkdir,
1041 .rmdir = cifs_rmdir,
1042 .rename = cifs_rename2,
1043 .permission = cifs_permission,
1044 .setattr = cifs_setattr,
1045 .symlink = cifs_symlink,
1046 .mknod = cifs_mknod,
1047 .listxattr = cifs_listxattr,
1050 const struct inode_operations cifs_file_inode_ops = {
1051 .setattr = cifs_setattr,
1052 .getattr = cifs_getattr,
1053 .permission = cifs_permission,
1054 .listxattr = cifs_listxattr,
1055 .fiemap = cifs_fiemap,
1058 const struct inode_operations cifs_symlink_inode_ops = {
1059 .get_link = cifs_get_link,
1060 .permission = cifs_permission,
1061 .listxattr = cifs_listxattr,
1064 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1065 struct file *dst_file, loff_t destoff, loff_t len,
1066 unsigned int remap_flags)
1068 struct inode *src_inode = file_inode(src_file);
1069 struct inode *target_inode = file_inode(dst_file);
1070 struct cifsFileInfo *smb_file_src = src_file->private_data;
1071 struct cifsFileInfo *smb_file_target;
1072 struct cifs_tcon *target_tcon;
1076 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
1079 cifs_dbg(FYI, "clone range\n");
1083 if (!src_file->private_data || !dst_file->private_data) {
1085 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1089 smb_file_target = dst_file->private_data;
1090 target_tcon = tlink_tcon(smb_file_target->tlink);
1093 * Note: the cifs case is easier than btrfs since the server is responsible
1094 * for checking proper open modes and file type and, if it wants, the
1095 * server could even support copying a range where source = target
1097 lock_two_nondirectories(target_inode, src_inode);
1100 len = src_inode->i_size - off;
1102 cifs_dbg(FYI, "about to flush pages\n");
1103 /* should we flush the first and last pages first? */
1104 truncate_inode_pages_range(&target_inode->i_data, destoff,
1105 PAGE_ALIGN(destoff + len)-1);
1107 if (target_tcon->ses->server->ops->duplicate_extents)
1108 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1109 smb_file_src, smb_file_target, off, len, destoff);
1113 /* force revalidate of size and timestamps of target file now
1114 that target is updated on the server */
1115 CIFS_I(target_inode)->time = 0;
1116 /* although unlocking in the reverse order from locking is not
1117 strictly necessary here, it is a little cleaner to be consistent */
1118 unlock_two_nondirectories(src_inode, target_inode);
1121 return rc < 0 ? rc : len;
1124 ssize_t cifs_file_copychunk_range(unsigned int xid,
1125 struct file *src_file, loff_t off,
1126 struct file *dst_file, loff_t destoff,
1127 size_t len, unsigned int flags)
1129 struct inode *src_inode = file_inode(src_file);
1130 struct inode *target_inode = file_inode(dst_file);
1131 struct cifsFileInfo *smb_file_src;
1132 struct cifsFileInfo *smb_file_target;
1133 struct cifs_tcon *src_tcon;
1134 struct cifs_tcon *target_tcon;
1137 cifs_dbg(FYI, "copychunk range\n");
1139 if (!src_file->private_data || !dst_file->private_data) {
1141 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1146 smb_file_target = dst_file->private_data;
1147 smb_file_src = src_file->private_data;
1148 src_tcon = tlink_tcon(smb_file_src->tlink);
1149 target_tcon = tlink_tcon(smb_file_target->tlink);
1151 if (src_tcon->ses != target_tcon->ses) {
1152 cifs_dbg(VFS, "source and target of copy not on same server\n");
1157 if (!target_tcon->ses->server->ops->copychunk_range)
1161 * Note: the cifs case is easier than btrfs since the server is responsible
1162 * for checking proper open modes and file type and, if it wants, the
1163 * server could even support copying a range where source = target
1165 lock_two_nondirectories(target_inode, src_inode);
1167 cifs_dbg(FYI, "about to flush pages\n");
1168 /* should we flush the first and last pages first? */
1169 truncate_inode_pages(&target_inode->i_data, 0);
1171 rc = file_modified(dst_file);
1173 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1174 smb_file_src, smb_file_target, off, len, destoff);
1176 file_accessed(src_file);
1178 /* force revalidate of size and timestamps of target file now
1179 * that target is updated on the server
1181 CIFS_I(target_inode)->time = 0;
1182 /* although unlocking in the reverse order from locking is not
1183 * strictly necessary here, it is a little cleaner to be consistent
1185 unlock_two_nondirectories(src_inode, target_inode);
1192 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1193 * is a dummy operation.
1195 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1197 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1203 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1204 struct file *dst_file, loff_t destoff,
1205 size_t len, unsigned int flags)
1207 unsigned int xid = get_xid();
1210 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1214 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1215 rc = generic_copy_file_range(src_file, off, dst_file,
1216 destoff, len, flags);
1220 const struct file_operations cifs_file_ops = {
1221 .read_iter = cifs_loose_read_iter,
1222 .write_iter = cifs_file_write_iter,
1224 .release = cifs_close,
1226 .flock = cifs_flock,
1227 .fsync = cifs_fsync,
1228 .flush = cifs_flush,
1229 .mmap = cifs_file_mmap,
1230 .splice_read = generic_file_splice_read,
1231 .splice_write = iter_file_splice_write,
1232 .llseek = cifs_llseek,
1233 .unlocked_ioctl = cifs_ioctl,
1234 .copy_file_range = cifs_copy_file_range,
1235 .remap_file_range = cifs_remap_file_range,
1236 .setlease = cifs_setlease,
1237 .fallocate = cifs_fallocate,
1240 const struct file_operations cifs_file_strict_ops = {
1241 .read_iter = cifs_strict_readv,
1242 .write_iter = cifs_strict_writev,
1244 .release = cifs_close,
1246 .flock = cifs_flock,
1247 .fsync = cifs_strict_fsync,
1248 .flush = cifs_flush,
1249 .mmap = cifs_file_strict_mmap,
1250 .splice_read = generic_file_splice_read,
1251 .splice_write = iter_file_splice_write,
1252 .llseek = cifs_llseek,
1253 .unlocked_ioctl = cifs_ioctl,
1254 .copy_file_range = cifs_copy_file_range,
1255 .remap_file_range = cifs_remap_file_range,
1256 .setlease = cifs_setlease,
1257 .fallocate = cifs_fallocate,
1260 const struct file_operations cifs_file_direct_ops = {
1261 .read_iter = cifs_direct_readv,
1262 .write_iter = cifs_direct_writev,
1264 .release = cifs_close,
1266 .flock = cifs_flock,
1267 .fsync = cifs_fsync,
1268 .flush = cifs_flush,
1269 .mmap = cifs_file_mmap,
1270 .splice_read = generic_file_splice_read,
1271 .splice_write = iter_file_splice_write,
1272 .unlocked_ioctl = cifs_ioctl,
1273 .copy_file_range = cifs_copy_file_range,
1274 .remap_file_range = cifs_remap_file_range,
1275 .llseek = cifs_llseek,
1276 .setlease = cifs_setlease,
1277 .fallocate = cifs_fallocate,
1280 const struct file_operations cifs_file_nobrl_ops = {
1281 .read_iter = cifs_loose_read_iter,
1282 .write_iter = cifs_file_write_iter,
1284 .release = cifs_close,
1285 .fsync = cifs_fsync,
1286 .flush = cifs_flush,
1287 .mmap = cifs_file_mmap,
1288 .splice_read = generic_file_splice_read,
1289 .splice_write = iter_file_splice_write,
1290 .llseek = cifs_llseek,
1291 .unlocked_ioctl = cifs_ioctl,
1292 .copy_file_range = cifs_copy_file_range,
1293 .remap_file_range = cifs_remap_file_range,
1294 .setlease = cifs_setlease,
1295 .fallocate = cifs_fallocate,
1298 const struct file_operations cifs_file_strict_nobrl_ops = {
1299 .read_iter = cifs_strict_readv,
1300 .write_iter = cifs_strict_writev,
1302 .release = cifs_close,
1303 .fsync = cifs_strict_fsync,
1304 .flush = cifs_flush,
1305 .mmap = cifs_file_strict_mmap,
1306 .splice_read = generic_file_splice_read,
1307 .splice_write = iter_file_splice_write,
1308 .llseek = cifs_llseek,
1309 .unlocked_ioctl = cifs_ioctl,
1310 .copy_file_range = cifs_copy_file_range,
1311 .remap_file_range = cifs_remap_file_range,
1312 .setlease = cifs_setlease,
1313 .fallocate = cifs_fallocate,
1316 const struct file_operations cifs_file_direct_nobrl_ops = {
1317 .read_iter = cifs_direct_readv,
1318 .write_iter = cifs_direct_writev,
1320 .release = cifs_close,
1321 .fsync = cifs_fsync,
1322 .flush = cifs_flush,
1323 .mmap = cifs_file_mmap,
1324 .splice_read = generic_file_splice_read,
1325 .splice_write = iter_file_splice_write,
1326 .unlocked_ioctl = cifs_ioctl,
1327 .copy_file_range = cifs_copy_file_range,
1328 .remap_file_range = cifs_remap_file_range,
1329 .llseek = cifs_llseek,
1330 .setlease = cifs_setlease,
1331 .fallocate = cifs_fallocate,
1334 const struct file_operations cifs_dir_ops = {
1335 .iterate_shared = cifs_readdir,
1336 .release = cifs_closedir,
1337 .read = generic_read_dir,
1338 .unlocked_ioctl = cifs_ioctl,
1339 .copy_file_range = cifs_copy_file_range,
1340 .remap_file_range = cifs_remap_file_range,
1341 .llseek = generic_file_llseek,
1342 .fsync = cifs_dir_fsync,
1346 cifs_init_once(void *inode)
1348 struct cifsInodeInfo *cifsi = inode;
1350 inode_init_once(&cifsi->vfs_inode);
1351 init_rwsem(&cifsi->lock_sem);
1355 cifs_init_inodecache(void)
1357 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1358 sizeof(struct cifsInodeInfo),
1359 0, (SLAB_RECLAIM_ACCOUNT|
1360 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1362 if (cifs_inode_cachep == NULL)
1369 cifs_destroy_inodecache(void)
1372 * Make sure all delayed rcu free inodes are flushed before we
1376 kmem_cache_destroy(cifs_inode_cachep);
1380 cifs_init_request_bufs(void)
1383 * SMB2 maximum header size is bigger than CIFS one - no problems to
1384 * allocate some more bytes for CIFS.
1386 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1388 if (CIFSMaxBufSize < 8192) {
1389 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1390 Unicode path name has to fit in any SMB/CIFS path based frames */
1391 CIFSMaxBufSize = 8192;
1392 } else if (CIFSMaxBufSize > 1024*127) {
1393 CIFSMaxBufSize = 1024 * 127;
1395 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
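/* e.g. a requested value of 10000 (0x2710) is rounded down to 9728 (0x2600) */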
1398 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1399 CIFSMaxBufSize, CIFSMaxBufSize);
1401 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1402 CIFSMaxBufSize + max_hdr_size, 0,
1403 SLAB_HWCACHE_ALIGN, 0,
1404 CIFSMaxBufSize + max_hdr_size,
1406 if (cifs_req_cachep == NULL)
1409 if (cifs_min_rcv < 1)
1411 else if (cifs_min_rcv > 64) {
1413 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1416 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1419 if (cifs_req_poolp == NULL) {
1420 kmem_cache_destroy(cifs_req_cachep);
1423 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1424 almost all handle based requests (but not write response, nor is it
1425 sufficient for path based requests). A smaller size would have
1426 been more efficient (compacting multiple slab items on one 4k page)
1427 for the case in which debug was on, but this larger size allows
1428 more SMBs to use small buffer alloc and is still much more
1429 efficient to alloc 1 per page off the slab compared to 17K (5page)
1430 alloc of large cifs buffers even when page debugging is on */
1431 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1432 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1433 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1434 if (cifs_sm_req_cachep == NULL) {
1435 mempool_destroy(cifs_req_poolp);
1436 kmem_cache_destroy(cifs_req_cachep);
1440 if (cifs_min_small < 2)
1442 else if (cifs_min_small > 256) {
1443 cifs_min_small = 256;
1444 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1447 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1448 cifs_sm_req_cachep);
1450 if (cifs_sm_req_poolp == NULL) {
1451 mempool_destroy(cifs_req_poolp);
1452 kmem_cache_destroy(cifs_req_cachep);
1453 kmem_cache_destroy(cifs_sm_req_cachep);
1461 cifs_destroy_request_bufs(void)
1463 mempool_destroy(cifs_req_poolp);
1464 kmem_cache_destroy(cifs_req_cachep);
1465 mempool_destroy(cifs_sm_req_poolp);
1466 kmem_cache_destroy(cifs_sm_req_cachep);
1470 cifs_init_mids(void)
1472 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1473 sizeof(struct mid_q_entry), 0,
1474 SLAB_HWCACHE_ALIGN, NULL);
1475 if (cifs_mid_cachep == NULL)
1478 /* 3 is a reasonable minimum number of simultaneous operations */
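/*
 * The mempool keeps this minimum number of mid_q_entry objects pre-allocated,
 * so allocating a mid can still make progress under memory pressure.
 */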
1479 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1480 if (cifs_mid_poolp == NULL) {
1481 kmem_cache_destroy(cifs_mid_cachep);
1489 cifs_destroy_mids(void)
1491 mempool_destroy(cifs_mid_poolp);
1492 kmem_cache_destroy(cifs_mid_cachep);
1500 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1501 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
1502 INIT_LIST_HEAD(&GlobalDnotifyReqList);
1503 INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
1504 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
1506 * Initialize Global counters
1508 atomic_set(&sesInfoAllocCount, 0);
1509 atomic_set(&tconInfoAllocCount, 0);
1510 atomic_set(&tcpSesAllocCount, 0);
1511 atomic_set(&tcpSesReconnectCount, 0);
1512 atomic_set(&tconInfoReconnectCount, 0);
1514 atomic_set(&bufAllocCount, 0);
1515 atomic_set(&smBufAllocCount, 0);
1516 #ifdef CONFIG_CIFS_STATS2
1517 atomic_set(&totBufAllocCount, 0);
1518 atomic_set(&totSmBufAllocCount, 0);
1519 if (slow_rsp_threshold < 1)
1520 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1521 else if (slow_rsp_threshold > 32767)
1523 "slow response threshold set higher than recommended (0 to 32767)\n");
1524 #endif /* CONFIG_CIFS_STATS2 */
1526 atomic_set(&midCount, 0);
1527 GlobalCurrentXid = 0;
1528 GlobalTotalActiveXid = 0;
1529 GlobalMaxActiveXid = 0;
1530 spin_lock_init(&cifs_tcp_ses_lock);
1531 spin_lock_init(&GlobalMid_Lock);
1533 cifs_lock_secret = get_random_u32();
1535 if (cifs_max_pending < 2) {
1536 cifs_max_pending = 2;
1537 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1538 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1539 cifs_max_pending = CIFS_MAX_REQ;
1540 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1544 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1547 goto out_clean_proc;
1551 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1552 * so that we don't launch too many worker threads but
1553 * Documentation/core-api/workqueue.rst recommends setting it to 0
1556 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1557 decrypt_wq = alloc_workqueue("smb3decryptd",
1558 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1561 goto out_destroy_cifsiod_wq;
1564 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1565 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1566 if (!fileinfo_put_wq) {
1568 goto out_destroy_decrypt_wq;
1571 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1572 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1573 if (!cifsoplockd_wq) {
1575 goto out_destroy_fileinfo_put_wq;
1578 rc = cifs_fscache_register();
1580 goto out_destroy_cifsoplockd_wq;
1582 rc = cifs_init_inodecache();
1584 goto out_unreg_fscache;
1586 rc = cifs_init_mids();
1588 goto out_destroy_inodecache;
1590 rc = cifs_init_request_bufs();
1592 goto out_destroy_mids;
1594 #ifdef CONFIG_CIFS_DFS_UPCALL
1595 rc = dfs_cache_init();
1597 goto out_destroy_request_bufs;
1598 #endif /* CONFIG_CIFS_DFS_UPCALL */
1599 #ifdef CONFIG_CIFS_UPCALL
1600 rc = init_cifs_spnego();
1602 goto out_destroy_dfs_cache;
1603 #endif /* CONFIG_CIFS_UPCALL */
1605 rc = init_cifs_idmap();
1607 goto out_register_key_type;
1609 rc = register_filesystem(&cifs_fs_type);
1611 goto out_init_cifs_idmap;
1613 rc = register_filesystem(&smb3_fs_type);
1615 unregister_filesystem(&cifs_fs_type);
1616 goto out_init_cifs_idmap;
1621 out_init_cifs_idmap:
1623 out_register_key_type:
1624 #ifdef CONFIG_CIFS_UPCALL
1626 out_destroy_dfs_cache:
1628 #ifdef CONFIG_CIFS_DFS_UPCALL
1629 dfs_cache_destroy();
1630 out_destroy_request_bufs:
1632 cifs_destroy_request_bufs();
1634 cifs_destroy_mids();
1635 out_destroy_inodecache:
1636 cifs_destroy_inodecache();
1638 cifs_fscache_unregister();
1639 out_destroy_cifsoplockd_wq:
1640 destroy_workqueue(cifsoplockd_wq);
1641 out_destroy_fileinfo_put_wq:
1642 destroy_workqueue(fileinfo_put_wq);
1643 out_destroy_decrypt_wq:
1644 destroy_workqueue(decrypt_wq);
1645 out_destroy_cifsiod_wq:
1646 destroy_workqueue(cifsiod_wq);
1655 cifs_dbg(NOISY, "exit_smb3\n");
1656 unregister_filesystem(&cifs_fs_type);
1657 unregister_filesystem(&smb3_fs_type);
1658 cifs_dfs_release_automount_timer();
1660 #ifdef CONFIG_CIFS_UPCALL
1663 #ifdef CONFIG_CIFS_DFS_UPCALL
1664 dfs_cache_destroy();
1666 cifs_destroy_request_bufs();
1667 cifs_destroy_mids();
1668 cifs_destroy_inodecache();
1669 cifs_fscache_unregister();
1670 destroy_workqueue(cifsoplockd_wq);
1671 destroy_workqueue(decrypt_wq);
1672 destroy_workqueue(fileinfo_put_wq);
1673 destroy_workqueue(cifsiod_wq);
1677 MODULE_AUTHOR("Steve French");
1678 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1680 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1681 "also older servers complying with the SNIA CIFS Specification)");
1682 MODULE_VERSION(CIFS_VERSION);
1683 MODULE_SOFTDEP("pre: ecb");
1684 MODULE_SOFTDEP("pre: hmac");
1685 MODULE_SOFTDEP("pre: md4");
1686 MODULE_SOFTDEP("pre: md5");
1687 MODULE_SOFTDEP("pre: nls");
1688 MODULE_SOFTDEP("pre: aes");
1689 MODULE_SOFTDEP("pre: cmac");
1690 MODULE_SOFTDEP("pre: sha256");
1691 MODULE_SOFTDEP("pre: sha512");
1692 MODULE_SOFTDEP("pre: aead2");
1693 MODULE_SOFTDEP("pre: ccm");
1694 MODULE_SOFTDEP("pre: gcm");
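/*
 * The "pre:" soft dependencies above hint to modprobe that these crypto and
 * nls modules should be loaded before cifs.ko, since they back SMB signing,
 * encryption (ccm/gcm) and charset conversion.
 */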
1695 module_init(init_cifs)
1696 module_exit(exit_cifs)