 * Copyright (C) International Business Machines Corp., 2002,2008
 * Common Internet FileSystem (CIFS) client
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* Note that BB means BUGBUG (ie something to fix eventually) */
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/vfs.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/namei.h>
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
#define DECLARE_GLOBALS_HERE
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include <linux/key-type.h>
#include "cifs_spnego.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"
bool enable_oplocks = true;
bool linuxExtEnabled = true;
bool lookupCacheEnabled = true;
bool disable_legacy_dialects; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
		 "Default: 16384 Range: 8192 to 130048");
unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
module_param(cifs_min_rcv, uint, 0444);
MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
unsigned int cifs_min_small = 30;
module_param(cifs_min_small, uint, 0444);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
		 "CIFS/SMB1 dialect (N/A for SMB3) "
		 "Default: 32767 Range: 2 to 32767.");
#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);
MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
		 "before logging that a response is delayed. "
		 "Default: 1 (if set to 0 disables msg).");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
		 "helpful to restrict the ability to "
		 "override the default dialects (SMB2.1, "
		 "SMB3 and SMB3.02) on mount with old "
		 "dialects (CIFS/SMB1 and SMB2) since "
		 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
		 " and less secure. Default: n/N/0");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
struct workqueue_struct *cifsiod_wq;
struct workqueue_struct *cifsoplockd_wq;
__u32 cifs_lock_secret;
 * Bumps refcount for cifs super block.
 * Note that it should only be called if a reference to the VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
cifs_sb_active(struct super_block *sb)
	struct cifs_sb_info *server = CIFS_SB(sb);
	if (atomic_inc_return(&server->active) == 1)
		atomic_inc(&sb->s_active);
cifs_sb_deactive(struct super_block *sb)
	struct cifs_sb_info *server = CIFS_SB(sb);
	if (atomic_dec_and_test(&server->active))
		deactivate_super(sb);
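/*
 * Illustrative sketch only (hypothetical caller, not part of the original
 * driver): a pin taken with cifs_sb_active() is expected to be released with
 * cifs_sb_deactive() once the work that relied on the superblock completes.
 */
#if 0
static void example_pin_sb_for_deferred_work(struct file *file)
{
	struct super_block *sb = file_inode(file)->i_sb;

	cifs_sb_active(sb);	/* caller already holds a VFS ref via the open file */
	/* ... hand sb off to asynchronous work and wait for it ... */
	cifs_sb_deactive(sb);	/* may drop the last ref and trigger teardown */
}
#endif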
cifs_read_super(struct super_block *sb)
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	cifs_sb = CIFS_SB(sb);
	tcon = cifs_sb_master_tcon(cifs_sb);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
		sb->s_flags |= SB_POSIXACL;
	if (tcon->snapshot_time)
		sb->s_flags |= SB_RDONLY;
	if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
		sb->s_maxbytes = MAX_LFS_FILESIZE;
		sb->s_maxbytes = MAX_NON_LFS;
	/* BB FIXME fix time_gran to be larger for LANMAN sessions */
	sb->s_time_gran = 100;
	sb->s_magic = CIFS_MAGIC_NUMBER;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	/* tune readahead according to rsize */
	sb->s_bdi->ra_pages = cifs_sb->rsize / PAGE_SIZE;
	sb->s_blocksize = CIFS_MAX_MSGSIZE;
	sb->s_blocksize_bits = 14;	/* default 2**14 = CIFS_MAX_MSGSIZE */
	inode = cifs_root_iget(sb);
		sb->s_d_op = &cifs_ci_dentry_ops;
		sb->s_d_op = &cifs_dentry_ops;
	sb->s_root = d_make_root(inode);
#ifdef CONFIG_CIFS_NFSD_EXPORT
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_dbg(FYI, "export ops supported\n");
		sb->s_export_op = &cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
	cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
static void cifs_kill_sb(struct super_block *sb)
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	cifs_umount(cifs_sb);
cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
	struct super_block *sb = dentry->d_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
			le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
		buf->f_namelen = PATH_MAX;
	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* use part of the create time for extra randomness; see statfs(2) */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
	buf->f_files = 0;	/* undefined */
	buf->f_ffree = 0;	/* unlimited */
	if (server->ops->queryfs)
		rc = server->ops->queryfs(xid, tcon, buf);
static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct TCP_Server_Info *server = tcon->ses->server;
	if (server->ops->fallocate)
		return server->ops->fallocate(file, tcon, mode, off, len);
static int cifs_permission(struct inode *inode, int mask)
	struct cifs_sb_info *cifs_sb;
	cifs_sb = CIFS_SB(inode->i_sb);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
	} else /* file mode might have been restricted at mount time
		on the client (above and beyond ACL on servers) for
		servers which do not support setting and viewing mode bits,
		so allowing the client to check permissions is useful */
		return generic_permission(inode, mask);
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
static struct inode *
cifs_alloc_inode(struct super_block *sb)
	struct cifsInodeInfo *cifs_inode;
	cifs_inode = kmem_cache_alloc(cifs_inode_cachep, GFP_KERNEL);
	cifs_inode->cifsAttrs = 0x20;	/* default */
	cifs_inode->time = 0;
	 * Until the file is open and we have gotten oplock info back from the
	 * server, we cannot assume caching of file data or metadata.
	cifs_set_oplock_level(cifs_inode, 0);
	cifs_inode->flags = 0;
	spin_lock_init(&cifs_inode->writers_lock);
	cifs_inode->writers = 0;
	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
	cifs_inode->server_eof = 0;
	cifs_inode->uniqueid = 0;
	cifs_inode->createtime = 0;
	cifs_inode->epoch = 0;
	generate_random_uuid(cifs_inode->lease_key);
	 * Cannot set i_flags here; they get immediately overwritten to zero
	/* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */
	INIT_LIST_HEAD(&cifs_inode->openFileList);
	INIT_LIST_HEAD(&cifs_inode->llist);
	return &cifs_inode->vfs_inode;
static void cifs_i_callback(struct rcu_head *head)
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(cifs_inode_cachep, CIFS_I(inode));
cifs_destroy_inode(struct inode *inode)
	call_rcu(&inode->i_rcu, cifs_i_callback);
cifs_evict_inode(struct inode *inode)
	truncate_inode_pages_final(&inode->i_data);
	cifs_fscache_release_inode_cookie(inode);
cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
	struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
	struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
	seq_puts(s, ",addr=");
	switch (server->dstaddr.ss_family) {
		seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
		seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
		if (sa6->sin6_scope_id)
			seq_printf(s, "%%%u", sa6->sin6_scope_id);
		seq_puts(s, "(unknown)");
		seq_puts(s, ",rdma");
cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
	if (ses->sectype == Unspecified) {
		if (ses->user_name == NULL)
			seq_puts(s, ",sec=none");
	seq_puts(s, ",sec=");
	switch (ses->sectype) {
		seq_puts(s, "lanman");
		seq_puts(s, "ntlmv2");
		seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns, ses->cred_uid));
		seq_puts(s, "ntlmssp");
		/* shouldn't ever happen */
		seq_puts(s, "unknown");
cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
	seq_puts(s, ",cache=");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
		seq_puts(s, "strict");
	else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
		seq_puts(s, "loose");
cifs_show_nls(struct seq_file *s, struct nls_table *cur)
	struct nls_table *def;
	/* Display iocharset= option if it's not the default charset */
	def = load_nls_default();
		seq_printf(s, ",iocharset=%s", cur->charset);
 * cifs_show_options() is for displaying mount options in /proc/mounts.
 * Not all settable options are displayed but most of the important
cifs_show_options(struct seq_file *s, struct dentry *root)
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
	struct sockaddr *srcaddr;
	srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
	seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
	cifs_show_security(s, tcon->ses);
	cifs_show_cache_flavor(s, cifs_sb);
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
		seq_puts(s, ",multiuser");
	else if (tcon->ses->user_name)
		seq_show_option(s, "username", tcon->ses->user_name);
	if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
		seq_show_option(s, "domain", tcon->ses->domainName);
	if (srcaddr->sa_family != AF_UNSPEC) {
		struct sockaddr_in *saddr4;
		struct sockaddr_in6 *saddr6;
		saddr4 = (struct sockaddr_in *)srcaddr;
		saddr6 = (struct sockaddr_in6 *)srcaddr;
		if (srcaddr->sa_family == AF_INET6)
			seq_printf(s, ",srcaddr=%pI6c",
		else if (srcaddr->sa_family == AF_INET)
			seq_printf(s, ",srcaddr=%pI4",
				   &saddr4->sin_addr.s_addr);
			seq_printf(s, ",srcaddr=BAD-AF:%i",
				   (int)(srcaddr->sa_family));
	seq_printf(s, ",uid=%u",
		   from_kuid_munged(&init_user_ns, cifs_sb->mnt_uid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
		seq_puts(s, ",forceuid");
		seq_puts(s, ",noforceuid");
	seq_printf(s, ",gid=%u",
		   from_kgid_munged(&init_user_ns, cifs_sb->mnt_gid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
		seq_puts(s, ",forcegid");
		seq_puts(s, ",noforcegid");
	cifs_show_address(s, tcon->ses->server);
	seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
		   cifs_sb->mnt_file_mode,
		   cifs_sb->mnt_dir_mode);
	cifs_show_nls(s, cifs_sb->local_nls);
		seq_puts(s, ",seal");
		seq_puts(s, ",nocase");
		seq_puts(s, ",hard");
		seq_puts(s, ",soft");
	if (tcon->use_persistent)
		seq_puts(s, ",persistenthandles");
	else if (tcon->use_resilient)
		seq_puts(s, ",resilienthandles");
	if (tcon->posix_extensions)
		seq_puts(s, ",posix");
	else if (tcon->unix_ext)
		seq_puts(s, ",unix");
		seq_puts(s, ",nounix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
		seq_puts(s, ",nodfs");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
		seq_puts(s, ",posixpaths");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
		seq_puts(s, ",setuids");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
		seq_puts(s, ",idsfromsid");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
		seq_puts(s, ",serverino");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		seq_puts(s, ",rwpidforward");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
		seq_puts(s, ",forcemand");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
		seq_puts(s, ",nouser_xattr");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
		seq_puts(s, ",mapchars");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
		seq_puts(s, ",mapposix");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
		seq_puts(s, ",nobrl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
		seq_puts(s, ",nohandlecache");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
		seq_puts(s, ",cifsacl");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
		seq_puts(s, ",dynperm");
	if (root->d_sb->s_flags & SB_POSIXACL)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
		seq_puts(s, ",mfsymlinks");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
		seq_puts(s, ",nostrictsync");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
		seq_puts(s, ",noperm");
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
		seq_printf(s, ",backupuid=%u",
			   from_kuid_munged(&init_user_ns,
					    cifs_sb->mnt_backupuid));
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
		seq_printf(s, ",backupgid=%u",
			   from_kgid_munged(&init_user_ns,
					    cifs_sb->mnt_backupgid));
	seq_printf(s, ",rsize=%u", cifs_sb->rsize);
	seq_printf(s, ",wsize=%u", cifs_sb->wsize);
	seq_printf(s, ",bsize=%u", cifs_sb->bsize);
558 seq_printf(s, ",echo_interval=%lu",
559 tcon->ses->server->echo_interval / HZ);
560 if (tcon->snapshot_time)
561 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
562 if (tcon->handle_timeout)
563 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
564 /* convert actimeo and display it in seconds */
565 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
570 static void cifs_umount_begin(struct super_block *sb)
572 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
573 struct cifs_tcon *tcon;
578 tcon = cifs_sb_master_tcon(cifs_sb);
580 spin_lock(&cifs_tcp_ses_lock);
581 if ((tcon->tc_count > 1) || (tcon->tidStatus == CifsExiting)) {
582 /* we have other mounts to same share or we have
583 already tried to force umount this and woken up
584 all waiting network requests, nothing to do */
585 spin_unlock(&cifs_tcp_ses_lock);
587 } else if (tcon->tc_count == 1)
588 tcon->tidStatus = CifsExiting;
589 spin_unlock(&cifs_tcp_ses_lock);
591 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
592 /* cancel_notify_requests(tcon); */
593 if (tcon->ses && tcon->ses->server) {
594 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
595 wake_up_all(&tcon->ses->server->request_q);
596 wake_up_all(&tcon->ses->server->response_q);
597 msleep(1); /* yield */
598 /* we have to kick the requests once more */
599 wake_up_all(&tcon->ses->server->response_q);
#ifdef CONFIG_CIFS_STATS2
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
static int cifs_remount(struct super_block *sb, int *flags, char *data)
	*flags |= SB_NODIRATIME;
static int cifs_drop_inode(struct inode *inode)
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	/* no serverino => unconditional eviction */
	return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
		generic_drop_inode(inode);
static const struct super_operations cifs_super_ops = {
	.statfs = cifs_statfs,
	.alloc_inode = cifs_alloc_inode,
	.destroy_inode = cifs_destroy_inode,
	.drop_inode = cifs_drop_inode,
	.evict_inode = cifs_evict_inode,
/*	.delete_inode = cifs_delete_inode, */ /* Do not need the above
	function unless later we add lazy close of inodes or unless the
	kernel forgets to call us with the same number of releases (closes)
	.show_options = cifs_show_options,
	.umount_begin = cifs_umount_begin,
	.remount_fs = cifs_remount,
#ifdef CONFIG_CIFS_STATS2
	.show_stats = cifs_show_stats,
 * Get root dentry from superblock according to prefix path mount option.
 * Return dentry with refcount + 1 on success and an ERR_PTR otherwise.
static struct dentry *
cifs_get_root(struct smb_vol *vol, struct super_block *sb)
	struct dentry *dentry;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	char *full_path = NULL;
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
		return dget(sb->s_root);
	full_path = cifs_build_path_to_root(vol, cifs_sb,
				cifs_sb_master_tcon(cifs_sb), 0);
	if (full_path == NULL)
		return ERR_PTR(-ENOMEM);
	cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(sb->s_root);
		struct inode *dir = d_inode(dentry);
		struct dentry *child;
			dentry = ERR_PTR(-ENOENT);
		if (!S_ISDIR(dir->i_mode)) {
			dentry = ERR_PTR(-ENOTDIR);
		/* skip separators */
		while (*s && *s != sep)
		child = lookup_one_len_unlocked(p, dentry, s - p);
	} while (!IS_ERR(dentry));
static int cifs_set_super(struct super_block *sb, void *data)
	struct cifs_mnt_data *mnt_data = data;
	sb->s_fs_info = mnt_data->cifs_sb;
	return set_anon_super(sb, NULL);
static struct dentry *
cifs_smb3_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data, bool is_smb3)
	struct super_block *sb;
	struct cifs_sb_info *cifs_sb;
	struct smb_vol *volume_info;
	struct cifs_mnt_data mnt_data;
	 * Print the attempted mount operation in the kernel / CIFS log
	 * if CIFS_DEBUG && cifs_FYI are enabled.
		cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
		cifs_info("Attempting to mount %s\n", dev_name);
	volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
	if (IS_ERR(volume_info))
		return ERR_CAST(volume_info);
	cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
	if (cifs_sb == NULL) {
		root = ERR_PTR(-ENOMEM);
	cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
	if (cifs_sb->mountdata == NULL) {
		root = ERR_PTR(-ENOMEM);
	rc = cifs_setup_cifs_sb(volume_info, cifs_sb);
	rc = cifs_mount(cifs_sb, volume_info);
		if (!(flags & SB_SILENT))
			cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
	mnt_data.vol = volume_info;
	mnt_data.cifs_sb = cifs_sb;
	mnt_data.flags = flags;
	/* BB should we make this contingent on mount parm? */
	flags |= SB_NODIRATIME | SB_NOATIME;
	sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
		cifs_umount(cifs_sb);
		cifs_dbg(FYI, "Use existing superblock\n");
		cifs_umount(cifs_sb);
		rc = cifs_read_super(sb);
		sb->s_flags |= SB_ACTIVE;
	root = cifs_get_root(volume_info, sb);
	cifs_dbg(FYI, "dentry root is: %p\n", root);
	deactivate_locked_super(sb);
	cifs_cleanup_volume_info(volume_info);
	kfree(cifs_sb->prepath);
	kfree(cifs_sb->mountdata);
	unload_nls(volume_info->local_nls);
static struct dentry *
smb3_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
	return cifs_smb3_do_mount(fs_type, flags, dev_name, data, true);
static struct dentry *
cifs_do_mount(struct file_system_type *fs_type,
	      int flags, const char *dev_name, void *data)
	return cifs_smb3_do_mount(fs_type, flags, dev_name, data, false);
cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
	struct inode *inode = file_inode(iocb->ki_filp);
	if (iocb->ki_filp->f_flags & O_DIRECT)
		return cifs_user_readv(iocb, iter);
	rc = cifs_revalidate_mapping(inode);
	return generic_file_read_iter(iocb, iter);
static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct inode *inode = file_inode(iocb->ki_filp);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	if (iocb->ki_filp->f_flags & O_DIRECT) {
		written = cifs_user_writev(iocb, from);
		if (written > 0 && CIFS_CACHE_READ(cinode)) {
			cifs_zap_mapping(inode);
				 "Set no oplock for inode=%p after a write operation\n",
	written = cifs_get_writer(cinode);
	written = generic_file_write_iter(iocb, from);
	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
	rc = filemap_fdatawrite(inode->i_mapping);
		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
	cifs_put_writer(cinode);
static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
	 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
	 * the cached file length
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		struct inode *inode = file_inode(file);
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
				mapping_set_error(inode->i_mapping, rc);
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		CIFS_I(inode)->time = 0;
		rc = cifs_revalidate_file_attr(file);
	return generic_file_llseek(file, offset, whence);
cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;
	if (!(S_ISREG(inode->i_mode)))
	/* Check if the file is oplocked if this is a request for a new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		return generic_setlease(file, arg, lease, priv);
struct file_system_type cifs_fs_type = {
	.owner = THIS_MODULE,
	.mount = cifs_do_mount,
	.kill_sb = cifs_kill_sb,
MODULE_ALIAS_FS("cifs");
static struct file_system_type smb3_fs_type = {
	.owner = THIS_MODULE,
	.mount = smb3_do_mount,
	.kill_sb = cifs_kill_sb,
MODULE_ALIAS_FS("smb3");
MODULE_ALIAS("smb3");
const struct inode_operations cifs_dir_inode_ops = {
	.create = cifs_create,
	.atomic_open = cifs_atomic_open,
	.lookup = cifs_lookup,
	.getattr = cifs_getattr,
	.unlink = cifs_unlink,
	.link = cifs_hardlink,
	.rename = cifs_rename2,
	.permission = cifs_permission,
	.setattr = cifs_setattr,
	.symlink = cifs_symlink,
	.listxattr = cifs_listxattr,
const struct inode_operations cifs_file_inode_ops = {
	.setattr = cifs_setattr,
	.getattr = cifs_getattr,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
const struct inode_operations cifs_symlink_inode_ops = {
	.get_link = cifs_get_link,
	.permission = cifs_permission,
	.listxattr = cifs_listxattr,
static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
		struct file *dst_file, loff_t destoff, loff_t len,
		unsigned int remap_flags)
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src = src_file->private_data;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *target_tcon;
	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
	cifs_dbg(FYI, "clone range\n");
	if (!src_file->private_data || !dst_file->private_data) {
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
	smb_file_target = dst_file->private_data;
	target_tcon = tlink_tcon(smb_file_target->tlink);
	 * Note: the cifs case is easier than btrfs since the server is
	 * responsible for checking proper open modes and file type, and if it
	 * wants to, the server could even support a copy where source == target.
	lock_two_nondirectories(target_inode, src_inode);
		len = src_inode->i_size - off;
	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush the first and last pages first? */
	truncate_inode_pages_range(&target_inode->i_data, destoff,
				   PAGE_ALIGN(destoff + len)-1);
	if (target_tcon->ses->server->ops->duplicate_extents)
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here, it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
	return rc < 0 ? rc : len;
ssize_t cifs_file_copychunk_range(unsigned int xid,
				struct file *src_file, loff_t off,
				struct file *dst_file, loff_t destoff,
				size_t len, unsigned int flags)
	struct inode *src_inode = file_inode(src_file);
	struct inode *target_inode = file_inode(dst_file);
	struct cifsFileInfo *smb_file_src;
	struct cifsFileInfo *smb_file_target;
	struct cifs_tcon *src_tcon;
	struct cifs_tcon *target_tcon;
	cifs_dbg(FYI, "copychunk range\n");
	if (src_inode == target_inode) {
	if (!src_file->private_data || !dst_file->private_data) {
		cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
	smb_file_target = dst_file->private_data;
	smb_file_src = src_file->private_data;
	src_tcon = tlink_tcon(smb_file_src->tlink);
	target_tcon = tlink_tcon(smb_file_target->tlink);
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(VFS, "source and target of copy not on same server\n");
	 * Note: the cifs case is easier than btrfs since the server is
	 * responsible for checking proper open modes and file type, and if it
	 * wants to, the server could even support a copy where source == target.
	lock_two_nondirectories(target_inode, src_inode);
	cifs_dbg(FYI, "about to flush pages\n");
	/* should we flush the first and last pages first? */
	truncate_inode_pages(&target_inode->i_data, 0);
	if (target_tcon->ses->server->ops->copychunk_range)
		rc = target_tcon->ses->server->ops->copychunk_range(xid,
			smb_file_src, smb_file_target, off, len, destoff);
	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	CIFS_I(target_inode)->time = 0;
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here, it is a little cleaner to be consistent
	unlock_two_nondirectories(src_inode, target_inode);
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
				    struct file *dst_file, loff_t destoff,
				    size_t len, unsigned int flags)
	unsigned int xid = get_xid();
	rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
const struct file_operations cifs_file_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_file_strict_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_file_direct_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_file_nobrl_ops = {
	.read_iter = cifs_loose_read_iter,
	.write_iter = cifs_file_write_iter,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_file_strict_nobrl_ops = {
	.read_iter = cifs_strict_readv,
	.write_iter = cifs_strict_writev,
	.release = cifs_close,
	.fsync = cifs_strict_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_strict_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.llseek = cifs_llseek,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_file_direct_nobrl_ops = {
	.read_iter = cifs_direct_readv,
	.write_iter = cifs_direct_writev,
	.release = cifs_close,
	.fsync = cifs_fsync,
	.flush = cifs_flush,
	.mmap = cifs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = cifs_llseek,
	.setlease = cifs_setlease,
	.fallocate = cifs_fallocate,
const struct file_operations cifs_dir_ops = {
	.iterate_shared = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
	.unlocked_ioctl = cifs_ioctl,
	.copy_file_range = cifs_copy_file_range,
	.remap_file_range = cifs_remap_file_range,
	.llseek = generic_file_llseek,
	.fsync = cifs_dir_fsync,
cifs_init_once(void *inode)
	struct cifsInodeInfo *cifsi = inode;
	inode_init_once(&cifsi->vfs_inode);
	init_rwsem(&cifsi->lock_sem);
cifs_init_inodecache(void)
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT|
						  SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	if (cifs_inode_cachep == NULL)
cifs_destroy_inodecache(void)
	 * Make sure all delayed rcu free inodes are flushed before we
	kmem_cache_destroy(cifs_inode_cachep);
cifs_init_request_bufs(void)
	 * SMB2 maximum header size is bigger than CIFS one - no problems to
	 * allocate some more bytes for CIFS.
	size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size cannot be smaller than 2 * PATH_MAX since the maximum
	   Unicode path name has to fit in any SMB/CIFS path-based frame */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1328 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1329 CIFSMaxBufSize, CIFSMaxBufSize);
1331 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1332 CIFSMaxBufSize + max_hdr_size, 0,
1333 SLAB_HWCACHE_ALIGN, 0,
1334 CIFSMaxBufSize + max_hdr_size,
1336 if (cifs_req_cachep == NULL)
1339 if (cifs_min_rcv < 1)
1341 else if (cifs_min_rcv > 64) {
1343 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1346 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1349 if (cifs_req_poolp == NULL) {
1350 kmem_cache_destroy(cifs_req_cachep);
1353 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1354 almost all handle based requests (but not write response, nor is it
1355 sufficient for path based requests). A smaller size would have
1356 been more efficient (compacting multiple slab items on one 4k page)
1357 for the case in which debug was on, but this larger size allows
1358 more SMBs to use small buffer alloc and is still much more
1359 efficient to alloc 1 per page off the slab compared to 17K (5page)
1360 alloc of large cifs buffers even when page debugging is on */
1361 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1362 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1363 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1364 if (cifs_sm_req_cachep == NULL) {
1365 mempool_destroy(cifs_req_poolp);
1366 kmem_cache_destroy(cifs_req_cachep);
1370 if (cifs_min_small < 2)
1372 else if (cifs_min_small > 256) {
1373 cifs_min_small = 256;
1374 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1377 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1378 cifs_sm_req_cachep);
1380 if (cifs_sm_req_poolp == NULL) {
1381 mempool_destroy(cifs_req_poolp);
1382 kmem_cache_destroy(cifs_req_cachep);
1383 kmem_cache_destroy(cifs_sm_req_cachep);
1391 cifs_destroy_request_bufs(void)
1393 mempool_destroy(cifs_req_poolp);
1394 kmem_cache_destroy(cifs_req_cachep);
1395 mempool_destroy(cifs_sm_req_poolp);
1396 kmem_cache_destroy(cifs_sm_req_cachep);
1400 cifs_init_mids(void)
1402 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1403 sizeof(struct mid_q_entry), 0,
1404 SLAB_HWCACHE_ALIGN, NULL);
1405 if (cifs_mid_cachep == NULL)
1408 /* 3 is a reasonable minimum number of simultaneous operations */
1409 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1410 if (cifs_mid_poolp == NULL) {
1411 kmem_cache_destroy(cifs_mid_cachep);
1419 cifs_destroy_mids(void)
1421 mempool_destroy(cifs_mid_poolp);
1422 kmem_cache_destroy(cifs_mid_cachep);
	INIT_LIST_HEAD(&cifs_tcp_ses_list);
#ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif /* was needed for dnotify, and will be needed for inotify when the VFS is fixed */
	 * Initialize Global counters
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);
	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
	if (slow_rsp_threshold < 1)
		cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
	else if (slow_rsp_threshold > 32767)
			 "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */
	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	spin_lock_init(&cifs_tcp_ses_lock);
	spin_lock_init(&GlobalMid_Lock);
	cifs_lock_secret = get_random_u32();
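	/* editorial note: per-boot random value that the driver mixes into
	 * lock-owner hashes so that byte-range lock keys sent to the server
	 * do not expose raw kernel pointer values
	 */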
	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
	} else if (cifs_max_pending > CIFS_MAX_REQ) {
		cifs_max_pending = CIFS_MAX_REQ;
		cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
	cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
		goto out_clean_proc;
	cifsoplockd_wq = alloc_workqueue("cifsoplockd",
					 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
	if (!cifsoplockd_wq) {
		goto out_destroy_cifsiod_wq;
	rc = cifs_fscache_register();
		goto out_destroy_cifsoplockd_wq;
	rc = cifs_init_inodecache();
		goto out_unreg_fscache;
	rc = cifs_init_mids();
		goto out_destroy_inodecache;
	rc = cifs_init_request_bufs();
		goto out_destroy_mids;
#ifdef CONFIG_CIFS_DFS_UPCALL
	rc = dfs_cache_init();
		goto out_destroy_request_bufs;
#endif /* CONFIG_CIFS_DFS_UPCALL */
#ifdef CONFIG_CIFS_UPCALL
	rc = init_cifs_spnego();
		goto out_destroy_dfs_cache;
#endif /* CONFIG_CIFS_UPCALL */
#ifdef CONFIG_CIFS_ACL
	rc = init_cifs_idmap();
		goto out_register_key_type;
#endif /* CONFIG_CIFS_ACL */
	rc = register_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
	rc = register_filesystem(&smb3_fs_type);
		unregister_filesystem(&cifs_fs_type);
		goto out_init_cifs_idmap;
out_init_cifs_idmap:
#ifdef CONFIG_CIFS_ACL
out_register_key_type:
#ifdef CONFIG_CIFS_UPCALL
out_destroy_dfs_cache:
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
out_destroy_request_bufs:
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
out_destroy_inodecache:
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
out_destroy_cifsoplockd_wq:
	destroy_workqueue(cifsoplockd_wq);
out_destroy_cifsiod_wq:
	destroy_workqueue(cifsiod_wq);
	cifs_dbg(NOISY, "exit_smb3\n");
	unregister_filesystem(&cifs_fs_type);
	unregister_filesystem(&smb3_fs_type);
	cifs_dfs_release_automount_timer();
#ifdef CONFIG_CIFS_ACL
#ifdef CONFIG_CIFS_UPCALL
#ifdef CONFIG_CIFS_DFS_UPCALL
	dfs_cache_destroy();
	cifs_destroy_request_bufs();
	cifs_destroy_mids();
	cifs_destroy_inodecache();
	cifs_fscache_unregister();
	destroy_workqueue(cifsoplockd_wq);
	destroy_workqueue(cifsiod_wq);
MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
	("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
	 "also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
MODULE_SOFTDEP("pre: cmac");
MODULE_SOFTDEP("pre: sha256");
MODULE_SOFTDEP("pre: sha512");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
module_init(init_cifs)
module_exit(exit_cifs)