1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2009, 2013
9 * Contains the routines for constructing the SMB2 PDUs themselves
13 /* SMB2 PDU handling routines here - except for leftovers (e.g. session setup) */
14 /* Note that there are handle-based routines which must be */
15 /* treated slightly differently for reconnection purposes since we never */
16 /* want to reuse a stale file handle and only the caller knows the file info */
19 #include <linux/kernel.h>
20 #include <linux/vfs.h>
21 #include <linux/task_io_accounting_ops.h>
22 #include <linux/uaccess.h>
23 #include <linux/uuid.h>
24 #include <linux/pagemap.h>
25 #include <linux/xattr.h>
28 #include "cifsproto.h"
29 #include "smb2proto.h"
30 #include "cifs_unicode.h"
31 #include "cifs_debug.h"
33 #include "smb2status.h"
36 #include "cifs_spnego.h"
37 #include "smbdirect.h"
39 #ifdef CONFIG_CIFS_DFS_UPCALL
40 #include "dfs_cache.h"
44 * The following table defines the expected "StructureSize" of SMB2 requests
45 * in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
47 * Note that commands are defined in smb2pdu.h in le16 but the array below is
48 * indexed by command in host byte order.
50 static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
51 /* SMB2_NEGOTIATE */ 36,
52 /* SMB2_SESSION_SETUP */ 25,
54 /* SMB2_TREE_CONNECT */ 9,
55 /* SMB2_TREE_DISCONNECT */ 4,
65 /* SMB2_QUERY_DIRECTORY */ 33,
66 /* SMB2_CHANGE_NOTIFY */ 32,
67 /* SMB2_QUERY_INFO */ 41,
68 /* SMB2_SET_INFO */ 33,
69 /* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
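/*
 * Return 1 when requests on this tcon must be encrypted ("sealed"): the
 * session or share is flagged for encryption, or sealing was requested on
 * a server advertising SMB2_GLOBAL_CAP_ENCRYPTION. Return 0 otherwise.
 */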
72 int smb3_encryption_required(const struct cifs_tcon *tcon)
74 if (!tcon || !tcon->ses)
76 if ((tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
77 (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
80 (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
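/*
 * Fill in the fixed 64-byte SMB2 header: protocol id, command, credit
 * request and charge, process/tree/session ids, and the SIGNED flag when
 * the connection signs but does not encrypt.
 */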
86 smb2_hdr_assemble(struct smb2_hdr *shdr, __le16 smb2_cmd,
87 const struct cifs_tcon *tcon,
88 struct TCP_Server_Info *server)
90 shdr->ProtocolId = SMB2_PROTO_NUMBER;
91 shdr->StructureSize = cpu_to_le16(64);
92 shdr->Command = smb2_cmd;
94 spin_lock(&server->req_lock);
95 /* Request up to 10 credits but don't go over the limit. */
96 if (server->credits >= server->max_credits)
97 shdr->CreditRequest = cpu_to_le16(0);
99 shdr->CreditRequest = cpu_to_le16(
100 min_t(int, server->max_credits -
101 server->credits, 10));
102 spin_unlock(&server->req_lock);
104 shdr->CreditRequest = cpu_to_le16(2);
106 shdr->Id.SyncId.ProcessId = cpu_to_le32((__u16)current->tgid);
111 /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
112 /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
113 if (server && (server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
114 shdr->CreditCharge = cpu_to_le16(1);
115 /* else CreditCharge MBZ */
117 shdr->Id.SyncId.TreeId = cpu_to_le32(tcon->tid);
118 /* Uid is not converted */
120 shdr->SessionId = cpu_to_le64(tcon->ses->Suid);
123 * If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
124 * to pass the path on the Open SMB prefixed by \\server\share.
125 * Not sure when we would need to do the augmented path (if ever) and
126 * setting this flag breaks the SMB2 open operation since it is
127 * illegal to send an empty path name (without \\server\share prefix)
128 * when the DFS flag is set in the SMB open header. We could
129 * consider setting the flag on all operations other than open
130 * but it is safer not to set it for now.
132 /* if (tcon->share_flags & SHI1005_FLAGS_DFS)
133 shdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
135 if (server && server->sign && !smb3_encryption_required(tcon))
136 shdr->Flags |= SMB2_FLAGS_SIGNED;
142 smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
143 struct TCP_Server_Info *server)
146 struct nls_table *nls_codepage;
147 struct cifs_ses *ses;
151 * SMB2 NegProt, SessSetup, and Logoff do not have a tcon yet, so the
152 * tcp and smb session status checks are done differently for those
153 * three - in the calling routine.
159 * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
160 * cifs_tree_connect().
162 if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
165 if (tcon->tidStatus == CifsExiting) {
167 * only tree disconnect, open, and write,
168 * (and ulogoff which does not have tcon)
169 * are allowed as we start force umount.
171 if ((smb2_command != SMB2_WRITE) &&
172 (smb2_command != SMB2_CREATE) &&
173 (smb2_command != SMB2_TREE_DISCONNECT)) {
174 cifs_dbg(FYI, "can not send cmd %d while umounting\n",
179 if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
180 (!tcon->ses->server) || !server)
184 retries = server->nr_targets;
187 * Give the demultiplex thread up to 10 seconds for each target available
188 * for reconnect -- should be greater than the cifs socket timeout which is 7
191 while (server->tcpStatus == CifsNeedReconnect) {
193 * Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
194 * here since they are implicitly done when session drops.
196 switch (smb2_command) {
198 * BB Should we keep oplock break and add flush to exceptions?
200 case SMB2_TREE_DISCONNECT:
203 case SMB2_OPLOCK_BREAK:
207 rc = wait_event_interruptible_timeout(server->response_q,
208 (server->tcpStatus != CifsNeedReconnect),
211 cifs_dbg(FYI, "%s: aborting reconnect due to a received signal by the process\n",
216 /* are we still trying to reconnect? */
217 if (server->tcpStatus != CifsNeedReconnect)
220 if (retries && --retries)
224 * on "soft" mounts we wait once. Hard mounts keep
225 * retrying until process is killed or server comes
229 cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
232 retries = server->nr_targets;
235 if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
238 nls_codepage = load_nls_default();
241 * need to prevent multiple threads trying to simultaneously reconnect
242 * the same SMB session
244 mutex_lock(&tcon->ses->session_mutex);
247 * Recheck after acquiring the mutex. If another thread is negotiating
248 * and the server never sends an answer the socket will be closed
249 * and tcpStatus set to reconnect.
251 if (server->tcpStatus == CifsNeedReconnect) {
253 mutex_unlock(&tcon->ses->session_mutex);
258 * If we are reconnecting an extra channel, bind
260 if (CIFS_SERVER_IS_CHAN(server)) {
262 ses->binding_chan = cifs_ses_find_chan(ses, server);
265 rc = cifs_negotiate_protocol(0, tcon->ses);
266 if (!rc && tcon->ses->need_reconnect) {
267 rc = cifs_setup_session(0, tcon->ses, nls_codepage);
268 if ((rc == -EACCES) && !tcon->retry) {
270 ses->binding = false;
271 ses->binding_chan = NULL;
272 mutex_unlock(&tcon->ses->session_mutex);
277 * End of channel binding
279 ses->binding = false;
280 ses->binding_chan = NULL;
282 if (rc || !tcon->need_reconnect) {
283 mutex_unlock(&tcon->ses->session_mutex);
287 cifs_mark_open_files_invalid(tcon);
288 if (tcon->use_persistent)
289 tcon->need_reopen_files = true;
291 rc = cifs_tree_connect(0, tcon, nls_codepage);
292 mutex_unlock(&tcon->ses->session_mutex);
294 cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
296 /* If sess reconnected but tcon didn't, something strange ... */
297 pr_warn_once("reconnect tcon failed rc = %d\n", rc);
301 if (smb2_command != SMB2_INTERNAL_CMD)
302 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
304 atomic_inc(&tconInfoReconnectCount);
307 * Check if this is a handle-based operation so we know whether we can
308 * continue or not without returning to the caller to reset the file handle.
311 * BB Is flush done by server on drop of tcp session? Should we special
312 * case it and skip above?
314 switch (smb2_command) {
320 case SMB2_QUERY_DIRECTORY:
321 case SMB2_CHANGE_NOTIFY:
322 case SMB2_QUERY_INFO:
327 unload_nls(nls_codepage);
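/*
 * Fill in the fixed-length part of an SMB2 request: assemble the header
 * and set StructureSize2 from the per-command table above, returning the
 * fixed length of the request in *total_len.
 */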
332 fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon,
333 struct TCP_Server_Info *server,
335 unsigned int *total_len)
337 struct smb2_pdu *spdu = (struct smb2_pdu *)buf;
338 /* look up the word count, i.e. StructureSize, from the table */
339 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)];
342 * smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
343 * largest operations (Create)
347 smb2_hdr_assemble(&spdu->hdr, smb2_command, tcon, server);
348 spdu->StructureSize2 = cpu_to_le16(parmsize);
350 *total_len = parmsize + sizeof(struct smb2_hdr);
354 * Allocate and return pointer to an SMB request hdr, and set basic
355 * SMB information in the SMB header. If the return code is zero, this
356 * function must have filled in request_buf pointer.
358 static int __smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
359 struct TCP_Server_Info *server,
360 void **request_buf, unsigned int *total_len)
362 /* BB eventually switch this to SMB2 specific small buf size */
363 if (smb2_command == SMB2_SET_INFO)
364 *request_buf = cifs_buf_get();
366 *request_buf = cifs_small_buf_get();
367 if (*request_buf == NULL) {
368 /* BB should we add a retry in here if not a writepage? */
372 fill_small_buf(smb2_command, tcon, server,
373 (struct smb2_hdr *)(*request_buf),
377 uint16_t com_code = le16_to_cpu(smb2_command);
378 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
379 cifs_stats_inc(&tcon->num_smbs_sent);
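/*
 * Like __smb2_plain_req_init() but first reconnects the transport, session
 * and tree connection if needed.
 */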
385 static int smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
386 struct TCP_Server_Info *server,
387 void **request_buf, unsigned int *total_len)
391 rc = smb2_reconnect(smb2_command, tcon, server);
395 return __smb2_plain_req_init(smb2_command, tcon, server, request_buf,
399 static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
400 struct TCP_Server_Info *server,
401 void **request_buf, unsigned int *total_len)
403 /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
404 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
405 return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
406 request_buf, total_len);
408 return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
409 request_buf, total_len);
412 /* For explanation of negotiate contexts see MS-SMB2 section 2.2.3.1 */
415 build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
417 pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
418 pneg_ctxt->DataLength = cpu_to_le16(38);
419 pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
420 pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
421 get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
422 pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
426 build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt)
428 pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
429 pneg_ctxt->DataLength =
430 cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
431 - sizeof(struct smb2_neg_context));
432 pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(3);
433 pneg_ctxt->CompressionAlgorithms[0] = SMB3_COMPRESS_LZ77;
434 pneg_ctxt->CompressionAlgorithms[1] = SMB3_COMPRESS_LZ77_HUFF;
435 pneg_ctxt->CompressionAlgorithms[2] = SMB3_COMPRESS_LZNT1;
439 build_signing_ctxt(struct smb2_signing_capabilities *pneg_ctxt)
441 unsigned int ctxt_len = sizeof(struct smb2_signing_capabilities);
442 unsigned short num_algs = 1; /* number of signing algorithms sent */
444 pneg_ctxt->ContextType = SMB2_SIGNING_CAPABILITIES;
446 * Context Data length must be rounded to multiple of 8 for some servers
448 pneg_ctxt->DataLength = cpu_to_le16(DIV_ROUND_UP(
449 sizeof(struct smb2_signing_capabilities) -
450 sizeof(struct smb2_neg_context) +
451 (num_algs * 2 /* sizeof u16 */), 8) * 8);
452 pneg_ctxt->SigningAlgorithmCount = cpu_to_le16(num_algs);
453 pneg_ctxt->SigningAlgorithms[0] = cpu_to_le16(SIGNING_ALG_AES_CMAC);
455 ctxt_len += 2 /* sizeof le16 */ * num_algs;
456 ctxt_len = DIV_ROUND_UP(ctxt_len, 8) * 8;
458 /* TBD add SIGNING_ALG_AES_GMAC and/or SIGNING_ALG_HMAC_SHA256 */
462 build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt)
464 pneg_ctxt->ContextType = SMB2_ENCRYPTION_CAPABILITIES;
465 if (require_gcm_256) {
466 pneg_ctxt->DataLength = cpu_to_le16(4); /* Cipher Count + 1 cipher */
467 pneg_ctxt->CipherCount = cpu_to_le16(1);
468 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES256_GCM;
469 } else if (enable_gcm_256) {
470 pneg_ctxt->DataLength = cpu_to_le16(8); /* Cipher Count + 3 ciphers */
471 pneg_ctxt->CipherCount = cpu_to_le16(3);
472 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
473 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES256_GCM;
474 pneg_ctxt->Ciphers[2] = SMB2_ENCRYPTION_AES128_CCM;
476 pneg_ctxt->DataLength = cpu_to_le16(6); /* Cipher Count + 2 ciphers */
477 pneg_ctxt->CipherCount = cpu_to_le16(2);
478 pneg_ctxt->Ciphers[0] = SMB2_ENCRYPTION_AES128_GCM;
479 pneg_ctxt->Ciphers[1] = SMB2_ENCRYPTION_AES128_CCM;
484 build_netname_ctxt(struct smb2_netname_neg_context *pneg_ctxt, char *hostname)
486 struct nls_table *cp = load_nls_default();
488 pneg_ctxt->ContextType = SMB2_NETNAME_NEGOTIATE_CONTEXT_ID;
490 /* copy up to max of first 100 bytes of server name to NetName field */
491 pneg_ctxt->DataLength = cpu_to_le16(2 * cifs_strtoUTF16(pneg_ctxt->NetName, hostname, 100, cp));
492 /* context size is DataLength + minimal smb2_neg_context */
493 return DIV_ROUND_UP(le16_to_cpu(pneg_ctxt->DataLength) +
494 sizeof(struct smb2_neg_context), 8) * 8;
498 build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
500 pneg_ctxt->ContextType = SMB2_POSIX_EXTENSIONS_AVAILABLE;
501 pneg_ctxt->DataLength = cpu_to_le16(POSIX_CTXT_DATA_LEN);
502 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
503 pneg_ctxt->Name[0] = 0x93;
504 pneg_ctxt->Name[1] = 0xAD;
505 pneg_ctxt->Name[2] = 0x25;
506 pneg_ctxt->Name[3] = 0x50;
507 pneg_ctxt->Name[4] = 0x9C;
508 pneg_ctxt->Name[5] = 0xB4;
509 pneg_ctxt->Name[6] = 0x11;
510 pneg_ctxt->Name[7] = 0xE7;
511 pneg_ctxt->Name[8] = 0xB4;
512 pneg_ctxt->Name[9] = 0x23;
513 pneg_ctxt->Name[10] = 0x83;
514 pneg_ctxt->Name[11] = 0xDE;
515 pneg_ctxt->Name[12] = 0x96;
516 pneg_ctxt->Name[13] = 0x8B;
517 pneg_ctxt->Name[14] = 0xCD;
518 pneg_ctxt->Name[15] = 0x7C;
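/*
 * Append the SMB3.1.1 negotiate contexts (preauth integrity, encryption,
 * netname and posix, plus compression and signing when enabled) after the
 * fixed portion of the negotiate request, updating *total_len as we go.
 */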
522 assemble_neg_contexts(struct smb2_negotiate_req *req,
523 struct TCP_Server_Info *server, unsigned int *total_len)
526 unsigned int ctxt_len, neg_context_count;
528 if (*total_len > 200) {
529 /* In case the length is corrupted, don't want to overrun the smb buffer */
530 cifs_server_dbg(VFS, "Bad frame length assembling neg contexts\n");
535 * round up total_len of fixed part of SMB3 negotiate request to 8
536 * byte boundary before adding negotiate contexts
538 *total_len = roundup(*total_len, 8);
540 pneg_ctxt = (*total_len) + (char *)req;
541 req->NegotiateContextOffset = cpu_to_le32(*total_len);
543 build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt);
544 ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_preauth_neg_context), 8) * 8;
545 *total_len += ctxt_len;
546 pneg_ctxt += ctxt_len;
548 build_encrypt_ctxt((struct smb2_encryption_neg_context *)pneg_ctxt);
549 ctxt_len = DIV_ROUND_UP(sizeof(struct smb2_encryption_neg_context), 8) * 8;
550 *total_len += ctxt_len;
551 pneg_ctxt += ctxt_len;
553 ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
555 *total_len += ctxt_len;
556 pneg_ctxt += ctxt_len;
558 build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
559 *total_len += sizeof(struct smb2_posix_neg_context);
560 pneg_ctxt += sizeof(struct smb2_posix_neg_context);
562 neg_context_count = 4;
564 if (server->compress_algorithm) {
565 build_compression_ctxt((struct smb2_compression_capabilities_context *)
567 ctxt_len = DIV_ROUND_UP(
568 sizeof(struct smb2_compression_capabilities_context),
570 *total_len += ctxt_len;
571 pneg_ctxt += ctxt_len;
575 if (enable_negotiate_signing) {
576 ctxt_len = build_signing_ctxt((struct smb2_signing_capabilities *)
578 *total_len += ctxt_len;
579 pneg_ctxt += ctxt_len;
583 /* check for and add transport_capabilities and signing capabilities */
584 req->NegotiateContextCount = cpu_to_le16(neg_context_count);
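/*
 * Validate the preauth integrity context returned by the server; we only
 * ever request (and accept) the SHA-512 hash algorithm.
 */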
588 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
590 unsigned int len = le16_to_cpu(ctxt->DataLength);
592 /* If invalid preauth context warn but use what we requested, SHA-512 */
593 if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
594 pr_warn_once("server sent bad preauth context\n");
596 } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
597 pr_warn_once("server sent invalid SaltLength\n");
600 if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
601 pr_warn_once("Invalid SMB3 hash algorithm count\n");
602 if (ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
603 pr_warn_once("unknown SMB3 hash algorithm\n");
606 static void decode_compress_ctx(struct TCP_Server_Info *server,
607 struct smb2_compression_capabilities_context *ctxt)
609 unsigned int len = le16_to_cpu(ctxt->DataLength);
611 /* sizeof compress context is a one element compression capability struct */
613 pr_warn_once("server sent bad compression cntxt\n");
616 if (le16_to_cpu(ctxt->CompressionAlgorithmCount) != 1) {
617 pr_warn_once("Invalid SMB3 compress algorithm count\n");
620 if (le16_to_cpu(ctxt->CompressionAlgorithms[0]) > 3) {
621 pr_warn_once("unknown compression algorithm\n");
624 server->compress_algorithm = ctxt->CompressionAlgorithms[0];
627 static int decode_encrypt_ctx(struct TCP_Server_Info *server,
628 struct smb2_encryption_neg_context *ctxt)
630 unsigned int len = le16_to_cpu(ctxt->DataLength);
632 cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
633 if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
634 pr_warn_once("server sent bad crypto ctxt len\n");
638 if (le16_to_cpu(ctxt->CipherCount) != 1) {
639 pr_warn_once("Invalid SMB3.11 cipher count\n");
642 cifs_dbg(FYI, "SMB311 cipher type:%d\n", le16_to_cpu(ctxt->Ciphers[0]));
643 if (require_gcm_256) {
644 if (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM) {
645 cifs_dbg(VFS, "Server does not support requested encryption type (AES256 GCM)\n");
648 } else if (ctxt->Ciphers[0] == 0) {
650 * e.g. if the server only supported AES256_CCM (very unlikely), or it
651 * supported no encryption types, or had them all disabled.
652 * Since GLOBAL_CAP_ENCRYPTION will not be set, if the mount requested
653 * encryption ("seal") the checks later on during tree connection will
654 * return the proper rc; but if seal was not requested by the client,
655 * we can't fail here, since the server is allowed to return 0 to
656 * indicate no supported cipher.
658 server->cipher_type = 0;
659 server->capabilities &= ~SMB2_GLOBAL_CAP_ENCRYPTION;
660 pr_warn_once("Server does not support requested encryption types\n");
662 } else if ((ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_CCM) &&
663 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES128_GCM) &&
664 (ctxt->Ciphers[0] != SMB2_ENCRYPTION_AES256_GCM)) {
665 /* server returned a cipher we didn't ask for */
666 pr_warn_once("Invalid SMB3.11 cipher returned\n");
669 server->cipher_type = ctxt->Ciphers[0];
670 server->capabilities |= SMB2_GLOBAL_CAP_ENCRYPTION;
674 static void decode_signing_ctx(struct TCP_Server_Info *server,
675 struct smb2_signing_capabilities *pctxt)
677 unsigned int len = le16_to_cpu(pctxt->DataLength);
679 if ((len < 4) || (len > 16)) {
680 pr_warn_once("server sent bad signing negcontext\n");
683 if (le16_to_cpu(pctxt->SigningAlgorithmCount) != 1) {
684 pr_warn_once("Invalid signing algorithm count\n");
687 if (le16_to_cpu(pctxt->SigningAlgorithms[0]) > 2) {
688 pr_warn_once("unknown signing algorithm\n");
692 server->signing_negotiated = true;
693 server->signing_algorithm = le16_to_cpu(pctxt->SigningAlgorithms[0]);
694 cifs_dbg(FYI, "signing algorithm %d chosen\n",
695 server->signing_algorithm);
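/*
 * Walk the 8-byte-aligned list of negotiate contexts in an SMB3.1.1
 * negotiate response and dispatch each context to its decoder.
 */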
699 static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
700 struct TCP_Server_Info *server,
701 unsigned int len_of_smb)
703 struct smb2_neg_context *pctx;
704 unsigned int offset = le32_to_cpu(rsp->NegotiateContextOffset);
705 unsigned int ctxt_cnt = le16_to_cpu(rsp->NegotiateContextCount);
706 unsigned int len_of_ctxts, i;
709 cifs_dbg(FYI, "decoding %d negotiate contexts\n", ctxt_cnt);
710 if (len_of_smb <= offset) {
711 cifs_server_dbg(VFS, "Invalid response: negotiate context offset\n");
715 len_of_ctxts = len_of_smb - offset;
717 for (i = 0; i < ctxt_cnt; i++) {
719 /* check that offset is not beyond end of SMB */
720 if (len_of_ctxts == 0)
723 if (len_of_ctxts < sizeof(struct smb2_neg_context))
726 pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
727 clen = le16_to_cpu(pctx->DataLength);
728 if (clen > len_of_ctxts)
731 if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES)
732 decode_preauth_context(
733 (struct smb2_preauth_neg_context *)pctx);
734 else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES)
735 rc = decode_encrypt_ctx(server,
736 (struct smb2_encryption_neg_context *)pctx);
737 else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES)
738 decode_compress_ctx(server,
739 (struct smb2_compression_capabilities_context *)pctx);
740 else if (pctx->ContextType == SMB2_POSIX_EXTENSIONS_AVAILABLE)
741 server->posix_ext_supported = true;
742 else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES)
743 decode_signing_ctx(server,
744 (struct smb2_signing_capabilities *)pctx);
746 cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
747 le16_to_cpu(pctx->ContextType));
751 /* offsets must be 8 byte aligned */
752 clen = (clen + 7) & ~0x7;
753 offset += clen + sizeof(struct smb2_neg_context);
754 len_of_ctxts -= clen;
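/*
 * Build the SMB2_CREATE_TAG_POSIX create context carrying the requested
 * POSIX mode bits for an open/create.
 */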
759 static struct create_posix *
760 create_posix_buf(umode_t mode)
762 struct create_posix *buf;
764 buf = kzalloc(sizeof(struct create_posix),
769 buf->ccontext.DataOffset =
770 cpu_to_le16(offsetof(struct create_posix, Mode));
771 buf->ccontext.DataLength = cpu_to_le32(4);
772 buf->ccontext.NameOffset =
773 cpu_to_le16(offsetof(struct create_posix, Name));
774 buf->ccontext.NameLength = cpu_to_le16(16);
776 /* SMB2_CREATE_TAG_POSIX is "0x93AD25509CB411E7B42383DE968BCD7C" */
787 buf->Name[10] = 0x83;
788 buf->Name[11] = 0xDE;
789 buf->Name[12] = 0x96;
790 buf->Name[13] = 0x8B;
791 buf->Name[14] = 0xCD;
792 buf->Name[15] = 0x7C;
793 buf->Mode = cpu_to_le32(mode);
794 cifs_dbg(FYI, "mode on posix create 0%o\n", mode);
799 add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
801 struct smb2_create_req *req = iov[0].iov_base;
802 unsigned int num = *num_iovec;
804 iov[num].iov_base = create_posix_buf(mode);
805 if (mode == ACL_NO_MODE)
806 cifs_dbg(FYI, "Invalid mode\n");
807 if (iov[num].iov_base == NULL)
809 iov[num].iov_len = sizeof(struct create_posix);
810 if (!req->CreateContextsOffset)
811 req->CreateContextsOffset = cpu_to_le32(
812 sizeof(struct smb2_create_req) +
813 iov[num - 1].iov_len);
814 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_posix));
815 *num_iovec = num + 1;
822 * SMB2 Worker functions follow:
824 * The general structure of the worker functions is:
825 * 1) Call smb2_init (assembles SMB2 header)
826 * 2) Initialize SMB2 command specific fields in fixed length area of SMB
827 * 3) Call smb_sendrcv2 (sends request on socket and waits for response)
828 * 4) Decode SMB2 command specific fields in the fixed length area
829 * 5) Decode variable length data area (if any for this SMB2 command type)
830 * 6) Call free smb buffer
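/*
 * Send the SMB2 NEGOTIATE request and process the response: select the
 * dialect, record server capabilities and size limits, and decode any
 * SMB3.1.1 negotiate contexts.
 */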
836 SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
838 struct smb_rqst rqst;
839 struct smb2_negotiate_req *req;
840 struct smb2_negotiate_rsp *rsp;
845 struct TCP_Server_Info *server = cifs_ses_server(ses);
846 int blob_offset, blob_length;
848 int flags = CIFS_NEG_OP;
849 unsigned int total_len;
851 cifs_dbg(FYI, "Negotiate protocol\n");
854 WARN(1, "%s: server is NULL!\n", __func__);
858 rc = smb2_plain_req_init(SMB2_NEGOTIATE, NULL, server,
859 (void **) &req, &total_len);
863 req->hdr.SessionId = 0;
865 memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
866 memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
868 if (strcmp(server->vals->version_string,
869 SMB3ANY_VERSION_STRING) == 0) {
870 req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
871 req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
872 req->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
873 req->DialectCount = cpu_to_le16(3);
875 } else if (strcmp(server->vals->version_string,
876 SMBDEFAULT_VERSION_STRING) == 0) {
877 req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
878 req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
879 req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
880 req->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
881 req->DialectCount = cpu_to_le16(4);
884 /* otherwise send specific dialect */
885 req->Dialects[0] = cpu_to_le16(server->vals->protocol_id);
886 req->DialectCount = cpu_to_le16(1);
890 /* only one of SMB2 signing flags may be set in SMB2 request */
892 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
893 else if (global_secflags & CIFSSEC_MAY_SIGN)
894 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
896 req->SecurityMode = 0;
898 req->Capabilities = cpu_to_le32(server->vals->req_capabilities);
899 if (ses->chan_max > 1)
900 req->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
902 /* ClientGUID must be zero for SMB2.02 dialect */
903 if (server->vals->protocol_id == SMB20_PROT_ID)
904 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
906 memcpy(req->ClientGUID, server->client_guid,
907 SMB2_CLIENT_GUID_SIZE);
908 if ((server->vals->protocol_id == SMB311_PROT_ID) ||
909 (strcmp(server->vals->version_string,
910 SMB3ANY_VERSION_STRING) == 0) ||
911 (strcmp(server->vals->version_string,
912 SMBDEFAULT_VERSION_STRING) == 0))
913 assemble_neg_contexts(req, server, &total_len);
915 iov[0].iov_base = (char *)req;
916 iov[0].iov_len = total_len;
918 memset(&rqst, 0, sizeof(struct smb_rqst));
922 rc = cifs_send_recv(xid, ses, server,
923 &rqst, &resp_buftype, flags, &rsp_iov);
924 cifs_small_buf_release(req);
925 rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
927 * No tcon so can't do
928 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
930 if (rc == -EOPNOTSUPP) {
931 cifs_server_dbg(VFS, "Dialect not supported by server. Consider specifying vers=1.0 or vers=2.0 on mount for accessing older servers\n");
936 if (strcmp(server->vals->version_string,
937 SMB3ANY_VERSION_STRING) == 0) {
938 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
940 "SMB2 dialect returned but not requested\n");
942 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
944 "SMB2.1 dialect returned but not requested\n");
946 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
947 /* ops were set to 3.0 by default, so update them for the 3.1.1 dialect */
948 server->ops = &smb311_operations;
949 server->vals = &smb311_values;
951 } else if (strcmp(server->vals->version_string,
952 SMBDEFAULT_VERSION_STRING) == 0) {
953 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
955 "SMB2 dialect returned but not requested\n");
957 } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
958 /* ops were set to 3.0 by default, so update them for the 2.1 dialect */
959 server->ops = &smb21_operations;
960 server->vals = &smb21_values;
961 } else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
962 server->ops = &smb311_operations;
963 server->vals = &smb311_values;
965 } else if (le16_to_cpu(rsp->DialectRevision) !=
966 server->vals->protocol_id) {
967 /* if requested single dialect ensure returned dialect matched */
968 cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
969 le16_to_cpu(rsp->DialectRevision));
973 cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
975 if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
976 cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
977 else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
978 cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
979 else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
980 cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
981 else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
982 cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
983 else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID))
984 cifs_dbg(FYI, "negotiated smb3.1.1 dialect\n");
986 cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
987 le16_to_cpu(rsp->DialectRevision));
991 server->dialect = le16_to_cpu(rsp->DialectRevision);
994 * Keep a copy of the hash after negprot. This hash will be
995 * the starting hash value for all sessions made from this
998 memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
999 SMB2_PREAUTH_HASH_SIZE);
1001 /* SMB2 only has an extended negflavor */
1002 server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
1003 /* set it to the maximum buffer size value we can send with 1 credit */
1004 server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
1005 SMB2_MAX_BUFFER_SIZE);
1006 server->max_read = le32_to_cpu(rsp->MaxReadSize);
1007 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
1008 server->sec_mode = le16_to_cpu(rsp->SecurityMode);
1009 if ((server->sec_mode & SMB2_SEC_MODE_FLAGS_ALL) != server->sec_mode)
1010 cifs_dbg(FYI, "Server returned unexpected security mode 0x%x\n",
1012 server->capabilities = le32_to_cpu(rsp->Capabilities);
1013 /* Internal types */
1014 server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
1017 * SMB3.0 supports only 1 cipher and doesn't have an encryption neg context
1018 * Set the cipher type manually.
1020 if (server->dialect == SMB30_PROT_ID && (server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1021 server->cipher_type = SMB2_ENCRYPTION_AES128_CCM;
1023 security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
1024 (struct smb2_hdr *)rsp);
1026 * See MS-SMB2 section 2.2.4: if no blob, client picks default which
1028 * ses->sectype = RawNTLMSSP;
1029 * but for the time being this is our only auth choice so it doesn't matter.
1030 * We just found a server which sets blob length to zero expecting raw.
1032 if (blob_length == 0) {
1033 cifs_dbg(FYI, "missing security blob on negprot\n");
1034 server->sec_ntlmssp = true;
1037 rc = cifs_enable_signing(server, ses->sign);
1041 rc = decode_negTokenInit(security_blob, blob_length, server);
1048 if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
1049 if (rsp->NegotiateContextCount)
1050 rc = smb311_decode_neg_context(rsp, server,
1053 cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
1056 free_rsp_buf(resp_buftype, rsp);
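/*
 * FSCTL_VALIDATE_NEGOTIATE_INFO: resend the negotiate parameters in a
 * signed ioctl and verify that the server's dialect, security mode and
 * capabilities match what it returned at negotiate time, to detect
 * downgrade attacks (not needed on SMB3.1.1, as noted below).
 */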
1060 int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1063 struct validate_negotiate_info_req *pneg_inbuf;
1064 struct validate_negotiate_info_rsp *pneg_rsp = NULL;
1066 u32 inbuflen; /* max of 4 dialects */
1067 struct TCP_Server_Info *server = tcon->ses->server;
1069 cifs_dbg(FYI, "validate negotiate\n");
1071 /* In SMB3.11 preauth integrity supersedes validate negotiate */
1072 if (server->dialect == SMB311_PROT_ID)
1076 * The validation ioctl must be signed, so no point sending this if we
1077 * cannot sign it (i.e. we are not a known user). Even if signing is not
1078 * required (enabled but not negotiated), in those cases we selectively
1079 * sign just this, the first and only signed request on a connection.
1080 * Having validation of negotiate info helps reduce attack vectors.
1082 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
1083 return 0; /* validation requires signing */
1085 if (tcon->ses->user_name == NULL) {
1086 cifs_dbg(FYI, "Can't validate negotiate: null user mount\n");
1087 return 0; /* validation requires signing */
1090 if (tcon->ses->session_flags & SMB2_SESSION_FLAG_IS_NULL)
1091 cifs_tcon_dbg(VFS, "Unexpected null user (anonymous) auth flag sent by server\n");
1093 pneg_inbuf = kmalloc(sizeof(*pneg_inbuf), GFP_NOFS);
1097 pneg_inbuf->Capabilities =
1098 cpu_to_le32(server->vals->req_capabilities);
1099 if (tcon->ses->chan_max > 1)
1100 pneg_inbuf->Capabilities |= cpu_to_le32(SMB2_GLOBAL_CAP_MULTI_CHANNEL);
1102 memcpy(pneg_inbuf->Guid, server->client_guid,
1103 SMB2_CLIENT_GUID_SIZE);
1105 if (tcon->ses->sign)
1106 pneg_inbuf->SecurityMode =
1107 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
1108 else if (global_secflags & CIFSSEC_MAY_SIGN)
1109 pneg_inbuf->SecurityMode =
1110 cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
1112 pneg_inbuf->SecurityMode = 0;
1115 if (strcmp(server->vals->version_string,
1116 SMB3ANY_VERSION_STRING) == 0) {
1117 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
1118 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
1119 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB311_PROT_ID);
1120 pneg_inbuf->DialectCount = cpu_to_le16(3);
1121 /* SMB 2.1 not included so subtract one dialect from len */
1122 inbuflen = sizeof(*pneg_inbuf) -
1123 (sizeof(pneg_inbuf->Dialects[0]));
1124 } else if (strcmp(server->vals->version_string,
1125 SMBDEFAULT_VERSION_STRING) == 0) {
1126 pneg_inbuf->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
1127 pneg_inbuf->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
1128 pneg_inbuf->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
1129 pneg_inbuf->Dialects[3] = cpu_to_le16(SMB311_PROT_ID);
1130 pneg_inbuf->DialectCount = cpu_to_le16(4);
1131 /* structure is big enough for 4 dialects */
1132 inbuflen = sizeof(*pneg_inbuf);
1134 /* otherwise specific dialect was requested */
1135 pneg_inbuf->Dialects[0] =
1136 cpu_to_le16(server->vals->protocol_id);
1137 pneg_inbuf->DialectCount = cpu_to_le16(1);
1138 /* structure is big enough for 3 dialects, sending only 1 */
1139 inbuflen = sizeof(*pneg_inbuf) -
1140 sizeof(pneg_inbuf->Dialects[0]) * 2;
1143 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1144 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
1145 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1146 (char **)&pneg_rsp, &rsplen);
1147 if (rc == -EOPNOTSUPP) {
1149 * Old Windows versions and some NetApp SMB servers can return a
1150 * "not supported" error. The client should accept it.
1152 cifs_tcon_dbg(VFS, "Server does not support validate negotiate\n");
1154 goto out_free_inbuf;
1155 } else if (rc != 0) {
1156 cifs_tcon_dbg(VFS, "validate protocol negotiate failed: %d\n",
1159 goto out_free_inbuf;
1163 if (rsplen != sizeof(*pneg_rsp)) {
1164 cifs_tcon_dbg(VFS, "Invalid protocol negotiate response size: %d\n",
1167 /* relax check since Mac returns max bufsize allowed on ioctl */
1168 if (rsplen > CIFSMaxBufSize || rsplen < sizeof(*pneg_rsp))
1172 /* check validate negotiate info response matches what we got earlier */
1173 if (pneg_rsp->Dialect != cpu_to_le16(server->dialect))
1176 if (pneg_rsp->SecurityMode != cpu_to_le16(server->sec_mode))
1179 /* do not validate server guid because not saved at negprot time yet */
1181 if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
1182 SMB2_LARGE_FILES) != server->capabilities)
1185 /* validate negotiate successful */
1187 cifs_dbg(FYI, "validate negotiate info successful\n");
1191 cifs_tcon_dbg(VFS, "protocol revalidation - security settings mismatch\n");
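/*
 * Pick the authentication mechanism to use for session setup based on what
 * was requested and what the server advertised (Kerberos or raw NTLMSSP).
 */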
1200 smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
1202 switch (requested) {
1209 if (server->sec_ntlmssp &&
1210 (global_secflags & CIFSSEC_MAY_NTLMSSP))
1212 if ((server->sec_kerberos || server->sec_mskerberos) &&
1213 (global_secflags & CIFSSEC_MAY_KRB5))
1221 struct SMB2_sess_data {
1223 struct cifs_ses *ses;
1224 struct nls_table *nls_cp;
1225 void (*func)(struct SMB2_sess_data *);
1227 u64 previous_session;
1229 /* we will send the SMB in three pieces:
1230 * a fixed length beginning part, an optional
1231 * SPNEGO blob (which can be zero length), and a
1232 * last part which will include the strings
1233 * and rest of bcc area. This allows us to avoid
1234 * a large 17K buffer allocation
1241 SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
1244 struct cifs_ses *ses = sess_data->ses;
1245 struct smb2_sess_setup_req *req;
1246 struct TCP_Server_Info *server = cifs_ses_server(ses);
1247 unsigned int total_len;
1249 rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
1255 if (sess_data->ses->binding) {
1256 req->hdr.SessionId = cpu_to_le64(sess_data->ses->Suid);
1257 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1258 req->PreviousSessionId = 0;
1259 req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
1261 /* First session, not a reauthenticate */
1262 req->hdr.SessionId = 0;
1264 * if reconnect, we need to send previous sess id
1267 req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
1268 req->Flags = 0; /* MBZ */
1271 /* enough to enable echos and oplocks and one max size write */
1272 req->hdr.CreditRequest = cpu_to_le16(130);
1274 /* only one of SMB2 signing flags may be set in SMB2 request */
1276 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
1277 else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
1278 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
1280 req->SecurityMode = 0;
1282 #ifdef CONFIG_CIFS_DFS_UPCALL
1283 req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
1285 req->Capabilities = 0;
1286 #endif /* DFS_UPCALL */
1288 req->Channel = 0; /* MBZ */
1290 sess_data->iov[0].iov_base = (char *)req;
1292 sess_data->iov[0].iov_len = total_len - 1;
1294 * This variable will be used to clear the buffer
1295 * allocated above in case of any error in the calling function.
1297 sess_data->buf0_type = CIFS_SMALL_BUFFER;
1303 SMB2_sess_free_buffer(struct SMB2_sess_data *sess_data)
1305 free_rsp_buf(sess_data->buf0_type, sess_data->iov[0].iov_base);
1306 sess_data->buf0_type = CIFS_NO_BUFFER;
1310 SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
1313 struct smb_rqst rqst;
1314 struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
1315 struct kvec rsp_iov = { NULL, 0 };
1317 /* Testing shows that buffer offset must be at location of Buffer[0] */
1318 req->SecurityBufferOffset =
1319 cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
1320 req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
1322 memset(&rqst, 0, sizeof(struct smb_rqst));
1323 rqst.rq_iov = sess_data->iov;
1326 /* BB add code to build os and lm fields */
1327 rc = cifs_send_recv(sess_data->xid, sess_data->ses,
1328 cifs_ses_server(sess_data->ses),
1330 &sess_data->buf0_type,
1331 CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
1332 cifs_small_buf_release(sess_data->iov[0].iov_base);
1333 memcpy(&sess_data->iov[0], &rsp_iov, sizeof(struct kvec));
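/*
 * Generate the signing key for the new session and mark the session (and,
 * on first use, the TCP connection) as established.
 */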
1339 SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
1342 struct cifs_ses *ses = sess_data->ses;
1343 struct TCP_Server_Info *server = cifs_ses_server(ses);
1345 mutex_lock(&server->srv_mutex);
1346 if (server->ops->generate_signingkey) {
1347 rc = server->ops->generate_signingkey(ses);
1350 "SMB3 session key generation failed\n");
1351 mutex_unlock(&server->srv_mutex);
1355 if (!server->session_estab) {
1356 server->sequence_number = 0x2;
1357 server->session_estab = true;
1359 mutex_unlock(&server->srv_mutex);
1361 cifs_dbg(FYI, "SMB2/3 session established successfully\n");
1362 /* keep existing ses state if binding */
1363 if (!ses->binding) {
1364 spin_lock(&GlobalMid_Lock);
1365 ses->status = CifsGood;
1366 ses->need_reconnect = false;
1367 spin_unlock(&GlobalMid_Lock);
1373 #ifdef CONFIG_CIFS_UPCALL
1375 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1378 struct cifs_ses *ses = sess_data->ses;
1379 struct cifs_spnego_msg *msg;
1380 struct key *spnego_key = NULL;
1381 struct smb2_sess_setup_rsp *rsp = NULL;
1383 rc = SMB2_sess_alloc_buffer(sess_data);
1387 spnego_key = cifs_get_spnego_key(ses);
1388 if (IS_ERR(spnego_key)) {
1389 rc = PTR_ERR(spnego_key);
1391 cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
1396 msg = spnego_key->payload.data[0];
1398 * check version field to make sure that cifs.upcall is
1399 * sending us a response in an expected form
1401 if (msg->version != CIFS_SPNEGO_UPCALL_VERSION) {
1402 cifs_dbg(VFS, "bad cifs.upcall version. Expected %d got %d\n",
1403 CIFS_SPNEGO_UPCALL_VERSION, msg->version);
1405 goto out_put_spnego_key;
1408 /* keep session key if binding */
1409 if (!ses->binding) {
1410 ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
1412 if (!ses->auth_key.response) {
1413 cifs_dbg(VFS, "Kerberos can't allocate (%u bytes) memory\n",
1416 goto out_put_spnego_key;
1418 ses->auth_key.len = msg->sesskey_len;
1421 sess_data->iov[1].iov_base = msg->data + msg->sesskey_len;
1422 sess_data->iov[1].iov_len = msg->secblob_len;
1424 rc = SMB2_sess_sendreceive(sess_data);
1426 goto out_put_spnego_key;
1428 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1429 /* keep session id and flags if binding */
1430 if (!ses->binding) {
1431 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1432 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1435 rc = SMB2_sess_establish_session(sess_data);
1437 key_invalidate(spnego_key);
1438 key_put(spnego_key);
1440 sess_data->result = rc;
1441 sess_data->func = NULL;
1442 SMB2_sess_free_buffer(sess_data);
1446 SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
1448 cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n");
1449 sess_data->result = -EOPNOTSUPP;
1450 sess_data->func = NULL;
1455 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data);
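/*
 * First leg of raw NTLMSSP session setup: send the NEGOTIATE blob and
 * expect STATUS_MORE_PROCESSING_REQUIRED carrying the server's challenge.
 */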
1458 SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
1461 struct cifs_ses *ses = sess_data->ses;
1462 struct smb2_sess_setup_rsp *rsp = NULL;
1463 unsigned char *ntlmssp_blob = NULL;
1464 bool use_spnego = false; /* else use raw ntlmssp */
1465 u16 blob_length = 0;
1468 * If memory allocation is successful, caller of this function
1471 ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
1472 if (!ses->ntlmssp) {
1476 ses->ntlmssp->sesskey_per_smbsess = true;
1478 rc = SMB2_sess_alloc_buffer(sess_data);
1482 rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
1489 /* BB eventually need to add this */
1490 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1494 sess_data->iov[1].iov_base = ntlmssp_blob;
1495 sess_data->iov[1].iov_len = blob_length;
1497 rc = SMB2_sess_sendreceive(sess_data);
1498 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1500 /* If true, rc here is expected and not an error */
1501 if (sess_data->buf0_type != CIFS_NO_BUFFER &&
1502 rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED)
1508 if (offsetof(struct smb2_sess_setup_rsp, Buffer) !=
1509 le16_to_cpu(rsp->SecurityBufferOffset)) {
1510 cifs_dbg(VFS, "Invalid security buffer offset %d\n",
1511 le16_to_cpu(rsp->SecurityBufferOffset));
1515 rc = decode_ntlmssp_challenge(rsp->Buffer,
1516 le16_to_cpu(rsp->SecurityBufferLength), ses);
1520 cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
1522 /* keep existing ses id and flags if binding */
1523 if (!ses->binding) {
1524 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1525 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1529 kfree(ntlmssp_blob);
1530 SMB2_sess_free_buffer(sess_data);
1532 sess_data->result = 0;
1533 sess_data->func = SMB2_sess_auth_rawntlmssp_authenticate;
1537 kfree(ses->ntlmssp);
1538 ses->ntlmssp = NULL;
1539 sess_data->result = rc;
1540 sess_data->func = NULL;
1544 SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
1547 struct cifs_ses *ses = sess_data->ses;
1548 struct smb2_sess_setup_req *req;
1549 struct smb2_sess_setup_rsp *rsp = NULL;
1550 unsigned char *ntlmssp_blob = NULL;
1551 bool use_spnego = false; /* else use raw ntlmssp */
1552 u16 blob_length = 0;
1554 rc = SMB2_sess_alloc_buffer(sess_data);
1558 req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
1559 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1561 rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
1564 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
1569 /* BB eventually need to add this */
1570 cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
1574 sess_data->iov[1].iov_base = ntlmssp_blob;
1575 sess_data->iov[1].iov_len = blob_length;
1577 rc = SMB2_sess_sendreceive(sess_data);
1581 rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
1583 /* keep existing ses id and flags if binding */
1584 if (!ses->binding) {
1585 ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
1586 ses->session_flags = le16_to_cpu(rsp->SessionFlags);
1589 rc = SMB2_sess_establish_session(sess_data);
1590 #ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
1591 if (ses->server->dialect < SMB30_PROT_ID) {
1592 cifs_dbg(VFS, "%s: dumping generated SMB2 session keys\n", __func__);
1594 * The session id is opaque in terms of endianness, so we can't
1595 * print it as a long long. We dump it as we got it on the wire.
1597 cifs_dbg(VFS, "Session Id %*ph\n", (int)sizeof(ses->Suid),
1599 cifs_dbg(VFS, "Session Key %*ph\n",
1600 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
1601 cifs_dbg(VFS, "Signing Key %*ph\n",
1602 SMB3_SIGN_KEY_SIZE, ses->auth_key.response);
1606 kfree(ntlmssp_blob);
1607 SMB2_sess_free_buffer(sess_data);
1608 kfree(ses->ntlmssp);
1609 ses->ntlmssp = NULL;
1610 sess_data->result = rc;
1611 sess_data->func = NULL;
1615 SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
1619 type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
1620 cifs_dbg(FYI, "sess setup type %d\n", type);
1621 if (type == Unspecified) {
1622 cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
1628 sess_data->func = SMB2_auth_kerberos;
1631 sess_data->func = SMB2_sess_auth_rawntlmssp_negotiate;
1634 cifs_dbg(VFS, "secType %d not supported!\n", type);
1642 SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
1643 const struct nls_table *nls_cp)
1646 struct TCP_Server_Info *server = cifs_ses_server(ses);
1647 struct SMB2_sess_data *sess_data;
1649 cifs_dbg(FYI, "Session Setup\n");
1652 WARN(1, "%s: server is NULL!\n", __func__);
1656 sess_data = kzalloc(sizeof(struct SMB2_sess_data), GFP_KERNEL);
1660 rc = SMB2_select_sec(ses, sess_data);
1663 sess_data->xid = xid;
1664 sess_data->ses = ses;
1665 sess_data->buf0_type = CIFS_NO_BUFFER;
1666 sess_data->nls_cp = (struct nls_table *) nls_cp;
1667 sess_data->previous_session = ses->Suid;
1670 * Initialize the session hash with the server one.
1672 memcpy(ses->preauth_sha_hash, server->preauth_sha_hash,
1673 SMB2_PREAUTH_HASH_SIZE);
1675 while (sess_data->func)
1676 sess_data->func(sess_data);
1678 if ((ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST) && (ses->sign))
1679 cifs_server_dbg(VFS, "signing requested but authenticated as guest\n");
1680 rc = sess_data->result;
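/*
 * Send SMB2 LOGOFF to tear down the session on the server; skipped when
 * the session already needs reconnect.
 */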
1687 SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
1689 struct smb_rqst rqst;
1690 struct smb2_logoff_req *req; /* response is also trivial struct */
1692 struct TCP_Server_Info *server;
1694 unsigned int total_len;
1696 struct kvec rsp_iov;
1699 cifs_dbg(FYI, "disconnect session %p\n", ses);
1701 if (ses && (ses->server))
1702 server = ses->server;
1706 /* no need to send SMB logoff if uid already closed due to reconnect */
1707 if (ses->need_reconnect)
1708 goto smb2_session_already_dead;
1710 rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
1711 (void **) &req, &total_len);
1715 /* since there is no tcon, smb2_init cannot do this, so do it here */
1716 req->hdr.SessionId = cpu_to_le64(ses->Suid);
1718 if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
1719 flags |= CIFS_TRANSFORM_REQ;
1720 else if (server->sign)
1721 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1723 flags |= CIFS_NO_RSP_BUF;
1725 iov[0].iov_base = (char *)req;
1726 iov[0].iov_len = total_len;
1728 memset(&rqst, 0, sizeof(struct smb_rqst));
1732 rc = cifs_send_recv(xid, ses, ses->server,
1733 &rqst, &resp_buf_type, flags, &rsp_iov);
1734 cifs_small_buf_release(req);
1736 * No tcon so can't do
1737 * cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
1740 smb2_session_already_dead:
1744 static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
1746 cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
1749 #define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
1751 /* These are similar values to what Windows uses */
1752 static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
1754 tcon->max_chunks = 256;
1755 tcon->max_bytes_chunk = 1048576;
1756 tcon->max_bytes_copy = 16777216;
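/*
 * Send SMB2 TREE_CONNECT for the given UNC path and, on success, record
 * the share type, flags, capabilities and tree id in the tcon.
 */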
1760 SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
1761 struct cifs_tcon *tcon, const struct nls_table *cp)
1763 struct smb_rqst rqst;
1764 struct smb2_tree_connect_req *req;
1765 struct smb2_tree_connect_rsp *rsp = NULL;
1767 struct kvec rsp_iov = { NULL, 0 };
1771 __le16 *unc_path = NULL;
1773 unsigned int total_len;
1774 struct TCP_Server_Info *server;
1776 /* always use master channel */
1777 server = ses->server;
1779 cifs_dbg(FYI, "TCON\n");
1781 if (!server || !tree)
1784 unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
1785 if (unc_path == NULL)
1788 unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
1790 if (unc_path_len < 2) {
1795 /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
1797 atomic_set(&tcon->num_remote_opens, 0);
1798 rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, server,
1799 (void **) &req, &total_len);
1805 if (smb3_encryption_required(tcon))
1806 flags |= CIFS_TRANSFORM_REQ;
1808 iov[0].iov_base = (char *)req;
1810 iov[0].iov_len = total_len - 1;
1812 /* Testing shows that buffer offset must be at location of Buffer[0] */
1813 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
1815 req->PathLength = cpu_to_le16(unc_path_len - 2);
1816 iov[1].iov_base = unc_path;
1817 iov[1].iov_len = unc_path_len;
1820 * 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1
1821 * unless it is guest or anonymous user. See MS-SMB2 3.2.5.3.1
1822 * (Samba servers don't always set the flag so also check if null user)
1824 if ((server->dialect == SMB311_PROT_ID) &&
1825 !smb3_encryption_required(tcon) &&
1826 !(ses->session_flags &
1827 (SMB2_SESSION_FLAG_IS_GUEST|SMB2_SESSION_FLAG_IS_NULL)) &&
1828 ((ses->user_name != NULL) || (ses->sectype == Kerberos)))
1829 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
1831 memset(&rqst, 0, sizeof(struct smb_rqst));
1835 /* Need 64 for max size write so ask for more in case not there yet */
1836 req->hdr.CreditRequest = cpu_to_le16(64);
1838 rc = cifs_send_recv(xid, ses, server,
1839 &rqst, &resp_buftype, flags, &rsp_iov);
1840 cifs_small_buf_release(req);
1841 rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
1842 trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
1843 if ((rc != 0) || (rsp == NULL)) {
1844 cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
1845 tcon->need_reconnect = true;
1846 goto tcon_error_exit;
1849 switch (rsp->ShareType) {
1850 case SMB2_SHARE_TYPE_DISK:
1851 cifs_dbg(FYI, "connection to disk share\n");
1853 case SMB2_SHARE_TYPE_PIPE:
1855 cifs_dbg(FYI, "connection to pipe share\n");
1857 case SMB2_SHARE_TYPE_PRINT:
1859 cifs_dbg(FYI, "connection to printer\n");
1862 cifs_server_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
1864 goto tcon_error_exit;
1867 tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
1868 tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
1869 tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
1870 tcon->tidStatus = CifsGood;
1871 tcon->need_reconnect = false;
1872 tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
1873 strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
1875 if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
1876 ((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
1877 cifs_tcon_dbg(VFS, "DFS capability contradicts DFS flag\n");
1880 !(server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
1881 cifs_tcon_dbg(VFS, "Encryption is requested but not supported\n");
1883 init_copy_chunk_defaults(tcon);
1884 if (server->ops->validate_negotiate)
1885 rc = server->ops->validate_negotiate(xid, tcon);
1888 free_rsp_buf(resp_buftype, rsp);
1893 if (rsp && rsp->hdr.Status == STATUS_BAD_NETWORK_NAME)
1894 cifs_tcon_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
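/*
 * Send SMB2 TREE_DISCONNECT unless the tree or its session already needs
 * to be reconnected.
 */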
1899 SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
1901 struct smb_rqst rqst;
1902 struct smb2_tree_disconnect_req *req; /* response is trivial */
1904 struct cifs_ses *ses = tcon->ses;
1906 unsigned int total_len;
1908 struct kvec rsp_iov;
1911 cifs_dbg(FYI, "Tree Disconnect\n");
1913 if (!ses || !(ses->server))
1916 if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
1919 close_cached_dir_lease(&tcon->crfid);
1921 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server,
1927 if (smb3_encryption_required(tcon))
1928 flags |= CIFS_TRANSFORM_REQ;
1930 flags |= CIFS_NO_RSP_BUF;
1932 iov[0].iov_base = (char *)req;
1933 iov[0].iov_len = total_len;
1935 memset(&rqst, 0, sizeof(struct smb_rqst));
1939 rc = cifs_send_recv(xid, ses, ses->server,
1940 &rqst, &resp_buf_type, flags, &rsp_iov);
1941 cifs_small_buf_release(req);
1943 cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
1949 static struct create_durable *
1950 create_durable_buf(void)
1952 struct create_durable *buf;
1954 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1958 buf->ccontext.DataOffset = cpu_to_le16(offsetof
1959 (struct create_durable, Data));
1960 buf->ccontext.DataLength = cpu_to_le32(16);
1961 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1962 (struct create_durable, Name));
1963 buf->ccontext.NameLength = cpu_to_le16(4);
1964 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
1972 static struct create_durable *
1973 create_reconnect_durable_buf(struct cifs_fid *fid)
1975 struct create_durable *buf;
1977 buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
1981 buf->ccontext.DataOffset = cpu_to_le16(offsetof
1982 (struct create_durable, Data));
1983 buf->ccontext.DataLength = cpu_to_le32(16);
1984 buf->ccontext.NameOffset = cpu_to_le16(offsetof
1985 (struct create_durable, Name));
1986 buf->ccontext.NameLength = cpu_to_le16(4);
1987 buf->Data.Fid.PersistentFileId = fid->persistent_fid;
1988 buf->Data.Fid.VolatileFileId = fid->volatile_fid;
1989 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
1998 parse_query_id_ctxt(struct create_context *cc, struct smb2_file_all_info *buf)
2000 struct create_on_disk_id *pdisk_id = (struct create_on_disk_id *)cc;
2002 cifs_dbg(FYI, "parse query id context 0x%llx 0x%llx\n",
2003 pdisk_id->DiskFileId, pdisk_id->VolumeId);
2004 buf->IndexNumber = pdisk_id->DiskFileId;
2008 parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info,
2009 struct create_posix_rsp *posix)
2012 u8 *beg = (u8 *)cc + le16_to_cpu(cc->DataOffset);
2013 u8 *end = beg + le32_to_cpu(cc->DataLength);
2016 memset(posix, 0, sizeof(*posix));
2018 posix->nlink = le32_to_cpu(*(__le32 *)(beg + 0));
2019 posix->reparse_tag = le32_to_cpu(*(__le32 *)(beg + 4));
2020 posix->mode = le32_to_cpu(*(__le32 *)(beg + 8));
2023 sid_len = posix_info_sid_size(sid, end);
2025 cifs_dbg(VFS, "bad owner sid in posix create response\n");
2028 memcpy(&posix->owner, sid, sid_len);
2030 sid = sid + sid_len;
2031 sid_len = posix_info_sid_size(sid, end);
2033 cifs_dbg(VFS, "bad group sid in posix create response\n");
2036 memcpy(&posix->group, sid, sid_len);
2038 cifs_dbg(FYI, "nlink=%d mode=%o reparse_tag=%x\n",
2039 posix->nlink, posix->mode, posix->reparse_tag);
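/*
 * Walk the create contexts in an SMB2 CREATE response, picking out the
 * lease state, query-on-disk file id, and POSIX response if present.
 */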
2043 smb2_parse_contexts(struct TCP_Server_Info *server,
2044 struct smb2_create_rsp *rsp,
2045 unsigned int *epoch, char *lease_key, __u8 *oplock,
2046 struct smb2_file_all_info *buf,
2047 struct create_posix_rsp *posix)
2050 struct create_context *cc;
2052 unsigned int remaining;
2054 static const char smb3_create_tag_posix[] = {
2055 0x93, 0xAD, 0x25, 0x50, 0x9C,
2056 0xB4, 0x11, 0xE7, 0xB4, 0x23, 0x83,
2057 0xDE, 0x96, 0x8B, 0xCD, 0x7C
2061 data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset);
2062 remaining = le32_to_cpu(rsp->CreateContextsLength);
2063 cc = (struct create_context *)data_offset;
2065 /* Initialize inode number to 0 in case no valid data in qfid context */
2067 buf->IndexNumber = 0;
2069 while (remaining >= sizeof(struct create_context)) {
2070 name = le16_to_cpu(cc->NameOffset) + (char *)cc;
2071 if (le16_to_cpu(cc->NameLength) == 4 &&
2072 strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0)
2073 *oplock = server->ops->parse_lease_buf(cc, epoch,
2075 else if (buf && (le16_to_cpu(cc->NameLength) == 4) &&
2076 strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0)
2077 parse_query_id_ctxt(cc, buf);
2078 else if ((le16_to_cpu(cc->NameLength) == 16)) {
2080 memcmp(name, smb3_create_tag_posix, 16) == 0)
2081 parse_posix_ctxt(cc, buf, posix);
2084 cifs_dbg(FYI, "Context not matched with len %d\n",
2085 le16_to_cpu(cc->NameLength));
2086 cifs_dump_mem("Cctxt name: ", name, 4);
2089 next = le32_to_cpu(cc->Next);
2093 cc = (struct create_context *)((char *)cc + next);
2096 if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
2097 *oplock = rsp->OplockLevel;
2103 add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
2104 unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
2106 struct smb2_create_req *req = iov[0].iov_base;
2107 unsigned int num = *num_iovec;
2109 iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
2110 if (iov[num].iov_base == NULL)
2112 iov[num].iov_len = server->vals->create_lease_size;
2113 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
2114 if (!req->CreateContextsOffset)
2115 req->CreateContextsOffset = cpu_to_le32(
2116 sizeof(struct smb2_create_req) +
2117 iov[num - 1].iov_len);
2118 le32_add_cpu(&req->CreateContextsLength,
2119 server->vals->create_lease_size);
2120 *num_iovec = num + 1;
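/*
 * Each add_*_context() helper follows this same pattern: append one iovec
 * holding the new context, set CreateContextsOffset only for the first
 * context added (fixed request size plus the length of the path iovec that
 * precedes the contexts), and grow CreateContextsLength by the new context's
 * size via le32_add_cpu().
 */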
2124 static struct create_durable_v2 *
2125 create_durable_v2_buf(struct cifs_open_parms *oparms)
2127 struct cifs_fid *pfid = oparms->fid;
2128 struct create_durable_v2 *buf;
2130 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
2134 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2135 (struct create_durable_v2, dcontext));
2136 buf->ccontext.DataLength = cpu_to_le32(sizeof(struct durable_context_v2));
2137 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2138 (struct create_durable_v2, Name));
2139 buf->ccontext.NameLength = cpu_to_le16(4);
2142 * NB: Handle timeout defaults to 0, which allows server to choose
2143 * (most servers default to 120 seconds) and most clients default to 0.
2144 * This can be overridden at mount ("handletimeout=") if the user wants
2145 * a different persistent (or resilient) handle timeout for all
2146 * opens on a particular SMB3 mount.
2148 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
2149 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2150 generate_random_uuid(buf->dcontext.CreateGuid);
2151 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
2153 /* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DH2Q" */
2161 static struct create_durable_handle_reconnect_v2 *
2162 create_reconnect_durable_v2_buf(struct cifs_fid *fid)
2164 struct create_durable_handle_reconnect_v2 *buf;
2166 buf = kzalloc(sizeof(struct create_durable_handle_reconnect_v2),
2171 buf->ccontext.DataOffset =
2172 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2174 buf->ccontext.DataLength =
2175 cpu_to_le32(sizeof(struct durable_reconnect_context_v2));
2176 buf->ccontext.NameOffset =
2177 cpu_to_le16(offsetof(struct create_durable_handle_reconnect_v2,
2179 buf->ccontext.NameLength = cpu_to_le16(4);
2181 buf->dcontext.Fid.PersistentFileId = fid->persistent_fid;
2182 buf->dcontext.Fid.VolatileFileId = fid->volatile_fid;
2183 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
2184 memcpy(buf->dcontext.CreateGuid, fid->create_guid, 16);
2186 /* SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 is "DH2C" */
2195 add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
2196 struct cifs_open_parms *oparms)
2198 struct smb2_create_req *req = iov[0].iov_base;
2199 unsigned int num = *num_iovec;
2201 iov[num].iov_base = create_durable_v2_buf(oparms);
2202 if (iov[num].iov_base == NULL)
2204 iov[num].iov_len = sizeof(struct create_durable_v2);
2205 if (!req->CreateContextsOffset)
2206 req->CreateContextsOffset =
2207 cpu_to_le32(sizeof(struct smb2_create_req) +
2209 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable_v2));
2210 *num_iovec = num + 1;
2215 add_durable_reconnect_v2_context(struct kvec *iov, unsigned int *num_iovec,
2216 struct cifs_open_parms *oparms)
2218 struct smb2_create_req *req = iov[0].iov_base;
2219 unsigned int num = *num_iovec;
2221 /* indicate that we don't need to relock the file */
2222 oparms->reconnect = false;
2224 iov[num].iov_base = create_reconnect_durable_v2_buf(oparms->fid);
2225 if (iov[num].iov_base == NULL)
2227 iov[num].iov_len = sizeof(struct create_durable_handle_reconnect_v2);
2228 if (!req->CreateContextsOffset)
2229 req->CreateContextsOffset =
2230 cpu_to_le32(sizeof(struct smb2_create_req) +
2232 le32_add_cpu(&req->CreateContextsLength,
2233 sizeof(struct create_durable_handle_reconnect_v2));
2234 *num_iovec = num + 1;
2239 add_durable_context(struct kvec *iov, unsigned int *num_iovec,
2240 struct cifs_open_parms *oparms, bool use_persistent)
2242 struct smb2_create_req *req = iov[0].iov_base;
2243 unsigned int num = *num_iovec;
2245 if (use_persistent) {
2246 if (oparms->reconnect)
2247 return add_durable_reconnect_v2_context(iov, num_iovec,
2250 return add_durable_v2_context(iov, num_iovec, oparms);
2253 if (oparms->reconnect) {
2254 iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
2255 /* indicate that we don't need to relock the file */
2256 oparms->reconnect = false;
2258 iov[num].iov_base = create_durable_buf();
2259 if (iov[num].iov_base == NULL)
2261 iov[num].iov_len = sizeof(struct create_durable);
2262 if (!req->CreateContextsOffset)
2263 req->CreateContextsOffset =
2264 cpu_to_le32(sizeof(struct smb2_create_req) +
2266 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
2267 *num_iovec = num + 1;
2271 /* See MS-SMB2 2.2.13.2.7 */
2272 static struct crt_twarp_ctxt *
2273 create_twarp_buf(__u64 timewarp)
2275 struct crt_twarp_ctxt *buf;
2277 buf = kzalloc(sizeof(struct crt_twarp_ctxt), GFP_KERNEL);
2281 buf->ccontext.DataOffset = cpu_to_le16(offsetof
2282 (struct crt_twarp_ctxt, Timestamp));
2283 buf->ccontext.DataLength = cpu_to_le32(8);
2284 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2285 (struct crt_twarp_ctxt, Name));
2286 buf->ccontext.NameLength = cpu_to_le16(4);
2287 /* SMB2_CREATE_TIMEWARP_TOKEN is "TWrp" */
2292 buf->Timestamp = cpu_to_le64(timewarp);
2296 /* See MS-SMB2 2.2.13.2.7 */
2298 add_twarp_context(struct kvec *iov, unsigned int *num_iovec, __u64 timewarp)
2300 struct smb2_create_req *req = iov[0].iov_base;
2301 unsigned int num = *num_iovec;
2303 iov[num].iov_base = create_twarp_buf(timewarp);
2304 if (iov[num].iov_base == NULL)
2306 iov[num].iov_len = sizeof(struct crt_twarp_ctxt);
2307 if (!req->CreateContextsOffset)
2308 req->CreateContextsOffset = cpu_to_le32(
2309 sizeof(struct smb2_create_req) +
2310 iov[num - 1].iov_len);
2311 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_twarp_ctxt));
2312 *num_iovec = num + 1;
2316 /* See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */
2317 static void setup_owner_group_sids(char *buf)
2319 struct owner_group_sids *sids = (struct owner_group_sids *)buf;
2321 /* Populate the user ownership fields S-1-5-88-1 */
2322 sids->owner.Revision = 1;
2323 sids->owner.NumAuth = 3;
2324 sids->owner.Authority[5] = 5;
2325 sids->owner.SubAuthorities[0] = cpu_to_le32(88);
2326 sids->owner.SubAuthorities[1] = cpu_to_le32(1);
2327 sids->owner.SubAuthorities[2] = cpu_to_le32(current_fsuid().val);
2329 /* Populate the group ownership fields S-1-5-88-2 */
2330 sids->group.Revision = 1;
2331 sids->group.NumAuth = 3;
2332 sids->group.Authority[5] = 5;
2333 sids->group.SubAuthorities[0] = cpu_to_le32(88);
2334 sids->group.SubAuthorities[1] = cpu_to_le32(2);
2335 sids->group.SubAuthorities[2] = cpu_to_le32(current_fsgid().val);
2337 cifs_dbg(FYI, "owner S-1-5-88-1-%d, group S-1-5-88-2-%d\n", current_fsuid().val, current_fsgid().val);
2340 /* See MS-SMB2 2.2.13.2.2 and MS-DTYP 2.4.6 */
2341 static struct crt_sd_ctxt *
2342 create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
2344 struct crt_sd_ctxt *buf;
2346 unsigned int acelen, acl_size, ace_count;
2347 unsigned int owner_offset = 0;
2348 unsigned int group_offset = 0;
2349 struct smb3_acl acl;
2351 *len = roundup(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
2354 /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
2355 *len += sizeof(struct owner_group_sids);
2358 buf = kzalloc(*len, GFP_KERNEL);
2362 ptr = (__u8 *)&buf[1];
2364 /* offset fields are from beginning of security descriptor not of create context */
2365 owner_offset = ptr - (__u8 *)&buf->sd;
2366 buf->sd.OffsetOwner = cpu_to_le32(owner_offset);
2367 group_offset = owner_offset + offsetof(struct owner_group_sids, group);
2368 buf->sd.OffsetGroup = cpu_to_le32(group_offset);
2370 setup_owner_group_sids(ptr);
2371 ptr += sizeof(struct owner_group_sids);
2373 buf->sd.OffsetOwner = 0;
2374 buf->sd.OffsetGroup = 0;
2377 buf->ccontext.DataOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, sd));
2378 buf->ccontext.NameOffset = cpu_to_le16(offsetof(struct crt_sd_ctxt, Name));
2379 buf->ccontext.NameLength = cpu_to_le16(4);
2380 /* SMB2_CREATE_SD_BUFFER_TOKEN is "SecD" */
2385 buf->sd.Revision = 1; /* Must be one see MS-DTYP 2.4.6 */
2388 * ACL is "self relative" ie ACL is stored in contiguous block of memory
2389 * and "DP" ie the DACL is present
2391 buf->sd.Control = cpu_to_le16(ACL_CONTROL_SR | ACL_CONTROL_DP);
2393 /* offset owner, group and Sbz1 and SACL are all zero */
2394 buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2395 /* Skip over the ACL header for now; we will copy it into buf later. */
2397 ptr += sizeof(struct smb3_acl);
2399 /* create one ACE to hold the mode embedded in reserved special SID */
2400 acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
2402 acl_size = acelen + sizeof(struct smb3_acl);
2406 /* We do not need to reallocate the buffer to add the two more ACEs; there is plenty of space. */
2407 acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
2413 /* and one more ACE to allow access for authenticated users */
2414 acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
2419 acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
2420 acl.AclSize = cpu_to_le16(acl_size);
2421 acl.AceCount = cpu_to_le16(ace_count);
2422 memcpy(aclptr, &acl, sizeof(struct smb3_acl));
2424 buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
2425 *len = roundup(ptr - (__u8 *)buf, 8);
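/*
 * The finished context is: create context header and name, security
 * descriptor header, optional owner and group SIDs, ACL header, then the
 * ACEs built above (the special mode ACE, optionally an owner ACE, and one
 * granting access to authenticated users), with the total length padded to
 * a multiple of eight bytes.
 */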
2431 add_sd_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode, bool set_owner)
2433 struct smb2_create_req *req = iov[0].iov_base;
2434 unsigned int num = *num_iovec;
2435 unsigned int len = 0;
2437 iov[num].iov_base = create_sd_buf(mode, set_owner, &len);
2438 if (iov[num].iov_base == NULL)
2440 iov[num].iov_len = len;
2441 if (!req->CreateContextsOffset)
2442 req->CreateContextsOffset = cpu_to_le32(
2443 sizeof(struct smb2_create_req) +
2444 iov[num - 1].iov_len);
2445 le32_add_cpu(&req->CreateContextsLength, len);
2446 *num_iovec = num + 1;
2450 static struct crt_query_id_ctxt *
2451 create_query_id_buf(void)
2453 struct crt_query_id_ctxt *buf;
2455 buf = kzalloc(sizeof(struct crt_query_id_ctxt), GFP_KERNEL);
2459 buf->ccontext.DataOffset = cpu_to_le16(0);
2460 buf->ccontext.DataLength = cpu_to_le32(0);
2461 buf->ccontext.NameOffset = cpu_to_le16(offsetof
2462 (struct crt_query_id_ctxt, Name));
2463 buf->ccontext.NameLength = cpu_to_le16(4);
2464 /* SMB2_CREATE_QUERY_ON_DISK_ID is "QFid" */
2472 /* See MS-SMB2 2.2.13.2.9 */
2474 add_query_id_context(struct kvec *iov, unsigned int *num_iovec)
2476 struct smb2_create_req *req = iov[0].iov_base;
2477 unsigned int num = *num_iovec;
2479 iov[num].iov_base = create_query_id_buf();
2480 if (iov[num].iov_base == NULL)
2482 iov[num].iov_len = sizeof(struct crt_query_id_ctxt);
2483 if (!req->CreateContextsOffset)
2484 req->CreateContextsOffset = cpu_to_le32(
2485 sizeof(struct smb2_create_req) +
2486 iov[num - 1].iov_len);
2487 le32_add_cpu(&req->CreateContextsLength, sizeof(struct crt_query_id_ctxt));
2488 *num_iovec = num + 1;
2493 alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
2494 const char *treename, const __le16 *path)
2496 int treename_len, path_len;
2497 struct nls_table *cp;
2498 const __le16 sep[] = {cpu_to_le16('\\'), cpu_to_le16(0x0000)};
2503 treename_len = strlen(treename);
2504 if (treename_len < 2 || !(treename[0] == '\\' && treename[1] == '\\'))
2510 path_len = UniStrnlen((wchar_t *)path, PATH_MAX);
2513 * make room for one path separator between the treename and
2516 *out_len = treename_len + 1 + path_len;
2519 * final path needs to be null-terminated UTF16 with a
2523 *out_size = roundup((*out_len+1)*2, 8);
2524 *out_path = kzalloc(*out_size, GFP_KERNEL);
2528 cp = load_nls_default();
2529 cifs_strtoUTF16(*out_path, treename, treename_len, cp);
2530 UniStrcat(*out_path, sep);
2531 UniStrcat(*out_path, path);
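/*
 * For example, treename "\\server\share" with path "dir\file" produces the
 * UTF-16 string "\\server\share\dir\file"; *out_len counts UTF-16 characters
 * while *out_size is the allocation size, null terminator included, rounded
 * up to a multiple of eight bytes.
 */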
2537 int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
2538 umode_t mode, struct cifs_tcon *tcon,
2539 const char *full_path,
2540 struct cifs_sb_info *cifs_sb)
2542 struct smb_rqst rqst;
2543 struct smb2_create_req *req;
2544 struct smb2_create_rsp *rsp = NULL;
2545 struct cifs_ses *ses = tcon->ses;
2546 struct kvec iov[3]; /* make sure at least one for each open context */
2547 struct kvec rsp_iov = {NULL, 0};
2550 __le16 *copy_path = NULL;
2553 unsigned int n_iov = 2;
2554 __u32 file_attributes = 0;
2555 char *pc_buf = NULL;
2557 unsigned int total_len;
2558 __le16 *utf16_path = NULL;
2559 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2561 cifs_dbg(FYI, "mkdir\n");
2563 /* resource #1: path allocation */
2564 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
2568 if (!ses || !server) {
2573 /* resource #2: request */
2574 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2575 (void **) &req, &total_len);
2580 if (smb3_encryption_required(tcon))
2581 flags |= CIFS_TRANSFORM_REQ;
2583 req->ImpersonationLevel = IL_IMPERSONATION;
2584 req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
2585 /* File attributes ignored on open (used in create though) */
2586 req->FileAttributes = cpu_to_le32(file_attributes);
2587 req->ShareAccess = FILE_SHARE_ALL_LE;
2588 req->CreateDisposition = cpu_to_le32(FILE_CREATE);
2589 req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
2591 iov[0].iov_base = (char *)req;
2592 /* -1 since last byte is buf[0] which is sent below (path) */
2593 iov[0].iov_len = total_len - 1;
2595 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2597 /* [MS-SMB2] 2.2.13 NameOffset:
2598 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2599 * the SMB2 header, the file name includes a prefix that will
2600 * be processed during DFS name normalization as specified in
2601 * section 3.3.5.9. Otherwise, the file name is relative to
2602 * the share that is identified by the TreeId in the SMB2 header.
2605 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2608 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2609 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2611 tcon->treeName, utf16_path);
2615 req->NameLength = cpu_to_le16(name_len * 2);
2616 uni_path_len = copy_size;
2617 /* free before overwriting resource */
2619 utf16_path = copy_path;
2621 uni_path_len = (2 * UniStrnlen((wchar_t *)utf16_path, PATH_MAX)) + 2;
2622 /* MUST set path len (NameLength) to 0 when opening the root of a share */
2623 req->NameLength = cpu_to_le16(uni_path_len - 2);
2624 if (uni_path_len % 8 != 0) {
2625 copy_size = roundup(uni_path_len, 8);
2626 copy_path = kzalloc(copy_size, GFP_KERNEL);
2631 memcpy((char *)copy_path, (const char *)utf16_path,
2633 uni_path_len = copy_size;
2634 /* free before overwriting resource */
2636 utf16_path = copy_path;
2640 iov[1].iov_len = uni_path_len;
2641 iov[1].iov_base = utf16_path;
2642 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
2644 if (tcon->posix_extensions) {
2645 /* resource #3: posix buf */
2646 rc = add_posix_context(iov, &n_iov, mode);
2649 pc_buf = iov[n_iov-1].iov_base;
2653 memset(&rqst, 0, sizeof(struct smb_rqst));
2655 rqst.rq_nvec = n_iov;
2657 /* no need to inc num_remote_opens because we close it just below */
2658 trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, CREATE_NOT_FILE,
2659 FILE_WRITE_ATTRIBUTES);
2660 /* resource #4: response buffer */
2661 rc = cifs_send_recv(xid, ses, server,
2662 &rqst, &resp_buftype, flags, &rsp_iov);
2664 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2665 trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
2667 FILE_WRITE_ATTRIBUTES, rc);
2668 goto err_free_rsp_buf;
2672 * Although unlikely to be possible for rsp to be null and rc not set,
2673 * adding the check below is slightly safer long term (and quiets a Coverity warning).
2676 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2683 trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
2685 ses->Suid, CREATE_NOT_FILE,
2686 FILE_WRITE_ATTRIBUTES);
2688 SMB2_close(xid, tcon, le64_to_cpu(rsp->PersistentFileId),
2689 le64_to_cpu(rsp->VolatileFileId));
2691 /* Eventually save off posix specific response info and timestamps */
2694 free_rsp_buf(resp_buftype, rsp);
2697 cifs_small_buf_release(req);
2704 SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2705 struct smb_rqst *rqst, __u8 *oplock,
2706 struct cifs_open_parms *oparms, __le16 *path)
2708 struct smb2_create_req *req;
2709 unsigned int n_iov = 2;
2710 __u32 file_attributes = 0;
2713 unsigned int total_len;
2714 struct kvec *iov = rqst->rq_iov;
2718 rc = smb2_plain_req_init(SMB2_CREATE, tcon, server,
2719 (void **) &req, &total_len);
2723 iov[0].iov_base = (char *)req;
2724 /* -1 since last byte is buf[0] which is sent below (path) */
2725 iov[0].iov_len = total_len - 1;
2727 if (oparms->create_options & CREATE_OPTION_READONLY)
2728 file_attributes |= ATTR_READONLY;
2729 if (oparms->create_options & CREATE_OPTION_SPECIAL)
2730 file_attributes |= ATTR_SYSTEM;
2732 req->ImpersonationLevel = IL_IMPERSONATION;
2733 req->DesiredAccess = cpu_to_le32(oparms->desired_access);
2734 /* File attributes ignored on open (used in create though) */
2735 req->FileAttributes = cpu_to_le32(file_attributes);
2736 req->ShareAccess = FILE_SHARE_ALL_LE;
2738 req->CreateDisposition = cpu_to_le32(oparms->disposition);
2739 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
2740 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
2742 /* [MS-SMB2] 2.2.13 NameOffset:
2743 * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
2744 * the SMB2 header, the file name includes a prefix that will
2745 * be processed during DFS name normalization as specified in
2746 * section 3.3.5.9. Otherwise, the file name is relative to
2747 * the share that is identified by the TreeId in the SMB2 header.
2750 if (tcon->share_flags & SHI1005_FLAGS_DFS) {
2753 req->hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
2754 rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
2756 tcon->treeName, path);
2759 req->NameLength = cpu_to_le16(name_len * 2);
2760 uni_path_len = copy_size;
2763 uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
2764 /* MUST set path len (NameLength) to 0 when opening the root of a share */
2765 req->NameLength = cpu_to_le16(uni_path_len - 2);
2766 copy_size = uni_path_len;
2767 if (copy_size % 8 != 0)
2768 copy_size = roundup(copy_size, 8);
2769 copy_path = kzalloc(copy_size, GFP_KERNEL);
2772 memcpy((char *)copy_path, (const char *)path,
2774 uni_path_len = copy_size;
2778 iov[1].iov_len = uni_path_len;
2779 iov[1].iov_base = path;
2781 if ((!server->oplocks) || (tcon->no_lease))
2782 *oplock = SMB2_OPLOCK_LEVEL_NONE;
2784 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
2785 *oplock == SMB2_OPLOCK_LEVEL_NONE)
2786 req->RequestedOplockLevel = *oplock;
2787 else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
2788 (oparms->create_options & CREATE_NOT_FILE))
2789 req->RequestedOplockLevel = *oplock; /* no srv lease support */
2791 rc = add_lease_context(server, iov, &n_iov,
2792 oparms->fid->lease_key, oplock);
2797 if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
2798 /* need to set Next field of lease context if we request it */
2799 if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
2800 struct create_context *ccontext =
2801 (struct create_context *)iov[n_iov-1].iov_base;
2803 cpu_to_le32(server->vals->create_lease_size);
2806 rc = add_durable_context(iov, &n_iov, oparms,
2807 tcon->use_persistent);
2812 if (tcon->posix_extensions) {
2814 struct create_context *ccontext =
2815 (struct create_context *)iov[n_iov-1].iov_base;
2817 cpu_to_le32(iov[n_iov-1].iov_len);
2820 rc = add_posix_context(iov, &n_iov, oparms->mode);
2825 if (tcon->snapshot_time) {
2826 cifs_dbg(FYI, "adding snapshot context\n");
2828 struct create_context *ccontext =
2829 (struct create_context *)iov[n_iov-1].iov_base;
2831 cpu_to_le32(iov[n_iov-1].iov_len);
2834 rc = add_twarp_context(iov, &n_iov, tcon->snapshot_time);
2839 if ((oparms->disposition != FILE_OPEN) && (oparms->cifs_sb)) {
2843 if ((oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) &&
2844 (oparms->mode != ACL_NO_MODE))
2848 oparms->mode = ACL_NO_MODE;
2851 if (oparms->cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
2856 if (set_owner || set_mode) {
2858 struct create_context *ccontext =
2859 (struct create_context *)iov[n_iov-1].iov_base;
2860 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2863 cifs_dbg(FYI, "add sd with mode 0x%x\n", oparms->mode);
2864 rc = add_sd_context(iov, &n_iov, oparms->mode, set_owner);
2871 struct create_context *ccontext =
2872 (struct create_context *)iov[n_iov-1].iov_base;
2873 ccontext->Next = cpu_to_le32(iov[n_iov-1].iov_len);
2875 add_query_id_context(iov, &n_iov);
2877 rqst->rq_nvec = n_iov;
2881 /* rq_iov[0] is the request and is released by cifs_small_buf_release().
2882 * All other vectors are freed by kfree().
2885 SMB2_open_free(struct smb_rqst *rqst)
2889 if (rqst && rqst->rq_iov) {
2890 cifs_small_buf_release(rqst->rq_iov[0].iov_base);
2891 for (i = 1; i < rqst->rq_nvec; i++)
2892 if (rqst->rq_iov[i].iov_base != smb2_padding)
2893 kfree(rqst->rq_iov[i].iov_base);
2898 SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
2899 __u8 *oplock, struct smb2_file_all_info *buf,
2900 struct create_posix_rsp *posix,
2901 struct kvec *err_iov, int *buftype)
2903 struct smb_rqst rqst;
2904 struct smb2_create_rsp *rsp = NULL;
2905 struct cifs_tcon *tcon = oparms->tcon;
2906 struct cifs_ses *ses = tcon->ses;
2907 struct TCP_Server_Info *server = cifs_pick_channel(ses);
2908 struct kvec iov[SMB2_CREATE_IOV_SIZE];
2909 struct kvec rsp_iov = {NULL, 0};
2910 int resp_buftype = CIFS_NO_BUFFER;
2914 cifs_dbg(FYI, "create/open\n");
2915 if (!ses || !server)
2918 if (smb3_encryption_required(tcon))
2919 flags |= CIFS_TRANSFORM_REQ;
2921 memset(&rqst, 0, sizeof(struct smb_rqst));
2922 memset(&iov, 0, sizeof(iov));
2924 rqst.rq_nvec = SMB2_CREATE_IOV_SIZE;
2926 rc = SMB2_open_init(tcon, server,
2927 &rqst, oplock, oparms, path);
2931 trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid,
2932 oparms->create_options, oparms->desired_access);
2934 rc = cifs_send_recv(xid, ses, server,
2935 &rqst, &resp_buftype, flags,
2937 rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
2940 cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
2941 if (err_iov && rsp) {
2943 *buftype = resp_buftype;
2944 resp_buftype = CIFS_NO_BUFFER;
2947 trace_smb3_open_err(xid, tcon->tid, ses->Suid,
2948 oparms->create_options, oparms->desired_access, rc);
2949 if (rc == -EREMCHG) {
2950 pr_warn_once("server share %s deleted\n",
2952 tcon->need_reconnect = true;
2955 } else if (rsp == NULL) /* unlikely to happen, but safer to check */
2958 trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
2960 ses->Suid, oparms->create_options,
2961 oparms->desired_access);
2963 atomic_inc(&tcon->num_remote_opens);
2964 oparms->fid->persistent_fid = le64_to_cpu(rsp->PersistentFileId);
2965 oparms->fid->volatile_fid = le64_to_cpu(rsp->VolatileFileId);
2966 oparms->fid->access = oparms->desired_access;
2967 #ifdef CONFIG_CIFS_DEBUG2
2968 oparms->fid->mid = le64_to_cpu(rsp->hdr.MessageId);
2969 #endif /* CIFS_DEBUG2 */
2972 buf->CreationTime = rsp->CreationTime;
2973 buf->LastAccessTime = rsp->LastAccessTime;
2974 buf->LastWriteTime = rsp->LastWriteTime;
2975 buf->ChangeTime = rsp->ChangeTime;
2976 buf->AllocationSize = rsp->AllocationSize;
2977 buf->EndOfFile = rsp->EndofFile;
2978 buf->Attributes = rsp->FileAttributes;
2979 buf->NumberOfLinks = cpu_to_le32(1);
2980 buf->DeletePending = 0;
2984 smb2_parse_contexts(server, rsp, &oparms->fid->epoch,
2985 oparms->fid->lease_key, oplock, buf, posix);
2987 SMB2_open_free(&rqst);
2988 free_rsp_buf(resp_buftype, rsp);
2993 SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
2994 struct smb_rqst *rqst,
2995 u64 persistent_fid, u64 volatile_fid, u32 opcode,
2996 bool is_fsctl, char *in_data, u32 indatalen,
2997 __u32 max_response_size)
2999 struct smb2_ioctl_req *req;
3000 struct kvec *iov = rqst->rq_iov;
3001 unsigned int total_len;
3005 rc = smb2_ioctl_req_init(opcode, tcon, server,
3006 (void **) &req, &total_len);
3012 * indatalen is usually small at a couple of bytes max, so
3013 * just allocate through generic pool
3015 in_data_buf = kmemdup(in_data, indatalen, GFP_NOFS);
3017 cifs_small_buf_release(req);
3022 req->CtlCode = cpu_to_le32(opcode);
3023 req->PersistentFileId = persistent_fid;
3024 req->VolatileFileId = volatile_fid;
3026 iov[0].iov_base = (char *)req;
3028 * If there is no input data, the size of the ioctl struct in the
3029 * protocol spec still includes a 1 byte data buffer,
3030 * but if input data is passed to the ioctl, we do not
3031 * want to double count this, so we do not send
3032 * the dummy one byte of data in iovec[0] if sending
3033 * input data (in iovec[1]).
3036 req->InputCount = cpu_to_le32(indatalen);
3037 /* do not set InputOffset if no input data */
3039 cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer));
3041 iov[0].iov_len = total_len - 1;
3042 iov[1].iov_base = in_data_buf;
3043 iov[1].iov_len = indatalen;
3046 iov[0].iov_len = total_len;
3049 req->OutputOffset = 0;
3050 req->OutputCount = 0; /* MBZ */
3053 * In most cases max_response_size is set to 16K (CIFSMaxBufSize).
3054 * We could increase the default MaxOutputResponse, but that could require
3055 * more credits. Windows typically sets this smaller, but for some
3056 * ioctls it may be useful to allow the server to send more. There is no
3057 * point limiting what the server can send as long as it fits in one credit.
3058 * We cannot handle more than CIFS_MAX_BUF_SIZE yet, but may want
3059 * to increase this limit in the future.
3060 * Note that for snapshot queries, servers like Azure expect that
3061 * the first query be minimal size (and just used to get the number/size
3062 * of previous versions), so the response size must be specified as EXACTLY
3063 * sizeof(struct snapshot_array), which is 16 when rounded up to a multiple
3064 * of eight bytes. Currently that is the only case where we set max
3065 * response size smaller.
3067 req->MaxOutputResponse = cpu_to_le32(max_response_size);
3068 req->hdr.CreditCharge =
3069 cpu_to_le16(DIV_ROUND_UP(max(indatalen, max_response_size),
3070 SMB2_MAX_BUFFER_SIZE));
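/*
 * For example, with the usual 64KB SMB2_MAX_BUFFER_SIZE, an ioctl with no
 * input data and the default 16K CIFSMaxBufSize response is charged
 * DIV_ROUND_UP(16384, 65536) = 1 credit, while one allowing a 1MB response
 * would be charged 16.
 */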
3072 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
3076 /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */
3077 if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
3078 req->hdr.Flags |= SMB2_FLAGS_SIGNED;
3084 SMB2_ioctl_free(struct smb_rqst *rqst)
3087 if (rqst && rqst->rq_iov) {
3088 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3089 for (i = 1; i < rqst->rq_nvec; i++)
3090 if (rqst->rq_iov[i].iov_base != smb2_padding)
3091 kfree(rqst->rq_iov[i].iov_base);
3097 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
3100 SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3101 u64 volatile_fid, u32 opcode, bool is_fsctl,
3102 char *in_data, u32 indatalen, u32 max_out_data_len,
3103 char **out_data, u32 *plen /* returned data len */)
3105 struct smb_rqst rqst;
3106 struct smb2_ioctl_rsp *rsp = NULL;
3107 struct cifs_ses *ses;
3108 struct TCP_Server_Info *server;
3109 struct kvec iov[SMB2_IOCTL_IOV_SIZE];
3110 struct kvec rsp_iov = {NULL, 0};
3111 int resp_buftype = CIFS_NO_BUFFER;
3115 cifs_dbg(FYI, "SMB2 IOCTL\n");
3117 if (out_data != NULL)
3120 /* zero out returned data len, in case of error */
3131 server = cifs_pick_channel(ses);
3135 if (smb3_encryption_required(tcon))
3136 flags |= CIFS_TRANSFORM_REQ;
3138 memset(&rqst, 0, sizeof(struct smb_rqst));
3139 memset(&iov, 0, sizeof(iov));
3141 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
3143 rc = SMB2_ioctl_init(tcon, server,
3144 &rqst, persistent_fid, volatile_fid, opcode,
3145 is_fsctl, in_data, indatalen, max_out_data_len);
3149 rc = cifs_send_recv(xid, ses, server,
3150 &rqst, &resp_buftype, flags,
3152 rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
3155 trace_smb3_fsctl_err(xid, persistent_fid, tcon->tid,
3156 ses->Suid, 0, opcode, rc);
3158 if ((rc != 0) && (rc != -EINVAL) && (rc != -E2BIG)) {
3159 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3161 } else if (rc == -EINVAL) {
3162 if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
3163 (opcode != FSCTL_SRV_COPYCHUNK)) {
3164 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3167 } else if (rc == -E2BIG) {
3168 if (opcode != FSCTL_QUERY_ALLOCATED_RANGES) {
3169 cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
3174 /* check if caller wants to look at return data or just return rc */
3175 if ((plen == NULL) || (out_data == NULL))
3179 * Although unlikely to be possible for rsp to be null and rc not set,
3180 * adding the check below is slightly safer long term (and quiets a Coverity warning).
3188 *plen = le32_to_cpu(rsp->OutputCount);
3190 /* We check for obvious errors in the output buffer length and offset */
3192 goto ioctl_exit; /* server returned no data */
3193 else if (*plen > rsp_iov.iov_len || *plen > 0xFF00) {
3194 cifs_tcon_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
3200 if (rsp_iov.iov_len - *plen < le32_to_cpu(rsp->OutputOffset)) {
3201 cifs_tcon_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
3202 le32_to_cpu(rsp->OutputOffset));
3208 *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
3210 if (*out_data == NULL) {
3216 SMB2_ioctl_free(&rqst);
3217 free_rsp_buf(resp_buftype, rsp);
3222 * Individual callers to ioctl worker function follow
3226 SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
3227 u64 persistent_fid, u64 volatile_fid)
3230 struct compress_ioctl fsctl_input;
3231 char *ret_data = NULL;
3233 fsctl_input.CompressionState =
3234 cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
3236 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
3237 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
3238 (char *)&fsctl_input /* data input */,
3239 2 /* in data len */, CIFSMaxBufSize /* max out data */,
3240 &ret_data /* out data */, NULL);
3242 cifs_dbg(FYI, "set compression rc %d\n", rc);
3248 SMB2_close_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3249 struct smb_rqst *rqst,
3250 u64 persistent_fid, u64 volatile_fid, bool query_attrs)
3252 struct smb2_close_req *req;
3253 struct kvec *iov = rqst->rq_iov;
3254 unsigned int total_len;
3257 rc = smb2_plain_req_init(SMB2_CLOSE, tcon, server,
3258 (void **) &req, &total_len);
3262 req->PersistentFileId = cpu_to_le64(persistent_fid);
3263 req->VolatileFileId = cpu_to_le64(volatile_fid);
3265 req->Flags = SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB;
3268 iov[0].iov_base = (char *)req;
3269 iov[0].iov_len = total_len;
3275 SMB2_close_free(struct smb_rqst *rqst)
3277 if (rqst && rqst->rq_iov)
3278 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3282 __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3283 u64 persistent_fid, u64 volatile_fid,
3284 struct smb2_file_network_open_info *pbuf)
3286 struct smb_rqst rqst;
3287 struct smb2_close_rsp *rsp = NULL;
3288 struct cifs_ses *ses = tcon->ses;
3289 struct TCP_Server_Info *server = cifs_pick_channel(ses);
3291 struct kvec rsp_iov;
3292 int resp_buftype = CIFS_NO_BUFFER;
3295 bool query_attrs = false;
3297 cifs_dbg(FYI, "Close\n");
3299 if (!ses || !server)
3302 if (smb3_encryption_required(tcon))
3303 flags |= CIFS_TRANSFORM_REQ;
3305 memset(&rqst, 0, sizeof(struct smb_rqst));
3306 memset(&iov, 0, sizeof(iov));
3310 /* check if we need to ask the server to return timestamps in the close response */
3314 trace_smb3_close_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3315 rc = SMB2_close_init(tcon, server,
3316 &rqst, persistent_fid, volatile_fid,
3321 rc = cifs_send_recv(xid, ses, server,
3322 &rqst, &resp_buftype, flags, &rsp_iov);
3323 rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
3326 cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
3327 trace_smb3_close_err(xid, persistent_fid, tcon->tid, ses->Suid,
3331 trace_smb3_close_done(xid, persistent_fid, tcon->tid,
3334 * Note that we have to subtract 4 since struct network_open_info
3335 * has a final 4 byte pad that the close response does not have.
3338 memcpy(pbuf, (char *)&rsp->CreationTime, sizeof(*pbuf) - 4);
3341 atomic_dec(&tcon->num_remote_opens);
3343 SMB2_close_free(&rqst);
3344 free_rsp_buf(resp_buftype, rsp);
3346 /* retry close in a worker thread if this one is interrupted */
3347 if (is_interrupt_error(rc)) {
3350 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
3353 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
3354 persistent_fid, tmp_rc);
3360 SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
3361 u64 persistent_fid, u64 volatile_fid)
3363 return __SMB2_close(xid, tcon, persistent_fid, volatile_fid, NULL);
3367 smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
3368 struct kvec *iov, unsigned int min_buf_size)
3370 unsigned int smb_len = iov->iov_len;
3371 char *end_of_smb = smb_len + (char *)iov->iov_base;
3372 char *begin_of_buf = offset + (char *)iov->iov_base;
3373 char *end_of_buf = begin_of_buf + buffer_length;
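/*
 * The checks below keep the described buffer inside the received iov: for
 * example, with a 200-byte iov, an offset of 150 and buffer_length of 100
 * are rejected because end_of_buf would land 50 bytes past end_of_smb.
 */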
3376 if (buffer_length < min_buf_size) {
3377 cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
3378 buffer_length, min_buf_size);
3382 /* check if beyond RFC1001 maximum length */
3383 if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
3384 cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
3385 buffer_length, smb_len);
3389 if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
3390 cifs_dbg(VFS, "Invalid server response, bad offset to data\n");
3398 * If SMB buffer fields are valid, copy into temporary buffer to hold result.
3399 * Caller must free buffer.
3402 smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
3403 struct kvec *iov, unsigned int minbufsize,
3406 char *begin_of_buf = offset + (char *)iov->iov_base;
3412 rc = smb2_validate_iov(offset, buffer_length, iov, minbufsize);
3416 memcpy(data, begin_of_buf, buffer_length);
3422 SMB2_query_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3423 struct smb_rqst *rqst,
3424 u64 persistent_fid, u64 volatile_fid,
3425 u8 info_class, u8 info_type, u32 additional_info,
3426 size_t output_len, size_t input_len, void *input)
3428 struct smb2_query_info_req *req;
3429 struct kvec *iov = rqst->rq_iov;
3430 unsigned int total_len;
3433 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
3434 (void **) &req, &total_len);
3438 req->InfoType = info_type;
3439 req->FileInfoClass = info_class;
3440 req->PersistentFileId = persistent_fid;
3441 req->VolatileFileId = volatile_fid;
3442 req->AdditionalInformation = cpu_to_le32(additional_info);
3444 req->OutputBufferLength = cpu_to_le32(output_len);
3446 req->InputBufferLength = cpu_to_le32(input_len);
3447 /* total_len for an SMB query request is never close to the le16 max */
3448 req->InputBufferOffset = cpu_to_le16(total_len - 1);
3449 memcpy(req->Buffer, input, input_len);
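/*
 * total_len counts the one dummy byte of Buffer[] in the fixed-size request,
 * so "total_len - 1 + input_len" below makes iov[0] cover the fixed fields
 * plus exactly the input data copied into Buffer above, without double
 * counting that placeholder byte.
 */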
3452 iov[0].iov_base = (char *)req;
3454 iov[0].iov_len = total_len - 1 + input_len;
3459 SMB2_query_info_free(struct smb_rqst *rqst)
3461 if (rqst && rqst->rq_iov)
3462 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3466 query_info(const unsigned int xid, struct cifs_tcon *tcon,
3467 u64 persistent_fid, u64 volatile_fid, u8 info_class, u8 info_type,
3468 u32 additional_info, size_t output_len, size_t min_len, void **data,
3471 struct smb_rqst rqst;
3472 struct smb2_query_info_rsp *rsp = NULL;
3474 struct kvec rsp_iov;
3476 int resp_buftype = CIFS_NO_BUFFER;
3477 struct cifs_ses *ses = tcon->ses;
3478 struct TCP_Server_Info *server;
3480 bool allocated = false;
3482 cifs_dbg(FYI, "Query Info\n");
3486 server = cifs_pick_channel(ses);
3490 if (smb3_encryption_required(tcon))
3491 flags |= CIFS_TRANSFORM_REQ;
3493 memset(&rqst, 0, sizeof(struct smb_rqst));
3494 memset(&iov, 0, sizeof(iov));
3498 rc = SMB2_query_info_init(tcon, server,
3499 &rqst, persistent_fid, volatile_fid,
3500 info_class, info_type, additional_info,
3501 output_len, 0, NULL);
3505 trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
3506 ses->Suid, info_class, (__u32)info_type);
3508 rc = cifs_send_recv(xid, ses, server,
3509 &rqst, &resp_buftype, flags, &rsp_iov);
3510 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
3513 cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
3514 trace_smb3_query_info_err(xid, persistent_fid, tcon->tid,
3515 ses->Suid, info_class, (__u32)info_type, rc);
3519 trace_smb3_query_info_done(xid, persistent_fid, tcon->tid,
3520 ses->Suid, info_class, (__u32)info_type);
3523 *dlen = le32_to_cpu(rsp->OutputBufferLength);
3525 *data = kmalloc(*dlen, GFP_KERNEL);
3528 "Error %d allocating memory for acl\n",
3538 rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
3539 le32_to_cpu(rsp->OutputBufferLength),
3540 &rsp_iov, min_len, *data);
3541 if (rc && allocated) {
3548 SMB2_query_info_free(&rqst);
3549 free_rsp_buf(resp_buftype, rsp);
3553 int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3554 u64 persistent_fid, u64 volatile_fid, struct smb2_file_all_info *data)
3556 return query_info(xid, tcon, persistent_fid, volatile_fid,
3557 FILE_ALL_INFORMATION, SMB2_O_INFO_FILE, 0,
3558 sizeof(struct smb2_file_all_info) + PATH_MAX * 2,
3559 sizeof(struct smb2_file_all_info), (void **)&data,
3564 /* currently unused, as we now do compounding instead (see smb311_posix_query_path_info) */
3566 SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
3567 u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
3569 size_t output_len = sizeof(struct smb311_posix_qinfo) +
3570 (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
3573 return query_info(xid, tcon, persistent_fid, volatile_fid,
3574 SMB_FIND_FILE_POSIX_INFO, SMB2_O_INFO_FILE, 0,
3575 output_len, sizeof(struct smb311_posix_qinfo), (void **)&data, plen);
3576 /* Note caller must free "data" (passed in above). It may be allocated in query_info call */
3581 SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
3582 u64 persistent_fid, u64 volatile_fid,
3583 void **data, u32 *plen, u32 extra_info)
3585 __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
3589 return query_info(xid, tcon, persistent_fid, volatile_fid,
3590 0, SMB2_O_INFO_SECURITY, additional_info,
3591 SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
3595 SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
3596 u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
3598 return query_info(xid, tcon, persistent_fid, volatile_fid,
3599 FILE_INTERNAL_INFORMATION, SMB2_O_INFO_FILE, 0,
3600 sizeof(struct smb2_file_internal_info),
3601 sizeof(struct smb2_file_internal_info),
3602 (void **)&uniqueid, NULL);
3606 * CHANGE_NOTIFY Request is sent to get notifications on changes to a directory
3607 * See MS-SMB2 2.2.35 and 2.2.36
3611 SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
3612 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3613 u64 persistent_fid, u64 volatile_fid,
3614 u32 completion_filter, bool watch_tree)
3616 struct smb2_change_notify_req *req;
3617 struct kvec *iov = rqst->rq_iov;
3618 unsigned int total_len;
3621 rc = smb2_plain_req_init(SMB2_CHANGE_NOTIFY, tcon, server,
3622 (void **) &req, &total_len);
3626 req->PersistentFileId = cpu_to_le64(persistent_fid);
3627 req->VolatileFileId = cpu_to_le64(volatile_fid);
3628 /* See note 354 of MS-SMB2, 64K max */
3629 req->OutputBufferLength =
3630 cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
3631 req->CompletionFilter = cpu_to_le32(completion_filter);
3633 req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
3637 iov[0].iov_base = (char *)req;
3638 iov[0].iov_len = total_len;
3644 SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
3645 u64 persistent_fid, u64 volatile_fid, bool watch_tree,
3646 u32 completion_filter)
3648 struct cifs_ses *ses = tcon->ses;
3649 struct TCP_Server_Info *server = cifs_pick_channel(ses);
3650 struct smb_rqst rqst;
3652 struct kvec rsp_iov = {NULL, 0};
3653 int resp_buftype = CIFS_NO_BUFFER;
3657 cifs_dbg(FYI, "change notify\n");
3658 if (!ses || !server)
3661 if (smb3_encryption_required(tcon))
3662 flags |= CIFS_TRANSFORM_REQ;
3664 memset(&rqst, 0, sizeof(struct smb_rqst));
3665 memset(&iov, 0, sizeof(iov));
3669 rc = SMB2_notify_init(xid, &rqst, tcon, server,
3670 persistent_fid, volatile_fid,
3671 completion_filter, watch_tree);
3675 trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
3676 (u8)watch_tree, completion_filter);
3677 rc = cifs_send_recv(xid, ses, server,
3678 &rqst, &resp_buftype, flags, &rsp_iov);
3681 cifs_stats_fail_inc(tcon, SMB2_CHANGE_NOTIFY_HE);
3682 trace_smb3_notify_err(xid, persistent_fid, tcon->tid, ses->Suid,
3683 (u8)watch_tree, completion_filter, rc);
3685 trace_smb3_notify_done(xid, persistent_fid, tcon->tid,
3686 ses->Suid, (u8)watch_tree, completion_filter);
3690 cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
3691 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3698 * This is a no-op for now. We're not really interested in the reply, but
3699 * rather in the fact that the server sent one and that server->lstrp gets updated.
3702 * FIXME: maybe we should consider checking that the reply matches request?
3705 smb2_echo_callback(struct mid_q_entry *mid)
3707 struct TCP_Server_Info *server = mid->callback_data;
3708 struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
3709 struct cifs_credits credits = { .value = 0, .instance = 0 };
3711 if (mid->mid_state == MID_RESPONSE_RECEIVED
3712 || mid->mid_state == MID_RESPONSE_MALFORMED) {
3713 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
3714 credits.instance = server->reconnect_instance;
3717 DeleteMidQEntry(mid);
3718 add_credits(server, &credits, CIFS_ECHO_OP);
3721 void smb2_reconnect_server(struct work_struct *work)
3723 struct TCP_Server_Info *server = container_of(work,
3724 struct TCP_Server_Info, reconnect.work);
3725 struct cifs_ses *ses;
3726 struct cifs_tcon *tcon, *tcon2;
3727 struct list_head tmp_list;
3728 int tcon_exist = false;
3730 int resched = false;
3733 /* Prevent simultaneous reconnects that can corrupt the tcon->rlist */
3734 mutex_lock(&server->reconnect_mutex);
3736 INIT_LIST_HEAD(&tmp_list);
3737 cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3739 spin_lock(&cifs_tcp_ses_lock);
3740 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
3741 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
3742 if (tcon->need_reconnect || tcon->need_reopen_files) {
3744 list_add_tail(&tcon->rlist, &tmp_list);
3749 * IPC has the same lifetime as its session and uses its refcount.
3752 if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
3753 list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
3759 * Get the reference to server struct to be sure that the last call of
3760 * cifs_put_tcon() in the loop below won't release the server pointer.
3763 server->srv_count++;
3765 spin_unlock(&cifs_tcp_ses_lock);
3767 list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
3768 rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
3770 cifs_reopen_persistent_handles(tcon);
3773 list_del_init(&tcon->rlist);
3775 cifs_put_smb_ses(tcon->ses);
3777 cifs_put_tcon(tcon);
3780 cifs_dbg(FYI, "Reconnecting tcons finished\n");
3782 queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
3783 mutex_unlock(&server->reconnect_mutex);
3785 /* now we can safely release srv struct */
3787 cifs_put_tcp_session(server, 1);
3791 SMB2_echo(struct TCP_Server_Info *server)
3793 struct smb2_echo_req *req;
3796 struct smb_rqst rqst = { .rq_iov = iov,
3798 unsigned int total_len;
3800 cifs_dbg(FYI, "In echo request\n");
3802 if (server->tcpStatus == CifsNeedNegotiate) {
3803 /* No need to send echo on newly established connections */
3804 mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
3808 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
3809 (void **)&req, &total_len);
3813 req->hdr.CreditRequest = cpu_to_le16(1);
3815 iov[0].iov_len = total_len;
3816 iov[0].iov_base = (char *)req;
3818 rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
3819 server, CIFS_ECHO_OP, NULL);
3821 cifs_dbg(FYI, "Echo request failed: %d\n", rc);
3823 cifs_small_buf_release(req);
3828 SMB2_flush_free(struct smb_rqst *rqst)
3830 if (rqst && rqst->rq_iov)
3831 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
3835 SMB2_flush_init(const unsigned int xid, struct smb_rqst *rqst,
3836 struct cifs_tcon *tcon, struct TCP_Server_Info *server,
3837 u64 persistent_fid, u64 volatile_fid)
3839 struct smb2_flush_req *req;
3840 struct kvec *iov = rqst->rq_iov;
3841 unsigned int total_len;
3844 rc = smb2_plain_req_init(SMB2_FLUSH, tcon, server,
3845 (void **) &req, &total_len);
3849 req->PersistentFileId = cpu_to_le64(persistent_fid);
3850 req->VolatileFileId = cpu_to_le64(volatile_fid);
3852 iov[0].iov_base = (char *)req;
3853 iov[0].iov_len = total_len;
3859 SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
3862 struct cifs_ses *ses = tcon->ses;
3863 struct smb_rqst rqst;
3865 struct kvec rsp_iov = {NULL, 0};
3866 struct TCP_Server_Info *server = cifs_pick_channel(ses);
3867 int resp_buftype = CIFS_NO_BUFFER;
3871 cifs_dbg(FYI, "flush\n");
3872 if (!ses || !(ses->server))
3875 if (smb3_encryption_required(tcon))
3876 flags |= CIFS_TRANSFORM_REQ;
3878 memset(&rqst, 0, sizeof(struct smb_rqst));
3879 memset(&iov, 0, sizeof(iov));
3883 rc = SMB2_flush_init(xid, &rqst, tcon, server,
3884 persistent_fid, volatile_fid);
3888 trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
3889 rc = cifs_send_recv(xid, ses, server,
3890 &rqst, &resp_buftype, flags, &rsp_iov);
3893 cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
3894 trace_smb3_flush_err(xid, persistent_fid, tcon->tid, ses->Suid,
3897 trace_smb3_flush_done(xid, persistent_fid, tcon->tid,
3901 SMB2_flush_free(&rqst);
3902 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
3907 * To form a chain of read requests, any read requests after the first should
3908 * have the end_of_chain boolean set to true.
3911 smb2_new_read_req(void **buf, unsigned int *total_len,
3912 struct cifs_io_parms *io_parms, struct cifs_readdata *rdata,
3913 unsigned int remaining_bytes, int request_type)
3916 struct smb2_read_req *req = NULL;
3917 struct smb2_hdr *shdr;
3918 struct TCP_Server_Info *server = io_parms->server;
3920 rc = smb2_plain_req_init(SMB2_READ, io_parms->tcon, server,
3921 (void **) &req, total_len);
3926 return -ECONNABORTED;
3929 shdr->Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
3931 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
3932 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
3933 req->ReadChannelInfoOffset = 0; /* reserved */
3934 req->ReadChannelInfoLength = 0; /* reserved */
3935 req->Channel = 0; /* reserved */
3936 req->MinimumCount = 0;
3937 req->Length = cpu_to_le32(io_parms->length);
3938 req->Offset = cpu_to_le64(io_parms->offset);
3940 trace_smb3_read_enter(0 /* xid */,
3941 io_parms->persistent_fid,
3942 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
3943 io_parms->offset, io_parms->length);
3944 #ifdef CONFIG_CIFS_SMB_DIRECT
3946 * If we want to do an RDMA write, fill in and append an
3947 * smbd_buffer_descriptor_v1 to the end of the read request.
3949 if (server->rdma && rdata && !server->sign &&
3950 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
3952 struct smbd_buffer_descriptor_v1 *v1;
3953 bool need_invalidate = server->dialect == SMB30_PROT_ID;
3955 rdata->mr = smbd_register_mr(
3956 server->smbd_conn, rdata->pages,
3957 rdata->nr_pages, rdata->page_offset,
3958 rdata->tailsz, true, need_invalidate);
3962 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
3963 if (need_invalidate)
3964 req->Channel = SMB2_CHANNEL_RDMA_V1;
3965 req->ReadChannelInfoOffset =
3966 cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
3967 req->ReadChannelInfoLength =
3968 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
3969 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
3970 v1->offset = cpu_to_le64(rdata->mr->mr->iova);
3971 v1->token = cpu_to_le32(rdata->mr->mr->rkey);
3972 v1->length = cpu_to_le32(rdata->mr->mr->length);
3974 *total_len += sizeof(*v1) - 1;
3977 if (request_type & CHAINED_REQUEST) {
3978 if (!(request_type & END_OF_CHAIN)) {
3979 /* next 8-byte aligned request */
3980 *total_len = DIV_ROUND_UP(*total_len, 8) * 8;
3981 shdr->NextCommand = cpu_to_le32(*total_len);
3982 } else /* END_OF_CHAIN */
3983 shdr->NextCommand = 0;
3984 if (request_type & RELATED_REQUEST) {
3985 shdr->Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
3987 * Related requests use info from the previous read request in the chain.
3990 shdr->SessionId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3991 shdr->Id.SyncId.TreeId = cpu_to_le32(0xFFFFFFFF);
3992 req->PersistentFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3993 req->VolatileFileId = cpu_to_le64(0xFFFFFFFFFFFFFFFF);
3996 if (remaining_bytes > io_parms->length)
3997 req->RemainingBytes = cpu_to_le32(remaining_bytes);
3999 req->RemainingBytes = 0;
4006 smb2_readv_callback(struct mid_q_entry *mid)
4008 struct cifs_readdata *rdata = mid->callback_data;
4009 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
4010 struct TCP_Server_Info *server = rdata->server;
4011 struct smb2_hdr *shdr =
4012 (struct smb2_hdr *)rdata->iov[0].iov_base;
4013 struct cifs_credits credits = { .value = 0, .instance = 0 };
4014 struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
4016 .rq_pages = rdata->pages,
4017 .rq_offset = rdata->page_offset,
4018 .rq_npages = rdata->nr_pages,
4019 .rq_pagesz = rdata->pagesz,
4020 .rq_tailsz = rdata->tailsz };
4022 WARN_ONCE(rdata->server != mid->server,
4023 "rdata server %p != mid server %p",
4024 rdata->server, mid->server);
4026 cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
4027 __func__, mid->mid, mid->mid_state, rdata->result,
4030 switch (mid->mid_state) {
4031 case MID_RESPONSE_RECEIVED:
4032 credits.value = le16_to_cpu(shdr->CreditRequest);
4033 credits.instance = server->reconnect_instance;
4034 /* result already set, check signature */
4035 if (server->sign && !mid->decrypted) {
4038 rc = smb2_verify_signature(&rqst, server);
4040 cifs_tcon_dbg(VFS, "SMB signature verification returned error = %d\n",
4043 /* FIXME: should this be counted toward the initiating task? */
4044 task_io_account_read(rdata->got_bytes);
4045 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4047 case MID_REQUEST_SUBMITTED:
4048 case MID_RETRY_NEEDED:
4049 rdata->result = -EAGAIN;
4050 if (server->sign && rdata->got_bytes)
4051 /* reset the byte count since we cannot verify the signature */
4052 rdata->got_bytes = 0;
4053 /* FIXME: should this be counted toward the initiating task? */
4054 task_io_account_read(rdata->got_bytes);
4055 cifs_stats_bytes_read(tcon, rdata->got_bytes);
4057 case MID_RESPONSE_MALFORMED:
4058 credits.value = le16_to_cpu(shdr->CreditRequest);
4059 credits.instance = server->reconnect_instance;
4062 rdata->result = -EIO;
4064 #ifdef CONFIG_CIFS_SMB_DIRECT
4066 * If this rdata has a memory region registered, the MR can be freed.
4067 * The MR needs to be freed as soon as I/O finishes to prevent deadlock,
4068 * because MRs are limited in number and are reused for future I/Os.
4071 smbd_deregister_mr(rdata->mr);
4075 if (rdata->result && rdata->result != -ENODATA) {
4076 cifs_stats_fail_inc(tcon, SMB2_READ_HE);
4077 trace_smb3_read_err(0 /* xid */,
4078 rdata->cfile->fid.persistent_fid,
4079 tcon->tid, tcon->ses->Suid, rdata->offset,
4080 rdata->bytes, rdata->result);
4082 trace_smb3_read_done(0 /* xid */,
4083 rdata->cfile->fid.persistent_fid,
4084 tcon->tid, tcon->ses->Suid,
4085 rdata->offset, rdata->got_bytes);
4087 queue_work(cifsiod_wq, &rdata->work);
4088 DeleteMidQEntry(mid);
4089 add_credits(server, &credits, 0);
4092 /* smb2_async_readv - send an async read, and set up mid to handle result */
4094 smb2_async_readv(struct cifs_readdata *rdata)
4098 struct smb2_hdr *shdr;
4099 struct cifs_io_parms io_parms;
4100 struct smb_rqst rqst = { .rq_iov = rdata->iov,
4102 struct TCP_Server_Info *server;
4103 struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
4104 unsigned int total_len;
4106 cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
4107 __func__, rdata->offset, rdata->bytes);
4110 rdata->server = cifs_pick_channel(tcon->ses);
4112 io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
4113 io_parms.server = server = rdata->server;
4114 io_parms.offset = rdata->offset;
4115 io_parms.length = rdata->bytes;
4116 io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
4117 io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
4118 io_parms.pid = rdata->pid;
4120 rc = smb2_new_read_req(
4121 (void **) &buf, &total_len, &io_parms, rdata, 0, 0);
4125 if (smb3_encryption_required(io_parms.tcon))
4126 flags |= CIFS_TRANSFORM_REQ;
4128 rdata->iov[0].iov_base = buf;
4129 rdata->iov[0].iov_len = total_len;
4131 shdr = (struct smb2_hdr *)buf;
4133 if (rdata->credits.value > 0) {
4134 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
4135 SMB2_MAX_BUFFER_SIZE));
4136 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
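/*
 * For example, a 1MB read carries a CreditCharge of
 * DIV_ROUND_UP(1048576, 65536) = 16, and we request 8 credits beyond the
 * charge so that subsequent large reads are not starved of credits.
 */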
4138 rc = adjust_credits(server, &rdata->credits, rdata->bytes);
4140 goto async_readv_out;
4142 flags |= CIFS_HAS_CREDITS;
4145 kref_get(&rdata->refcount);
4146 rc = cifs_call_async(server, &rqst,
4147 cifs_readv_receive, smb2_readv_callback,
4148 smb3_handle_read_data, rdata, flags,
4151 kref_put(&rdata->refcount, cifs_readdata_release);
4152 cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
4153 trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid,
4155 io_parms.tcon->ses->Suid,
4156 io_parms.offset, io_parms.length, rc);
4160 cifs_small_buf_release(buf);
4165 SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
4166 unsigned int *nbytes, char **buf, int *buf_type)
4168 struct smb_rqst rqst;
4169 int resp_buftype, rc;
4170 struct smb2_read_req *req = NULL;
4171 struct smb2_read_rsp *rsp = NULL;
4173 struct kvec rsp_iov;
4174 unsigned int total_len;
4175 int flags = CIFS_LOG_ERROR;
4176 struct cifs_ses *ses = io_parms->tcon->ses;
4178 if (!io_parms->server)
4179 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
4182 rc = smb2_new_read_req((void **)&req, &total_len, io_parms, NULL, 0, 0);
4186 if (smb3_encryption_required(io_parms->tcon))
4187 flags |= CIFS_TRANSFORM_REQ;
4189 iov[0].iov_base = (char *)req;
4190 iov[0].iov_len = total_len;
4192 memset(&rqst, 0, sizeof(struct smb_rqst));
4196 rc = cifs_send_recv(xid, ses, io_parms->server,
4197 &rqst, &resp_buftype, flags, &rsp_iov);
4198 rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
4201 if (rc != -ENODATA) {
4202 cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
4203 cifs_dbg(VFS, "Send error in read = %d\n", rc);
4204 trace_smb3_read_err(xid,
4205 le64_to_cpu(req->PersistentFileId),
4206 io_parms->tcon->tid, ses->Suid,
4207 io_parms->offset, io_parms->length,
4210 trace_smb3_read_done(xid,
4211 le64_to_cpu(req->PersistentFileId),
4212 io_parms->tcon->tid, ses->Suid,
4213 io_parms->offset, 0);
4214 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4215 cifs_small_buf_release(req);
4216 return rc == -ENODATA ? 0 : rc;
4218 trace_smb3_read_done(xid,
4219 le64_to_cpu(req->PersistentFileId),
4220 io_parms->tcon->tid, ses->Suid,
4221 io_parms->offset, io_parms->length);
4223 cifs_small_buf_release(req);
4225 *nbytes = le32_to_cpu(rsp->DataLength);
4226 if ((*nbytes > CIFS_MAX_MSGSIZE) ||
4227 (*nbytes > io_parms->length)) {
4228 cifs_dbg(FYI, "bad length %d for count %d\n",
4229 *nbytes, io_parms->length);
4235 memcpy(*buf, (char *)rsp + rsp->DataOffset, *nbytes);
4236 free_rsp_buf(resp_buftype, rsp_iov.iov_base);
4237 } else if (resp_buftype != CIFS_NO_BUFFER) {
4238 *buf = rsp_iov.iov_base;
4239 if (resp_buftype == CIFS_SMALL_BUFFER)
4240 *buf_type = CIFS_SMALL_BUFFER;
4241 else if (resp_buftype == CIFS_LARGE_BUFFER)
4242 *buf_type = CIFS_LARGE_BUFFER;
4248 * Check the mid_state and signature on received buffer (if any), and queue the
4249 * workqueue completion task.
4252 smb2_writev_callback(struct mid_q_entry *mid)
4254 struct cifs_writedata *wdata = mid->callback_data;
4255 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
4256 struct TCP_Server_Info *server = wdata->server;
4257 unsigned int written;
4258 struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
4259 struct cifs_credits credits = { .value = 0, .instance = 0 };
4261 WARN_ONCE(wdata->server != mid->server,
4262 "wdata server %p != mid server %p",
4263 wdata->server, mid->server);
4265 switch (mid->mid_state) {
4266 case MID_RESPONSE_RECEIVED:
4267 credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
4268 credits.instance = server->reconnect_instance;
4269 wdata->result = smb2_check_receive(mid, server, 0);
4270 if (wdata->result != 0)
4273 written = le32_to_cpu(rsp->DataLength);
		/*
		 * Mask off the high 16 bits when the number of bytes written,
		 * as returned by the server, is greater than the number of
		 * bytes requested by the client. OS/2 servers are known to
		 * set incorrect CountHigh values.
		 */
		if (written > wdata->bytes)
			written &= 0xFFFF;

		if (written < wdata->bytes)
			wdata->result = -ENOSPC;
		else
			wdata->bytes = written;
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		wdata->result = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
		credits.instance = server->reconnect_instance;
		fallthrough;
	default:
		wdata->result = -EIO;
		break;
	}
4300 #ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If this wdata has a registered memory region (MR), it can be freed
	 * now. The number of MRs available is limited, so it is important to
	 * recover a used MR as soon as the I/O is finished. Holding the MR
	 * longer into later stages of I/O processing can lead to an I/O
	 * deadlock, because no MR is left to send the request on an I/O
	 * retry.
	 */
	if (wdata->mr) {
		smbd_deregister_mr(wdata->mr);
		wdata->mr = NULL;
	}
4313 if (wdata->result) {
4314 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
4315 trace_smb3_write_err(0 /* no xid */,
4316 wdata->cfile->fid.persistent_fid,
4317 tcon->tid, tcon->ses->Suid, wdata->offset,
4318 wdata->bytes, wdata->result);
4319 if (wdata->result == -ENOSPC)
			pr_warn_once("Out of space writing to %s\n",
				     tcon->treeName);
	} else
		trace_smb3_write_done(0 /* no xid */,
4324 wdata->cfile->fid.persistent_fid,
4325 tcon->tid, tcon->ses->Suid,
4326 wdata->offset, wdata->bytes);
4328 queue_work(cifsiod_wq, &wdata->work);
4329 DeleteMidQEntry(mid);
4330 add_credits(server, &credits, 0);
4333 /* smb2_async_writev - send an async write, and set up mid to handle result */
4335 smb2_async_writev(struct cifs_writedata *wdata,
4336 void (*release)(struct kref *kref))
4338 int rc = -EACCES, flags = 0;
4339 struct smb2_write_req *req = NULL;
4340 struct smb2_hdr *shdr;
4341 struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
4342 struct TCP_Server_Info *server = wdata->server;
4344 struct smb_rqst rqst = { };
4345 unsigned int total_len;
	if (!wdata->server)
		server = wdata->server = cifs_pick_channel(tcon->ses);
4350 rc = smb2_plain_req_init(SMB2_WRITE, tcon, server,
4351 (void **) &req, &total_len);
4355 if (smb3_encryption_required(tcon))
4356 flags |= CIFS_TRANSFORM_REQ;
4358 shdr = (struct smb2_hdr *)req;
4359 shdr->Id.SyncId.ProcessId = cpu_to_le32(wdata->cfile->pid);
4361 req->PersistentFileId = cpu_to_le64(wdata->cfile->fid.persistent_fid);
4362 req->VolatileFileId = cpu_to_le64(wdata->cfile->fid.volatile_fid);
4363 req->WriteChannelInfoOffset = 0;
4364 req->WriteChannelInfoLength = 0;
4366 req->Offset = cpu_to_le64(wdata->offset);
4367 req->DataOffset = cpu_to_le16(
4368 offsetof(struct smb2_write_req, Buffer));
4369 req->RemainingBytes = 0;
4371 trace_smb3_write_enter(0 /* xid */, wdata->cfile->fid.persistent_fid,
4372 tcon->tid, tcon->ses->Suid, wdata->offset, wdata->bytes);
4373 #ifdef CONFIG_CIFS_SMB_DIRECT
	/*
	 * If we want the server to do an RDMA read of the data, fill in and
	 * append a smbd_buffer_descriptor_v1 to the end of the write request.
	 */
4378 if (server->rdma && !server->sign && wdata->bytes >=
4379 server->smbd_conn->rdma_readwrite_threshold) {
4381 struct smbd_buffer_descriptor_v1 *v1;
4382 bool need_invalidate = server->dialect == SMB30_PROT_ID;
4384 wdata->mr = smbd_register_mr(
4385 server->smbd_conn, wdata->pages,
4386 wdata->nr_pages, wdata->page_offset,
4387 wdata->tailsz, false, need_invalidate);
		if (!wdata->mr) {
			rc = -EAGAIN;
			goto async_writev_out;
		}
		req->Length = 0;
4393 req->DataOffset = 0;
		if (wdata->nr_pages > 1)
			req->RemainingBytes =
				cpu_to_le32(
					(wdata->nr_pages - 1) * wdata->pagesz -
					wdata->page_offset + wdata->tailsz
				);
		else
			req->RemainingBytes = cpu_to_le32(wdata->tailsz);
4402 req->Channel = SMB2_CHANNEL_RDMA_V1_INVALIDATE;
4403 if (need_invalidate)
4404 req->Channel = SMB2_CHANNEL_RDMA_V1;
4405 req->WriteChannelInfoOffset =
4406 cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
4407 req->WriteChannelInfoLength =
4408 cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
4409 v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
4410 v1->offset = cpu_to_le64(wdata->mr->mr->iova);
4411 v1->token = cpu_to_le32(wdata->mr->mr->rkey);
4412 v1->length = cpu_to_le32(wdata->mr->mr->length);
4415 iov[0].iov_len = total_len - 1;
4416 iov[0].iov_base = (char *)req;
4420 rqst.rq_pages = wdata->pages;
4421 rqst.rq_offset = wdata->page_offset;
4422 rqst.rq_npages = wdata->nr_pages;
4423 rqst.rq_pagesz = wdata->pagesz;
4424 rqst.rq_tailsz = wdata->tailsz;
4425 #ifdef CONFIG_CIFS_SMB_DIRECT
4427 iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
4431 cifs_dbg(FYI, "async write at %llu %u bytes\n",
4432 wdata->offset, wdata->bytes);
4434 #ifdef CONFIG_CIFS_SMB_DIRECT
	/* For RDMA read, I/O size is in RemainingBytes not in Length */
	if (!wdata->mr)
		req->Length = cpu_to_le32(wdata->bytes);
#else
	req->Length = cpu_to_le32(wdata->bytes);
#endif
4442 if (wdata->credits.value > 0) {
4443 shdr->CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
4444 SMB2_MAX_BUFFER_SIZE));
4445 shdr->CreditRequest = cpu_to_le16(le16_to_cpu(shdr->CreditCharge) + 8);
		rc = adjust_credits(server, &wdata->credits, wdata->bytes);
		if (rc)
			goto async_writev_out;
4451 flags |= CIFS_HAS_CREDITS;
4454 kref_get(&wdata->refcount);
4455 rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, NULL,
4456 wdata, flags, &wdata->credits);
		trace_smb3_write_err(0 /* no xid */,
				     le64_to_cpu(req->PersistentFileId),
				     tcon->tid, tcon->ses->Suid, wdata->offset,
				     wdata->bytes, rc);
		kref_put(&wdata->refcount, release);
4464 cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
	}

async_writev_out:
	cifs_small_buf_release(req);
	return rc;
}
4473 * SMB2_write function gets iov pointer to kvec array with n_vec as a length.
4474 * The length field from io_parms must be at least 1 and indicates a number of
4475 * elements with data to write that begins with position 1 in iov array. All
4476 * data length is specified by count.
4479 SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
4480 unsigned int *nbytes, struct kvec *iov, int n_vec)
4482 struct smb_rqst rqst;
4484 struct smb2_write_req *req = NULL;
4485 struct smb2_write_rsp *rsp = NULL;
4487 struct kvec rsp_iov;
4489 unsigned int total_len;
4490 struct TCP_Server_Info *server;
4497 if (!io_parms->server)
4498 io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
	server = io_parms->server;
	if (server == NULL)
		return -ECONNABORTED;
4503 rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
4504 (void **) &req, &total_len);
4508 if (smb3_encryption_required(io_parms->tcon))
4509 flags |= CIFS_TRANSFORM_REQ;
4511 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(io_parms->pid);
4513 req->PersistentFileId = cpu_to_le64(io_parms->persistent_fid);
4514 req->VolatileFileId = cpu_to_le64(io_parms->volatile_fid);
4515 req->WriteChannelInfoOffset = 0;
4516 req->WriteChannelInfoLength = 0;
4518 req->Length = cpu_to_le32(io_parms->length);
4519 req->Offset = cpu_to_le64(io_parms->offset);
4520 req->DataOffset = cpu_to_le16(
4521 offsetof(struct smb2_write_req, Buffer));
4522 req->RemainingBytes = 0;
4524 trace_smb3_write_enter(xid, io_parms->persistent_fid,
4525 io_parms->tcon->tid, io_parms->tcon->ses->Suid,
4526 io_parms->offset, io_parms->length);
4528 iov[0].iov_base = (char *)req;
4530 iov[0].iov_len = total_len - 1;
4532 memset(&rqst, 0, sizeof(struct smb_rqst));
4534 rqst.rq_nvec = n_vec + 1;
4536 rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
			    &rqst, &resp_buftype, flags, &rsp_iov);
4539 rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
4542 trace_smb3_write_err(xid,
4543 le64_to_cpu(req->PersistentFileId),
4544 io_parms->tcon->tid,
4545 io_parms->tcon->ses->Suid,
4546 io_parms->offset, io_parms->length, rc);
4547 cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
		cifs_dbg(VFS, "Send error in write = %d\n", rc);
	} else {
		*nbytes = le32_to_cpu(rsp->DataLength);
4551 trace_smb3_write_done(xid,
4552 le64_to_cpu(req->PersistentFileId),
4553 io_parms->tcon->tid,
4554 io_parms->tcon->ses->Suid,
4555 io_parms->offset, *nbytes);
	}

	cifs_small_buf_release(req);
4559 free_rsp_buf(resp_buftype, rsp);
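/*
 * posix_info_sid_size - return the on-the-wire size of the SID starting at
 * @beg, or -1 if the sub-authority count is implausible or the SID would run
 * past @end. Layout: 1 byte revision, 1 byte sub-authority count, 6 byte
 * authority, then 4 bytes per sub-authority.
 */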
4563 int posix_info_sid_size(const void *beg, const void *end)
4571 subauth = *(u8 *)(beg+1);
	if (subauth < 1 || subauth > 15)
		return -1;

	total = 1 + 1 + 6 + 4*subauth;
	if (beg + total > end)
		return -1;

	return total;
}
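/*
 * posix_info_parse - validate one SMB3.1.1 POSIX directory entry starting at
 * @beg, which must not extend past @end (if @end is NULL a bound is derived
 * from NextEntryOffset). Returns the total entry length on success and, when
 * @out is non-NULL, fills it with the parsed owner/group SIDs and name
 * length; returns -1 for any malformed field.
 */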
4582 int posix_info_parse(const void *beg, const void *end,
4583 struct smb2_posix_info_parsed *out)
4587 int owner_len, group_len;
4589 const void *owner_sid;
4590 const void *group_sid;
	/* if no end bound given, assume payload to be correct */
	if (!end) {
		const struct smb2_posix_info *p = beg;

		end = beg + le32_to_cpu(p->NextEntryOffset);
		/* last element will have a 0 offset, pick a sensible bound */
		if (!end)
			end = beg + 0xFFFF;
	}
4603 /* check base buf */
	if (beg + sizeof(struct smb2_posix_info) > end)
		return -1;
	total_len = sizeof(struct smb2_posix_info);
4608 /* check owner sid */
4609 owner_sid = beg + total_len;
	owner_len = posix_info_sid_size(owner_sid, end);
	if (owner_len < 0)
		return -1;
	total_len += owner_len;
4615 /* check group sid */
4616 group_sid = beg + total_len;
	group_len = posix_info_sid_size(group_sid, end);
	if (group_len < 0)
		return -1;
	total_len += group_len;
4622 /* check name len */
	if (beg + total_len + 4 > end)
		return -1;
	name_len = le32_to_cpu(*(__le32 *)(beg + total_len));
	if (name_len < 1 || name_len > 0xFFFF)
		return -1;
	total_len += 4;

	/* check name */
	name = beg + total_len;
	if (name + name_len > end)
		return -1;
	total_len += name_len;
	if (out) {
		out->size = total_len;
		out->name_len = name_len;
		out->name = name;
		memcpy(&out->owner, owner_sid, owner_len);
		memcpy(&out->group, group_sid, group_len);
	}
	return total_len;
}
4647 static int posix_info_extra_size(const void *beg, const void *end)
{
	int len = posix_info_parse(beg, end, NULL);

	if (len < 0)
		return -1;
	return len - sizeof(struct smb2_posix_info);
}
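/*
 * num_entries - walk the search entries in a QUERY_DIRECTORY response,
 * counting how many complete entries of at least @size bytes fit between
 * @bufstart and @end_of_buf and remembering the last valid entry in
 * *lastentry. Truncated or overflowing entries stop the walk so a malformed
 * response cannot push us past the end of the frame.
 */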
4657 num_entries(int infotype, char *bufstart, char *end_of_buf, char **lastentry,
4661 unsigned int entrycount = 0;
4662 unsigned int next_offset = 0;
4664 FILE_DIRECTORY_INFO *dir_info;
	if (bufstart == NULL)
		return 0;

	entryptr = bufstart;

	while (1) {
4672 if (entryptr + next_offset < entryptr ||
4673 entryptr + next_offset > end_of_buf ||
4674 entryptr + next_offset + size > end_of_buf) {
4675 cifs_dbg(VFS, "malformed search entry would overflow\n");
4679 entryptr = entryptr + next_offset;
4680 dir_info = (FILE_DIRECTORY_INFO *)entryptr;
4682 if (infotype == SMB_FIND_FILE_POSIX_INFO)
			len = posix_info_extra_size(entryptr, end_of_buf);
		else
			len = le32_to_cpu(dir_info->FileNameLength);
		if (len < 0 ||
		    entryptr + len < entryptr ||
4689 entryptr + len > end_of_buf ||
4690 entryptr + len + size > end_of_buf) {
4691 cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
4696 *lastentry = entryptr;
4699 next_offset = le32_to_cpu(dir_info->NextEntryOffset);
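/*
 * SMB2_query_directory_init - build (but do not send) a QUERY_DIRECTORY
 * request for the "*" wildcard at the given resume index. The output buffer
 * size is clamped to the server's maximum and to 64KB; the caller provides
 * the iovecs in @rqst and later releases them with SMB2_query_directory_free().
 */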
4710 int SMB2_query_directory_init(const unsigned int xid,
4711 struct cifs_tcon *tcon,
4712 struct TCP_Server_Info *server,
4713 struct smb_rqst *rqst,
4714 u64 persistent_fid, u64 volatile_fid,
4715 int index, int info_level)
4717 struct smb2_query_directory_req *req;
4718 unsigned char *bufptr;
4719 __le16 asteriks = cpu_to_le16('*');
4720 unsigned int output_size = CIFSMaxBufSize -
4721 MAX_SMB2_CREATE_RESPONSE_SIZE -
4722 MAX_SMB2_CLOSE_RESPONSE_SIZE;
4723 unsigned int total_len;
4724 struct kvec *iov = rqst->rq_iov;
4727 rc = smb2_plain_req_init(SMB2_QUERY_DIRECTORY, tcon, server,
4728 (void **) &req, &total_len);
4732 switch (info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		req->FileInformationClass = SMB_FIND_FILE_POSIX_INFO;
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      info_level);
		return -EINVAL;
	}

	req->FileIndex = cpu_to_le32(index);
4749 req->PersistentFileId = persistent_fid;
4750 req->VolatileFileId = volatile_fid;
4753 bufptr = req->Buffer;
4754 memcpy(bufptr, &asteriks, len);
4756 req->FileNameOffset =
4757 cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1);
4758 req->FileNameLength = cpu_to_le16(len);
4760 * BB could be 30 bytes or so longer if we used SMB2 specific
4761 * buffer lengths, but this is safe and close enough.
4763 output_size = min_t(unsigned int, output_size, server->maxBuf);
4764 output_size = min_t(unsigned int, output_size, 2 << 15);
4765 req->OutputBufferLength = cpu_to_le32(output_size);
4767 iov[0].iov_base = (char *)req;
4769 iov[0].iov_len = total_len - 1;
4771 iov[1].iov_base = (char *)(req->Buffer);
4772 iov[1].iov_len = len;
4774 trace_smb3_query_dir_enter(xid, persistent_fid, tcon->tid,
4775 tcon->ses->Suid, index, output_size);
4780 void SMB2_query_directory_free(struct smb_rqst *rqst)
4782 if (rqst && rqst->rq_iov) {
4783 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
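/*
 * smb2_parse_query_directory - validate the OutputBuffer of a QUERY_DIRECTORY
 * response and fill in the cifs_search_info: start of the entry block, number
 * of entries, pointer to the last entry, and whether the response sits in a
 * small or large buffer. The response buffer is kept and owned by srch_inf
 * from this point on.
 */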
4788 smb2_parse_query_directory(struct cifs_tcon *tcon,
4789 struct kvec *rsp_iov,
4791 struct cifs_search_info *srch_inf)
4793 struct smb2_query_directory_rsp *rsp;
4794 size_t info_buf_size;
4798 rsp = (struct smb2_query_directory_rsp *)rsp_iov->iov_base;
4800 switch (srch_inf->info_level) {
	case SMB_FIND_FILE_DIRECTORY_INFO:
		info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
		break;
	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
		info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
		break;
	case SMB_FIND_FILE_POSIX_INFO:
		/* note that posix payloads are variable size */
		info_buf_size = sizeof(struct smb2_posix_info);
		break;
	default:
		cifs_tcon_dbg(VFS, "info level %u isn't supported\n",
			      srch_inf->info_level);
		return -EINVAL;
	}
4817 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
			       le32_to_cpu(rsp->OutputBufferLength), rsp_iov,
			       info_buf_size);
	if (rc) {
		cifs_tcon_dbg(VFS, "bad info payload");
		return rc;
	}
4825 srch_inf->unicode = true;
4827 if (srch_inf->ntwrk_buf_start) {
4828 if (srch_inf->smallBuf)
			cifs_small_buf_release(srch_inf->ntwrk_buf_start);
		else
			cifs_buf_release(srch_inf->ntwrk_buf_start);
	}
4833 srch_inf->ntwrk_buf_start = (char *)rsp;
4834 srch_inf->srch_entries_start = srch_inf->last_entry =
4835 (char *)rsp + le16_to_cpu(rsp->OutputBufferOffset);
4836 end_of_smb = rsp_iov->iov_len + (char *)rsp;
4838 srch_inf->entries_in_buffer = num_entries(
4839 srch_inf->info_level,
			srch_inf->srch_entries_start,
			end_of_smb,
			&srch_inf->last_entry,
			info_buf_size);
4845 srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
4846 cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
4847 srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
4848 srch_inf->srch_entries_start, srch_inf->last_entry);
4849 if (resp_buftype == CIFS_LARGE_BUFFER)
4850 srch_inf->smallBuf = false;
4851 else if (resp_buftype == CIFS_SMALL_BUFFER)
4852 srch_inf->smallBuf = true;
	else
		cifs_tcon_dbg(VFS, "Invalid search buffer type\n");

	return 0;
}
4860 SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
4861 u64 persistent_fid, u64 volatile_fid, int index,
4862 struct cifs_search_info *srch_inf)
4864 struct smb_rqst rqst;
4865 struct kvec iov[SMB2_QUERY_DIRECTORY_IOV_SIZE];
4866 struct smb2_query_directory_rsp *rsp = NULL;
4867 int resp_buftype = CIFS_NO_BUFFER;
4868 struct kvec rsp_iov;
4870 struct cifs_ses *ses = tcon->ses;
4871 struct TCP_Server_Info *server = cifs_pick_channel(ses);
	if (!ses || !(ses->server))
		return -EIO;
4877 if (smb3_encryption_required(tcon))
4878 flags |= CIFS_TRANSFORM_REQ;
4880 memset(&rqst, 0, sizeof(struct smb_rqst));
4881 memset(&iov, 0, sizeof(iov));
4883 rqst.rq_nvec = SMB2_QUERY_DIRECTORY_IOV_SIZE;
4885 rc = SMB2_query_directory_init(xid, tcon, server,
4886 &rqst, persistent_fid,
4887 volatile_fid, index,
4888 srch_inf->info_level);
4892 rc = cifs_send_recv(xid, ses, server,
4893 &rqst, &resp_buftype, flags, &rsp_iov);
4894 rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
4897 if (rc == -ENODATA &&
4898 rsp->hdr.Status == STATUS_NO_MORE_FILES) {
4899 trace_smb3_query_dir_done(xid, persistent_fid,
4900 tcon->tid, tcon->ses->Suid, index, 0);
			srch_inf->endOfSearch = true;
			rc = 0;
		} else {
			trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
4905 tcon->ses->Suid, index, 0, rc);
			cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
		}
		goto qdir_exit;
	}
4911 rc = smb2_parse_query_directory(tcon, &rsp_iov, resp_buftype,
4914 trace_smb3_query_dir_err(xid, persistent_fid, tcon->tid,
					 tcon->ses->Suid, index, 0, rc);
		goto qdir_exit;
	}
	resp_buftype = CIFS_NO_BUFFER;
4920 trace_smb3_query_dir_done(xid, persistent_fid, tcon->tid,
4921 tcon->ses->Suid, index, srch_inf->entries_in_buffer);
qdir_exit:
	SMB2_query_directory_free(&rqst);
	free_rsp_buf(resp_buftype, rsp);
	return rc;
}
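/*
 * SMB2_set_info_init - build a SET_INFO request. The first data buffer is
 * copied inline after the fixed part of the request; any additional buffers
 * in @data/@size are attached as further iovecs and added to BufferLength.
 */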
4930 SMB2_set_info_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
4931 struct smb_rqst *rqst,
4932 u64 persistent_fid, u64 volatile_fid, u32 pid,
4933 u8 info_class, u8 info_type, u32 additional_info,
4934 void **data, unsigned int *size)
4936 struct smb2_set_info_req *req;
4937 struct kvec *iov = rqst->rq_iov;
4938 unsigned int i, total_len;
4941 rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, server,
4942 (void **) &req, &total_len);
4946 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
4947 req->InfoType = info_type;
4948 req->FileInfoClass = info_class;
4949 req->PersistentFileId = persistent_fid;
4950 req->VolatileFileId = volatile_fid;
4951 req->AdditionalInformation = cpu_to_le32(additional_info);
	req->BufferOffset =
		cpu_to_le16(sizeof(struct smb2_set_info_req) - 1);
4955 req->BufferLength = cpu_to_le32(*size);
4957 memcpy(req->Buffer, *data, *size);
4960 iov[0].iov_base = (char *)req;
4962 iov[0].iov_len = total_len - 1;
4964 for (i = 1; i < rqst->rq_nvec; i++) {
4965 le32_add_cpu(&req->BufferLength, size[i]);
4966 iov[i].iov_base = (char *)data[i];
4967 iov[i].iov_len = size[i];
4974 SMB2_set_info_free(struct smb_rqst *rqst)
4976 if (rqst && rqst->rq_iov)
4977 cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
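/*
 * send_set_info - common helper for the SMB2_set_* wrappers below: builds a
 * SET_INFO request from @num data buffers, sends it synchronously and records
 * failures in the SMB2_SET_INFO statistics and tracepoints.
 */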
4981 send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
4982 u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
4983 u8 info_type, u32 additional_info, unsigned int num,
4984 void **data, unsigned int *size)
4986 struct smb_rqst rqst;
4987 struct smb2_set_info_rsp *rsp = NULL;
4989 struct kvec rsp_iov;
4992 struct cifs_ses *ses = tcon->ses;
4993 struct TCP_Server_Info *server = cifs_pick_channel(ses);
	if (!ses || !server)
		return -EIO;
5002 if (smb3_encryption_required(tcon))
5003 flags |= CIFS_TRANSFORM_REQ;
	iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
	if (!iov)
		return -ENOMEM;
5009 memset(&rqst, 0, sizeof(struct smb_rqst));
5013 rc = SMB2_set_info_init(tcon, server,
5014 &rqst, persistent_fid, volatile_fid, pid,
				info_class, info_type, additional_info,
				data, size);
	if (rc) {
		kfree(iov);
		return rc;
	}

	rc = cifs_send_recv(xid, ses, server,
5024 &rqst, &resp_buftype, flags,
5026 SMB2_set_info_free(&rqst);
5027 rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
5030 cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
5031 trace_smb3_set_info_err(xid, persistent_fid, tcon->tid,
5032 ses->Suid, info_class, (__u32)info_type, rc);
	}

	free_rsp_buf(resp_buftype, rsp);
	kfree(iov);
	return rc;
}
5041 SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
5042 u64 volatile_fid, u32 pid, __le64 *eof)
	struct smb2_file_eof_info info;
	void *data;
	unsigned int size;

	info.EndOfFile = *eof;

	data = &info;
	size = sizeof(struct smb2_file_eof_info);
5053 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5054 pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
5055 0, 1, &data, &size);
5059 SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
5060 u64 persistent_fid, u64 volatile_fid,
5061 struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
5063 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5064 current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
5065 1, (void **)&pnntsd, &pacllen);
5069 SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
5070 u64 persistent_fid, u64 volatile_fid,
5071 struct smb2_file_full_ea_info *buf, int len)
5073 return send_set_info(xid, tcon, persistent_fid, volatile_fid,
5074 current->tgid, FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE,
5075 0, 1, (void **)&buf, &len);
5079 SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
		  const u64 persistent_fid, const u64 volatile_fid,
		  const __u8 oplock_level)
{
5083 struct smb_rqst rqst;
5085 struct smb2_oplock_break *req = NULL;
5086 struct cifs_ses *ses = tcon->ses;
5087 struct TCP_Server_Info *server = cifs_pick_channel(ses);
5088 int flags = CIFS_OBREAK_OP;
5089 unsigned int total_len;
5091 struct kvec rsp_iov;
5094 cifs_dbg(FYI, "SMB2_oplock_break\n");
5095 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5096 (void **) &req, &total_len);
5100 if (smb3_encryption_required(tcon))
5101 flags |= CIFS_TRANSFORM_REQ;
5103 req->VolatileFid = volatile_fid;
5104 req->PersistentFid = persistent_fid;
5105 req->OplockLevel = oplock_level;
5106 req->hdr.CreditRequest = cpu_to_le16(1);
5108 flags |= CIFS_NO_RSP_BUF;
5110 iov[0].iov_base = (char *)req;
5111 iov[0].iov_len = total_len;
5113 memset(&rqst, 0, sizeof(struct smb_rqst));
5117 rc = cifs_send_recv(xid, ses, server,
5118 &rqst, &resp_buf_type, flags, &rsp_iov);
5119 cifs_small_buf_release(req);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
		cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
	}

	return rc;
}
5130 smb2_copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
5131 struct kstatfs *kst)
5133 kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
5134 le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
5135 kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
5136 kst->f_bfree = kst->f_bavail =
5137 le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
5142 copy_posix_fs_info_to_kstatfs(FILE_SYSTEM_POSIX_INFO *response_data,
5143 struct kstatfs *kst)
5145 kst->f_bsize = le32_to_cpu(response_data->BlockSize);
5146 kst->f_blocks = le64_to_cpu(response_data->TotalBlocks);
5147 kst->f_bfree = le64_to_cpu(response_data->BlocksAvail);
5148 if (response_data->UserBlocksAvail == cpu_to_le64(-1))
5149 kst->f_bavail = kst->f_bfree;
5151 kst->f_bavail = le64_to_cpu(response_data->UserBlocksAvail);
5152 if (response_data->TotalFileNodes != cpu_to_le64(-1))
5153 kst->f_files = le64_to_cpu(response_data->TotalFileNodes);
5154 if (response_data->FreeFileNodes != cpu_to_le64(-1))
5155 kst->f_ffree = le64_to_cpu(response_data->FreeFileNodes);
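/*
 * build_qfs_info_req - build a QUERY_INFO request for filesystem
 * (SMB2_O_INFO_FILESYSTEM) information of the given level on an open handle;
 * used by the SMB2_QFS_* helpers below.
 */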
5161 build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon,
5162 struct TCP_Server_Info *server,
		   int level, int outbuf_len, u64 persistent_fid,
		   u64 volatile_fid)
{
	int rc;
5167 struct smb2_query_info_req *req;
5168 unsigned int total_len;
5170 cifs_dbg(FYI, "Query FSInfo level %d\n", level);
	if ((tcon->ses == NULL) || server == NULL)
		return -EIO;
5175 rc = smb2_plain_req_init(SMB2_QUERY_INFO, tcon, server,
5176 (void **) &req, &total_len);
5180 req->InfoType = SMB2_O_INFO_FILESYSTEM;
5181 req->FileInfoClass = level;
5182 req->PersistentFileId = persistent_fid;
5183 req->VolatileFileId = volatile_fid;
5185 req->InputBufferOffset =
5186 cpu_to_le16(sizeof(struct smb2_query_info_req) - 1);
5187 req->OutputBufferLength = cpu_to_le32(
5188 outbuf_len + sizeof(struct smb2_query_info_rsp) - 1);
5190 iov->iov_base = (char *)req;
5191 iov->iov_len = total_len;
5196 SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
5197 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5199 struct smb_rqst rqst;
5200 struct smb2_query_info_rsp *rsp = NULL;
5202 struct kvec rsp_iov;
5205 struct cifs_ses *ses = tcon->ses;
5206 struct TCP_Server_Info *server = cifs_pick_channel(ses);
5207 FILE_SYSTEM_POSIX_INFO *info = NULL;
5210 rc = build_qfs_info_req(&iov, tcon, server,
5211 FS_POSIX_INFORMATION,
5212 sizeof(FILE_SYSTEM_POSIX_INFO),
5213 persistent_fid, volatile_fid);
5217 if (smb3_encryption_required(tcon))
5218 flags |= CIFS_TRANSFORM_REQ;
5220 memset(&rqst, 0, sizeof(struct smb_rqst));
5224 rc = cifs_send_recv(xid, ses, server,
5225 &rqst, &resp_buftype, flags, &rsp_iov);
5226 cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto posix_qfsinf_exit;
	}
5231 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5233 info = (FILE_SYSTEM_POSIX_INFO *)(
5234 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5235 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5236 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5237 sizeof(FILE_SYSTEM_POSIX_INFO));
	if (!rc)
		copy_posix_fs_info_to_kstatfs(info, fsdata);
posix_qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}
5247 SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
5248 u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
5250 struct smb_rqst rqst;
5251 struct smb2_query_info_rsp *rsp = NULL;
5253 struct kvec rsp_iov;
5256 struct cifs_ses *ses = tcon->ses;
5257 struct TCP_Server_Info *server = cifs_pick_channel(ses);
5258 struct smb2_fs_full_size_info *info = NULL;
5261 rc = build_qfs_info_req(&iov, tcon, server,
5262 FS_FULL_SIZE_INFORMATION,
5263 sizeof(struct smb2_fs_full_size_info),
5264 persistent_fid, volatile_fid);
5268 if (smb3_encryption_required(tcon))
5269 flags |= CIFS_TRANSFORM_REQ;
5271 memset(&rqst, 0, sizeof(struct smb_rqst));
5275 rc = cifs_send_recv(xid, ses, server,
5276 &rqst, &resp_buftype, flags, &rsp_iov);
5277 cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsinf_exit;
	}
5282 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5284 info = (struct smb2_fs_full_size_info *)(
5285 le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
5286 rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
5287 le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
5288 sizeof(struct smb2_fs_full_size_info));
	if (!rc)
		smb2_copy_fs_info_to_kstatfs(info, fsdata);
qfsinf_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}
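/*
 * SMB2_QFS_attr - query filesystem attribute, device, sector size or volume
 * information and cache the result in the tcon (fsAttrInfo, fsDevInfo,
 * sector size flags, volume serial number and creation time).
 */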
5298 SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
5299 u64 persistent_fid, u64 volatile_fid, int level)
5301 struct smb_rqst rqst;
5302 struct smb2_query_info_rsp *rsp = NULL;
5304 struct kvec rsp_iov;
5306 int resp_buftype, max_len, min_len;
5307 struct cifs_ses *ses = tcon->ses;
5308 struct TCP_Server_Info *server = cifs_pick_channel(ses);
5309 unsigned int rsp_len, offset;
5312 if (level == FS_DEVICE_INFORMATION) {
5313 max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
5314 min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
5315 } else if (level == FS_ATTRIBUTE_INFORMATION) {
5316 max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
5317 min_len = MIN_FS_ATTR_INFO_SIZE;
5318 } else if (level == FS_SECTOR_SIZE_INFORMATION) {
5319 max_len = sizeof(struct smb3_fs_ss_info);
5320 min_len = sizeof(struct smb3_fs_ss_info);
5321 } else if (level == FS_VOLUME_INFORMATION) {
5322 max_len = sizeof(struct smb3_fs_vol_info) + MAX_VOL_LABEL_LEN;
5323 min_len = sizeof(struct smb3_fs_vol_info);
5325 cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
	rc = build_qfs_info_req(&iov, tcon, server,
				level, max_len,
				persistent_fid, volatile_fid);
5335 if (smb3_encryption_required(tcon))
5336 flags |= CIFS_TRANSFORM_REQ;
5338 memset(&rqst, 0, sizeof(struct smb_rqst));
5342 rc = cifs_send_recv(xid, ses, server,
5343 &rqst, &resp_buftype, flags, &rsp_iov);
5344 cifs_small_buf_release(iov.iov_base);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
		goto qfsattr_exit;
	}
5349 rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
5351 rsp_len = le32_to_cpu(rsp->OutputBufferLength);
5352 offset = le16_to_cpu(rsp->OutputBufferOffset);
5353 rc = smb2_validate_iov(offset, rsp_len, &rsp_iov, min_len);
	if (rc)
		goto qfsattr_exit;

	if (level == FS_ATTRIBUTE_INFORMATION)
5358 memcpy(&tcon->fsAttrInfo, offset
5359 + (char *)rsp, min_t(unsigned int,
5361 else if (level == FS_DEVICE_INFORMATION)
5362 memcpy(&tcon->fsDevInfo, offset
5363 + (char *)rsp, sizeof(FILE_SYSTEM_DEVICE_INFO));
5364 else if (level == FS_SECTOR_SIZE_INFORMATION) {
5365 struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
5366 (offset + (char *)rsp);
5367 tcon->ss_flags = le32_to_cpu(ss_info->Flags);
5368 tcon->perf_sector_size =
5369 le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
5370 } else if (level == FS_VOLUME_INFORMATION) {
5371 struct smb3_fs_vol_info *vol_info = (struct smb3_fs_vol_info *)
5372 (offset + (char *)rsp);
5373 tcon->vol_serial_number = vol_info->VolumeSerialNumber;
5374 tcon->vol_create_time = vol_info->VolumeCreationTime;
	}

qfsattr_exit:
	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
	return rc;
}
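/*
 * smb2_lockv - send a single SMB2 LOCK request carrying @num_lock lock
 * elements (byte-range lock/unlock requests) for the given open handle; the
 * lock array is passed as a second iovec after the fixed request.
 */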
5383 smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
5384 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
5385 const __u32 num_lock, struct smb2_lock_element *buf)
5387 struct smb_rqst rqst;
5389 struct smb2_lock_req *req = NULL;
5391 struct kvec rsp_iov;
5394 int flags = CIFS_NO_RSP_BUF;
5395 unsigned int total_len;
5396 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
5398 cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
5400 rc = smb2_plain_req_init(SMB2_LOCK, tcon, server,
5401 (void **) &req, &total_len);
5405 if (smb3_encryption_required(tcon))
5406 flags |= CIFS_TRANSFORM_REQ;
5408 req->hdr.Id.SyncId.ProcessId = cpu_to_le32(pid);
5409 req->LockCount = cpu_to_le16(num_lock);
5411 req->PersistentFileId = persist_fid;
5412 req->VolatileFileId = volatile_fid;
5414 count = num_lock * sizeof(struct smb2_lock_element);
5416 iov[0].iov_base = (char *)req;
5417 iov[0].iov_len = total_len - sizeof(struct smb2_lock_element);
5418 iov[1].iov_base = (char *)buf;
5419 iov[1].iov_len = count;
5421 cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
5423 memset(&rqst, 0, sizeof(struct smb_rqst));
5427 rc = cifs_send_recv(xid, tcon->ses, server,
5428 &rqst, &resp_buf_type, flags,
5430 cifs_small_buf_release(req);
5432 cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
5433 cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
5434 trace_smb3_lock_err(xid, persist_fid, tcon->tid,
				    tcon->ses->Suid, rc);
	}

	return rc;
}
5442 SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
5443 const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
	  const __u64 length, const __u64 offset, const __u32 lock_flags,
	  const bool wait)
{
5447 struct smb2_lock_element lock;
5449 lock.Offset = cpu_to_le64(offset);
5450 lock.Length = cpu_to_le64(length);
5451 lock.Flags = cpu_to_le32(lock_flags);
5452 if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
5453 lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
5455 return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
5459 SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
5460 __u8 *lease_key, const __le32 lease_state)
5462 struct smb_rqst rqst;
5464 struct smb2_lease_ack *req = NULL;
5465 struct cifs_ses *ses = tcon->ses;
5466 int flags = CIFS_OBREAK_OP;
5467 unsigned int total_len;
5469 struct kvec rsp_iov;
5471 __u64 *please_key_high;
5472 __u64 *please_key_low;
5473 struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
5475 cifs_dbg(FYI, "SMB2_lease_break\n");
5476 rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
5477 (void **) &req, &total_len);
5481 if (smb3_encryption_required(tcon))
5482 flags |= CIFS_TRANSFORM_REQ;
5484 req->hdr.CreditRequest = cpu_to_le16(1);
5485 req->StructureSize = cpu_to_le16(36);
	total_len += 12;

	memcpy(req->LeaseKey, lease_key, 16);
5489 req->LeaseState = lease_state;
5491 flags |= CIFS_NO_RSP_BUF;
5493 iov[0].iov_base = (char *)req;
5494 iov[0].iov_len = total_len;
5496 memset(&rqst, 0, sizeof(struct smb_rqst));
5500 rc = cifs_send_recv(xid, ses, server,
5501 &rqst, &resp_buf_type, flags, &rsp_iov);
5502 cifs_small_buf_release(req);
5504 please_key_low = (__u64 *)lease_key;
5505 please_key_high = (__u64 *)(lease_key+8);
	if (rc) {
		cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
5508 trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
5509 ses->Suid, *please_key_low, *please_key_high, rc);
5510 cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
5512 trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
5513 ses->Suid, *please_key_low, *please_key_high);