 * Copyright (C) International Business Machines Corp., 2002,2008
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */
	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
_free_xid(unsigned int xid)
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
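
/*
 * Illustrative caller pattern (a sketch, not code from this file): vfs entry
 * points in cifs typically bracket their work with the get_xid()/free_xid()
 * wrappers that call into _get_xid()/_free_xid(), e.g.
 *
 *	unsigned int xid = get_xid();
 *	rc = some_smb_operation(xid, tcon, ...);   // hypothetical callee
 *	free_xid(xid);
 *
 * so the xid identifies the vfs request for debugging and the active-request
 * counters above stay balanced.
 */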
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	atomic_inc(&sesInfoAllocCount);
	ret_buf->status = CifsNew;
	INIT_LIST_HEAD(&ret_buf->smb_ses_list);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	mutex_init(&ret_buf->session_mutex);
	spin_lock_init(&ret_buf->iface_lock);

sesInfoFree(struct cifs_ses *buf_to_free)
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kzfree(buf_to_free->auth_key.response);
	kfree(buf_to_free->iface_list);
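
/*
 * Note on the free path above: the session password and auth_key.response
 * hold credential material, so they are released with kzfree(), which zeroes
 * the buffer before freeing it; the remaining strings are plain kfree()d.
 */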
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	atomic_inc(&tconInfoAllocCount);
	ret_buf->tidStatus = CifsNew;
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	spin_lock_init(&ret_buf->open_file_lock);
	mutex_init(&ret_buf->crfid.fid_mutex);
	ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
				     GFP_KERNEL);
	spin_lock_init(&ret_buf->stat_lock);
tconInfoFree(struct cifs_tcon *buf_to_free)
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");

	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	kzfree(buf_to_free->password);
	kfree(buf_to_free->crfid.fid);
	struct smb_hdr *ret_buf = NULL;
	/*
	 * The SMB2 header is bigger than the CIFS one, so zeroing a few
	 * extra bytes for a CIFS request does no harm.
	 */
	size_t buf_size = sizeof(struct smb2_sync_hdr);

	/*
	 * We could use the negotiated size instead of max_msgsize, but it may
	 * be more efficient to always allocate the same size: it is only
	 * slightly larger than necessary, and maxbuffersize defaults to it
	 * and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
cifs_buf_release(void *buf_to_free)
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/

	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
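
/*
 * Illustrative pairing (a sketch, not code from this file): a buffer taken
 * from the request mempool with cifs_buf_get() must go back to the same pool
 * via cifs_buf_release(), e.g.
 *
 *	struct smb_hdr *buf = cifs_buf_get();
 *	... build and send the request ...
 *	cifs_buf_release(buf);
 *
 * The small-buffer variants below follow the same pattern with
 * cifs_small_buf_get()/cifs_small_buf_release().
 */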
cifs_small_buf_get(void)
	struct smb_hdr *ret_buf = NULL;
	/*
	 * We could use the negotiated size instead of max_msgsize, but it may
	 * be more efficient to always allocate the same size: it is only
	 * slightly larger than necessary, and maxbuffersize defaults to it
	 * and cannot be bigger.
	 */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
	atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
cifs_small_buf_release(void *buf_to_free)
	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");

	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
free_rsp_buf(int resp_buftype, void *rsp)
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
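
/*
 * Receive-path note (descriptive sketch, not code from this file): callers
 * get a resp_buftype along with the response buffer and hand both back to
 * free_rsp_buf(); CIFS_NO_BUFFER (or an error with no buffer) falls through
 * with nothing to free. A typical caller has roughly this shape:
 *
 *	rc = SMB2_some_op(xid, tcon, ..., &rsp_iov, &resp_buftype);
 *	...
 *	free_rsp_buf(resp_buftype, rsp_iov.iov_base);
 */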
/* NB: MID can not be set if treeCon not passed in, in that
   case it is responsibility of caller to set the mid */
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */);

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	buffer->Tid = treeCon->tid;
	if (treeCon->ses->capabilities & CAP_UNICODE)
		buffer->Flags2 |= SMBFLG2_UNICODE;
	if (treeCon->ses->capabilities & CAP_STATUS32)
		buffer->Flags2 |= SMBFLG2_ERR_STATUS;

	/* Uid is not converted */
	buffer->Uid = treeCon->ses->Suid;
	buffer->Mid = get_next_mid(treeCon->ses->server);

	if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
		buffer->Flags2 |= SMBFLG2_DFS;
	if (treeCon->nocase)
		buffer->Flags |= SMBFLG_CASELESS;
	if ((treeCon->ses) && (treeCon->ses->server))
		if (treeCon->ses->server->sign)
			buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
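
/*
 * Worked example for the length math above (illustrative only): struct
 * smb_hdr is 37 bytes including the 4-byte RFC 1001 length field and the
 * WordCount byte, so for a request with word_count = 8 the stored value is
 *
 *	2 * 8 + 37 - 4 + 2 = 51
 *
 * i.e. 32 header bytes + 1 word-count byte + 16 parameter bytes + 2 bytes
 * for the byte count (bcc) field, not counting the RFC 1001 length itself.
 */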
check_smb_hdr(struct smb_hdr *smb)
	/* does it have the right SMB "signature" ? The protocol field must
	   hold 0xFF, 'S', 'M', 'B', which reads as 0x424d53ff when taken as
	   a little-endian 32-bit value. */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			&& (smb->Status.CifsError != 0)) {
			/* it's an error return */

			/* some error cases do not return wct and bcc */

		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;

			cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");

			cifs_dbg(VFS, "Length less than smb header size\n");
	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb))

	clc_len = smbCalcSize(smb, server);

	if (4 + rfclen != total_read) {
		cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",

	if (4 + rfclen != clc_len) {
		__u16 mid = get_mid(smb);
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */

		cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
			 clc_len, 4 + rfclen, mid);

		if (4 + rfclen < clc_len) {
			cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */

		if (pSMBr->hdr.Status.CifsError) {
			cifs_dbg(FYI, "notify err 0x%x\n",
				 pSMBr->hdr.Status.CifsError);

	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)

	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging error on invalid handle on oplock
		   break - harmless race between close request and oplock
		   break response is expected from time to time writing out
		   large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cifs_dbg(FYI, "invalid handle on oplock break\n");
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {

		return false; /* on valid oplock brk we get "request" */

	if (pSMB->hdr.WordCount != 8)

	cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&tcon->open_file_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,

				if (pSMB->Fid != netfile->fid.netfid)

				cifs_dbg(FYI, "file id match, oplock break\n");
				pCifsInode = CIFS_I(d_inode(netfile->dentry));

				set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,

				/* Set flag if the server downgrades the oplock */
				if (pSMB->OplockLevel)
					CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
					CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,

				queue_work(cifsoplockd_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&tcon->open_file_lock);
				spin_unlock(&cifs_tcp_ses_lock);

			spin_unlock(&tcon->open_file_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cifs_dbg(FYI, "No matching file for oplock break\n");

	spin_unlock(&cifs_tcp_ses_lock);
	cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
dump_smb(void *buf, int smb_buf_length)

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
			 cifs_sb_master_tcon(cifs_sb)->treeName);
void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);

	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);

		spin_unlock(&cinode->writers_lock);

	spin_unlock(&cinode->writers_lock);
void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}
void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}
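
/*
 * How the pieces above fit together (descriptive sketch): a thread that wants
 * to dirty pages calls cifs_get_writer() and, when done, cifs_put_writer();
 * the last writer wakes anyone waiting on CIFS_INODE_PENDING_WRITERS. The
 * oplock break path sets CIFS_INODE_PENDING_OPLOCK_BREAK (see
 * is_valid_oplock_break() above) and the break worker calls
 * cifs_done_oplock_break() when it has finished, unblocking writers that are
 * waiting in cifs_get_writer(). Typical writer shape:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... dirty/write pages ...
 *	cifs_put_writer(cinode);
 */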
backup_cred(struct cifs_sb_info *cifs_sb)
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))

	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
cifs_del_pending_open(struct cifs_pending_open *open)
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
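
/*
 * Background note (descriptive, hedged): a cifs_pending_open records the
 * lease key of an open that has been sent to the server but is not yet on
 * the tcon's open file list, so that a lease/oplock break arriving in that
 * window can still be matched. The open path registers one with
 * cifs_add_pending_open() and removes it with cifs_del_pending_open() once
 * the cifsFileInfo is set up (or the open fails).
 */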
/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 *	- on success - 0
 *	- on failure - errno
 */
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
	struct dfs_referral_level_3 *ref;

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS, "num_referrals: must be at least > 0, but we get num_referrals = %d\n",
		goto parse_DFS_referrals_exit;

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		goto parse_DFS_referrals_exit;
	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		goto parse_DFS_referrals_exit;

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
				goto parse_DFS_referrals_exit;

			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),

			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			goto parse_DFS_referrals_exit;

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			goto parse_DFS_referrals_exit;

parse_DFS_referrals_exit:
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
struct cifs_aio_ctx *
cifs_aio_ctx_alloc(void)
	struct cifs_aio_ctx *ctx;

	ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);

	INIT_LIST_HEAD(&ctx->list);
	mutex_init(&ctx->aio_mutex);
	init_completion(&ctx->done);
	kref_init(&ctx->refcount);

cifs_aio_ctx_release(struct kref *refcount)
	struct cifs_aio_ctx *ctx = container_of(refcount,
					struct cifs_aio_ctx, refcount);

	cifsFileInfo_put(ctx->cfile);
#define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
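
/*
 * Allocation-strategy note (descriptive): setup_aio_ctx_iter() below needs a
 * page pointer array and a bio_vec array sized for the whole iterator. When
 * either array fits in CIFS_AIO_KMALLOC_LIMIT (1 MiB) it is taken from the
 * slab with kmalloc_array(); larger arrays fall back to vmalloc() so a huge
 * direct I/O does not depend on finding that much contiguous memory.
 */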
setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
	unsigned int cur_npages;
	unsigned int npages = 0;
	size_t count = iov_iter_count(iter);
	unsigned int saved_len;
	unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
	struct page **pages = NULL;
	struct bio_vec *bv = NULL;

	if (iter->type & ITER_KVEC) {
		memcpy(&ctx->iter, iter, sizeof(struct iov_iter));
		iov_iter_advance(iter, count);
	if (max_pages * sizeof(struct bio_vec) <= CIFS_AIO_KMALLOC_LIMIT)
		bv = kmalloc_array(max_pages, sizeof(struct bio_vec),
				   GFP_KERNEL);

		bv = vmalloc(array_size(max_pages, sizeof(struct bio_vec)));

	if (max_pages * sizeof(struct page *) <= CIFS_AIO_KMALLOC_LIMIT)
		pages = kmalloc_array(max_pages, sizeof(struct page *),
				      GFP_KERNEL);

		pages = vmalloc(array_size(max_pages, sizeof(struct page *)));
	while (count && npages < max_pages) {
		rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);
			cifs_dbg(VFS, "couldn't get user pages (rc=%zd)\n", rc);

			cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,

		iov_iter_advance(iter, rc);

		cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);

		if (npages + cur_npages > max_pages) {
			cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
				 npages + cur_npages, max_pages);

		for (i = 0; i < cur_npages; i++) {
			len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
			bv[npages + i].bv_page = pages[i];
			bv[npages + i].bv_offset = start;
			bv[npages + i].bv_len = len - start;

		npages += cur_npages;

	ctx->len = saved_len - count;
	ctx->npages = npages;
	iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
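
/*
 * Design note (descriptive sketch): the caller's iov_iter may describe user
 * memory that cannot be touched safely once the aio request is submitted, so
 * the loop above pins the user pages and rebuilds an equivalent ITER_BVEC
 * iterator over ctx->bv. Async reads and writes then work against ctx->iter,
 * and the pages stay valid until the request completes and the pages are
 * released.
 */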
/**
 * cifs_alloc_hash - allocate hash and hash context together
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. Both can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name,
		struct crypto_shash **shash, struct sdesc **sdesc)
	*shash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(*shash)) {
		cifs_dbg(VFS, "could not allocate crypto %s\n", name);
		rc = PTR_ERR(*shash);

	size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
	*sdesc = kmalloc(size, GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
		crypto_free_shash(*shash);

	(*sdesc)->shash.tfm = *shash;
	(*sdesc)->shash.flags = 0x0;
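
/*
 * Typical use, as a sketch (error handling elided; "md5" is just an example
 * algorithm name):
 *
 *	struct crypto_shash *shash = NULL;
 *	struct sdesc *sdesc = NULL;
 *
 *	rc = cifs_alloc_hash("md5", &shash, &sdesc);
 *	rc = crypto_shash_init(&sdesc->shash);
 *	rc = crypto_shash_update(&sdesc->shash, data, len);
 *	rc = crypto_shash_final(&sdesc->shash, digest);
 *	cifs_free_hash(&shash, &sdesc);
 */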
/**
 * cifs_free_hash - free hash and hash context together
 * Freeing a NULL hash or context is safe.
 */
void
cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)

		crypto_free_shash(*shash);
/**
 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
 * Input: rqst - a smb_rqst, page - a page index for rqst
 * Output: *len - the length for this page, *offset - the offset for this page
 */
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
			  unsigned int *len, unsigned int *offset)
{
	*len = rqst->rq_pagesz;
	*offset = (page == 0) ? rqst->rq_offset : 0;

	if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
		*len = rqst->rq_tailsz;
	else if (page == 0)
		*len = rqst->rq_pagesz - rqst->rq_offset;
}
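
/*
 * Worked example (illustrative): with rq_pagesz = 4096, rq_offset = 100,
 * rq_tailsz = 500 and rq_npages = 3:
 *
 *	page 0 -> *offset = 100, *len = 4096 - 100 = 3996
 *	page 1 -> *offset = 0,   *len = 4096
 *	page 2 -> *offset = 0,   *len = 500  (tail page)
 */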