/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2005
 *   Author(s): Steve French ([email protected])
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;
extern kmem_cache_t *cifs_oplock_cachep;

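/*
 * Each outstanding SMB request is tracked by a mid_q_entry, keyed by the
 * multiplex id (Mid) from the SMB header and linked on the server's
 * pending_mid_q, so that the response-reading (demultiplex) thread can
 * match an incoming response with the task that sent the request.
 */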
static struct mid_q_entry *
AllocMidQEntry(struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)
{
        struct mid_q_entry *temp;

        if (ses == NULL) {
                cERROR(1, ("Null session passed in to AllocMidQEntry"));
                return NULL;
        }
        if (ses->server == NULL) {
                cERROR(1, ("Null TCP session in AllocMidQEntry"));
                return NULL;
        }

        temp = (struct mid_q_entry *) mempool_alloc(cifs_mid_poolp,
                                                    SLAB_KERNEL | SLAB_NOFS);
        if (temp == NULL)
                return temp;
        else {
                memset(temp, 0, sizeof(struct mid_q_entry));
                temp->mid = smb_buffer->Mid;    /* always LE */
                temp->pid = current->pid;
                temp->command = smb_buffer->Command;
                cFYI(1, ("For smb_command %d", temp->command));
                do_gettimeofday(&temp->when_sent);
                temp->ses = ses;
                temp->tsk = current;
        }

        spin_lock(&GlobalMid_Lock);
        list_add_tail(&temp->qhead, &ses->server->pending_mid_q);
        atomic_inc(&midCount);
        temp->midState = MID_REQUEST_ALLOCATED;
        spin_unlock(&GlobalMid_Lock);
        return temp;
}

static void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
        spin_lock(&GlobalMid_Lock);
        midEntry->midState = MID_FREE;
        list_del(&midEntry->qhead);
        atomic_dec(&midCount);
        spin_unlock(&GlobalMid_Lock);
        if (midEntry->largeBuf)
                cifs_buf_release(midEntry->resp_buf);
        else
                cifs_small_buf_release(midEntry->resp_buf);
        mempool_free(midEntry, cifs_mid_poolp);
}

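/*
 * Oplock breaks received from the server are queued on GlobalOplock_Q
 * (one entry per inode/tcon/netfid) for later processing by the oplock
 * break handling code.
 */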
struct oplock_q_entry *
AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
{
        struct oplock_q_entry *temp;
        if ((pinode == NULL) || (tcon == NULL)) {
                cERROR(1, ("Null parms passed to AllocOplockQEntry"));
                return NULL;
        }
        temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
                                                          SLAB_KERNEL);
        if (temp == NULL)
                return temp;
        else {
                temp->pinode = pinode;
                temp->tcon = tcon;
                temp->netfid = fid;
                spin_lock(&GlobalMid_Lock);
                list_add_tail(&temp->qhead, &GlobalOplock_Q);
                spin_unlock(&GlobalMid_Lock);
        }
        return temp;
}

void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
{
        spin_lock(&GlobalMid_Lock);
        /* should we check if list empty first? */
        list_del(&oplockEntry->qhead);
        spin_unlock(&GlobalMid_Lock);
        kmem_cache_free(cifs_oplock_cachep, oplockEntry);
}

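/*
 * smb_send - write the 4 byte RFC1001 length header plus the SMB frame to
 * the server socket, looping until all bytes are sent.  On -ENOSPC or
 * -EAGAIN it sleeps 500 ms and retries, giving up after roughly 30 seconds.
 */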
int
smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
         unsigned int smb_buf_length, struct sockaddr *sin)
{
        int rc = 0;
        int i = 0;
        struct msghdr smb_msg;
        struct kvec iov;
        unsigned len = smb_buf_length + 4;

        if (ssocket == NULL)
                return -ENOTSOCK; /* BB eventually add reconnect code here */
        iov.iov_base = smb_buffer;
        iov.iov_len = len;

        smb_msg.msg_name = sin;
        smb_msg.msg_namelen = sizeof(struct sockaddr);
        smb_msg.msg_control = NULL;
        smb_msg.msg_controllen = 0;
        smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/

        /* smb header is converted in header_assemble. bcc and rest of SMB word
           area, and byte area if necessary, is converted to littleendian in
           cifssmb.c and RFC1001 len is converted to bigendian in smb_send
           Flags2 is converted in SendReceive */

        smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
        cFYI(1, ("Sending smb of length %d", smb_buf_length));
        dump_smb(smb_buffer, len);

        while (len > 0) {
                rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len);
                if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
                        i++;
                        if (i > 60) {
                                cERROR(1,
                                       ("sends on sock %p stuck for 30 seconds",
                                        ssocket));
                                rc = -EAGAIN;
                                break;
                        }
                        msleep(500);
                        continue;
                }
                if (rc < 0)
                        break;
                iov.iov_base += rc;
                iov.iov_len -= rc;
                len -= rc;
        }

        if (rc < 0) {
                cERROR(1, ("Error %d sending data on socket to server.", rc));
        } else {
                rc = 0;
        }

        return rc;
}

#ifdef CIFS_EXPERIMENTAL
/* BB finish off this function, adding support for writing set of pages as iovec */
/* and also adding support for operations that need to parse the response smb */

int
smb_sendv(struct socket *ssocket, struct smb_hdr *smb_buffer,
          unsigned int smb_buf_length,
          struct kvec *write_vector /* page list */, struct sockaddr *sin)
{
        int rc = 0;
        int i = 0;
        struct msghdr smb_msg;
        /* BB incomplete - for now only the SMB header goes into the iovec;
           the pages in write_vector still need to be added to piov below */
        unsigned int number_of_pages = 1;       /* account for SMB header */
        struct kvec *piov;
        unsigned len = smb_buf_length + 4;

        if (ssocket == NULL)
                return -ENOTSOCK; /* BB eventually add reconnect code here */

        piov = kmalloc(number_of_pages * sizeof(struct kvec), GFP_KERNEL);
        if (piov == NULL)
                return -ENOMEM;
        piov[0].iov_base = smb_buffer;
        piov[0].iov_len = len;

        smb_msg.msg_name = sin;
        smb_msg.msg_namelen = sizeof(struct sockaddr);
        smb_msg.msg_control = NULL;
        smb_msg.msg_controllen = 0;
        smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; /* BB add more flags?*/

        /* smb header is converted in header_assemble. bcc and rest of SMB word
           area, and byte area if necessary, is converted to littleendian in
           cifssmb.c and RFC1001 len is converted to bigendian in smb_send
           Flags2 is converted in SendReceive */

        smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
        cFYI(1, ("Sending smb of length %d", smb_buf_length));
        dump_smb(smb_buffer, len);

        while (len > 0) {
                rc = kernel_sendmsg(ssocket, &smb_msg, piov, number_of_pages,
                                    len);
                if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
                        i++;
                        if (i > 60) {
                                cERROR(1,
                                       ("sends on sock %p stuck for 30 seconds",
                                        ssocket));
                                rc = -EAGAIN;
                                break;
                        }
                        msleep(500);
                        continue;
                }
                if (rc < 0)
                        break;
                /* BB only correct while a single iovec is in use - fix up
                   once the page list is actually placed in piov */
                piov[0].iov_base += rc;
                piov[0].iov_len -= rc;
                len -= rc;
        }
        kfree(piov);

        if (rc < 0) {
                cERROR(1, ("Error %d sending data on socket to server.", rc));
        } else {
                rc = 0;
        }

        return rc;
}

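/* Experimental counterpart of SendReceive for requests whose data is
   passed as a page list (see smb_sendv above).  Note that the actual
   send below is still commented out, and the response is not parsed
   or waited for here. */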
int
CIFSSendRcv(const unsigned int xid, struct cifsSesInfo *ses,
            struct smb_hdr *in_buf,
            struct kvec *write_vector /* page list */,
            int *pbytes_returned, const int long_op)
{
        int rc = 0;
        unsigned long timeout = 15 * HZ;
        struct mid_q_entry *midQ = NULL;

        if (ses == NULL) {
                cERROR(1, ("Null smb session"));
                return -EIO;
        }
        if (ses->server == NULL) {
                cERROR(1, ("Null tcp session"));
                return -EIO;
        }
        if (pbytes_returned == NULL)
                return -EIO;
        else
                *pbytes_returned = 0;

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */
        if (long_op == -1) {
                /* oplock breaks must not be held up */
                atomic_inc(&ses->server->inFlight);
        } else {
                spin_lock(&GlobalMid_Lock);
                while (1) {
                        if (atomic_read(&ses->server->inFlight) >=
                                        cifs_max_pending) {
                                spin_unlock(&GlobalMid_Lock);
                                wait_event(ses->server->request_q,
                                           atomic_read(&ses->server->inFlight)
                                           < cifs_max_pending);
                                spin_lock(&GlobalMid_Lock);
                        } else {
                                if (ses->server->tcpStatus == CifsExiting) {
                                        spin_unlock(&GlobalMid_Lock);
                                        return -ENOENT;
                                }

                                /* can not count locking commands against total
                                   since they are allowed to block on server */

                                if (long_op < 3) {
                                        /* update # of requests on the wire to
                                           server */
                                        atomic_inc(&ses->server->inFlight);
                                }
                                spin_unlock(&GlobalMid_Lock);
                                break;
                        }
                }
        }
        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        down(&ses->server->tcpSem);

        if (ses->server->tcpStatus == CifsExiting) {
                rc = -ENOENT;
                up(&ses->server->tcpSem);
                goto cifs_out_label;
        } else if (ses->server->tcpStatus == CifsNeedReconnect) {
                cFYI(1, ("tcp session dead - return to caller to retry"));
                rc = -EAGAIN;
                up(&ses->server->tcpSem);
                goto cifs_out_label;
        } else if (ses->status != CifsGood) {
                /* check if SMB session is bad because we are setting it up */
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        rc = -EAGAIN;
                        up(&ses->server->tcpSem);
                        goto cifs_out_label;
                } /* else ok - we are setting up session */
        }
        midQ = AllocMidQEntry(in_buf, ses);
        if (midQ == NULL) {
                up(&ses->server->tcpSem);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return -ENOMEM;
        }

        if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                up(&ses->server->tcpSem);
                cERROR(1, ("Illegal length, greater than maximum frame, %d",
                           in_buf->smb_buf_length));
                DeleteMidQEntry(midQ);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return -EIO;
        }

        /* BB can we sign efficiently in this path? */
        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);

        midQ->midState = MID_REQUEST_SUBMITTED;
/*      rc = smb_sendv(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
                       piovec,
                       (struct sockaddr *) &(ses->server->addr.sockAddr)); */
        if (rc < 0) {
                DeleteMidQEntry(midQ);
                up(&ses->server->tcpSem);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return rc;
        } else
                up(&ses->server->tcpSem);
cifs_out_label:
        if (midQ)
                DeleteMidQEntry(midQ);

        if (long_op < 3) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
        }

        return rc;
}


#endif /* CIFS_EXPERIMENTAL */

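/*
 * Send an SMB request and wait for the response.  long_op selects the wait
 * behaviour: -1 = oplock break response, do not wait for a reply;
 * 1 = wait up to 45 seconds (longer than the server's oplock break timeout
 * of about 43 seconds); 2 = write past end of file, wait up to 300 seconds;
 * greater than 2 = blocking request (e.g. a blocking byte-range lock), wait
 * indefinitely; anything else waits the default 15 seconds.
 */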
int
SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int long_op)
{
        int rc = 0;
        unsigned int receive_len;
        unsigned long timeout;
        struct mid_q_entry *midQ;

        if (ses == NULL) {
                cERROR(1, ("Null smb session"));
                return -EIO;
        }
        if (ses->server == NULL) {
                cERROR(1, ("Null tcp session"));
                return -EIO;
        }

        /* Ensure that we do not send more than 50 overlapping requests
           to the same server. We may make this configurable later or
           use ses->maxReq */
        if (long_op == -1) {
                /* oplock breaks must not be held up */
                atomic_inc(&ses->server->inFlight);
        } else {
                spin_lock(&GlobalMid_Lock);
                while (1) {
                        if (atomic_read(&ses->server->inFlight) >=
                                        cifs_max_pending) {
                                spin_unlock(&GlobalMid_Lock);
                                wait_event(ses->server->request_q,
                                           atomic_read(&ses->server->inFlight)
                                           < cifs_max_pending);
                                spin_lock(&GlobalMid_Lock);
                        } else {
                                if (ses->server->tcpStatus == CifsExiting) {
                                        spin_unlock(&GlobalMid_Lock);
                                        return -ENOENT;
                                }

                                /* can not count locking commands against total
                                   since they are allowed to block on server */

                                if (long_op < 3) {
                                        /* update # of requests on the wire to
                                           server */
                                        atomic_inc(&ses->server->inFlight);
                                }
                                spin_unlock(&GlobalMid_Lock);
                                break;
                        }
                }
        }
        /* make sure that we sign in the same order that we send on this socket
           and avoid races inside tcp sendmsg code that could cause corruption
           of smb data */

        down(&ses->server->tcpSem);

        if (ses->server->tcpStatus == CifsExiting) {
                rc = -ENOENT;
                goto out_unlock;
        } else if (ses->server->tcpStatus == CifsNeedReconnect) {
                cFYI(1, ("tcp session dead - return to caller to retry"));
                rc = -EAGAIN;
                goto out_unlock;
        } else if (ses->status != CifsGood) {
                /* check if SMB session is bad because we are setting it up */
                if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
                    (in_buf->Command != SMB_COM_NEGOTIATE)) {
                        rc = -EAGAIN;
                        goto out_unlock;
                } /* else ok - we are setting up session */
        }
        midQ = AllocMidQEntry(in_buf, ses);
        if (midQ == NULL) {
                up(&ses->server->tcpSem);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return -ENOMEM;
        }

        if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
                up(&ses->server->tcpSem);
                cERROR(1, ("Illegal length, greater than maximum frame, %d",
                           in_buf->smb_buf_length));
                DeleteMidQEntry(midQ);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return -EIO;
        }

        rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);

        midQ->midState = MID_REQUEST_SUBMITTED;
        rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
                      (struct sockaddr *) &(ses->server->addr.sockAddr));
        if (rc < 0) {
                DeleteMidQEntry(midQ);
                up(&ses->server->tcpSem);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return rc;
        } else
                up(&ses->server->tcpSem);
        if (long_op == -1)
                goto cifs_no_response_exit;
        else if (long_op == 2) /* writes past end of file can take a long time */
                timeout = 300 * HZ;
        else if (long_op == 1)
                timeout = 45 * HZ; /* should be greater than the server's
                                      oplock break timeout (about 43 seconds) */
        else if (long_op > 2) {
                timeout = MAX_SCHEDULE_TIMEOUT;
        } else
                timeout = 15 * HZ;
        /* wait for the timeout chosen above, or until woken up because the
           response arrived or the last connection to this server was
           unmounted */
        if (signal_pending(current)) {
                /* if a signal is pending do not hold up the user for the full
                   smb timeout, but still give the response a chance to
                   complete */
                timeout = 2 * HZ;
        }

        /* No user interrupts in wait - wreaks havoc with performance */
        if (timeout != MAX_SCHEDULE_TIMEOUT) {
                timeout += jiffies;
                wait_event(ses->server->response_q,
                           (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
                           time_after(jiffies, timeout) ||
                           ((ses->server->tcpStatus != CifsGood) &&
                            (ses->server->tcpStatus != CifsNew)));
        } else {
                wait_event(ses->server->response_q,
                           (!(midQ->midState & MID_REQUEST_SUBMITTED)) ||
                           ((ses->server->tcpStatus != CifsGood) &&
                            (ses->server->tcpStatus != CifsNew)));
        }

        spin_lock(&GlobalMid_Lock);
        if (midQ->resp_buf) {
                spin_unlock(&GlobalMid_Lock);
                receive_len = be32_to_cpu(*(__be32 *)midQ->resp_buf);
        } else {
                cERROR(1, ("No response buffer"));
                if (midQ->midState == MID_REQUEST_SUBMITTED) {
                        if (ses->server->tcpStatus == CifsExiting)
                                rc = -EHOSTDOWN;
                        else {
                                ses->server->tcpStatus = CifsNeedReconnect;
                                midQ->midState = MID_RETRY_NEEDED;
                        }
                }

                if (rc != -EHOSTDOWN) {
                        if (midQ->midState == MID_RETRY_NEEDED) {
                                rc = -EAGAIN;
                                cFYI(1, ("marking request for retry"));
                        } else {
                                rc = -EIO;
                        }
                }
                spin_unlock(&GlobalMid_Lock);
                DeleteMidQEntry(midQ);
                /* If not lock req, update # of requests on wire to server */
                if (long_op < 3) {
                        atomic_dec(&ses->server->inFlight);
                        wake_up(&ses->server->request_q);
                }
                return rc;
        }

        if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
                cERROR(1, ("Frame too large received.  Length: %d  Xid: %d",
                           receive_len, xid));
                rc = -EIO;
        } else {                /* rcvd frame is ok */

                if (midQ->resp_buf && out_buf
                    && (midQ->midState == MID_RESPONSE_RECEIVED)) {
                        out_buf->smb_buf_length = receive_len;
                        memcpy((char *)out_buf + 4,
                               (char *)midQ->resp_buf + 4,
                               receive_len);

                        dump_smb(out_buf, 92);
                        /* convert the length into a more usable form */
                        if ((receive_len > 24) &&
                            (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
                                                     SECMODE_SIGN_ENABLED))) {
                                rc = cifs_verify_signature(out_buf,
                                                ses->server->mac_signing_key,
                                                midQ->sequence_number + 1);
                                if (rc) {
                                        cERROR(1, ("Unexpected packet signature received from server"));
                                        /* BB FIXME - add code to kill session here */
                                }
                        }

                        *pbytes_returned = out_buf->smb_buf_length;

                        /* BB special case reconnect tid and uid here? */
                        rc = map_smb_to_linux_error(out_buf);

                        /* convert ByteCount if necessary */
                        if (receive_len >=
                            sizeof(struct smb_hdr) -
                            4 /* do not count RFC1001 header */ +
                            (2 * out_buf->WordCount) + 2 /* bcc */)
                                BCC(out_buf) = le16_to_cpu(BCC(out_buf));
                } else {
                        rc = -EIO;
                        cFYI(1, ("Bad MID state?"));
                }
        }
cifs_no_response_exit:
        DeleteMidQEntry(midQ);

        if (long_op < 3) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
        }

        return rc;

out_unlock:
        up(&ses->server->tcpSem);
        /* If not lock req, update # of requests on wire to server */
        if (long_op < 3) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
        }

        return rc;
}