/*
 *   fs/cifs/transport.c
 *
 *   Copyright (C) International Business Machines Corp., 2002,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *              Jeremy Allison (jra@samba.org) 2006.
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"

extern mempool_t *cifs_mid_poolp;

static void
wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}

struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
{
	struct mid_q_entry *temp;

	if (server == NULL) {
		cERROR(1, "Null TCP session in AllocMidQEntry");
		return NULL;
	}

	temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
	if (temp == NULL)
		return temp;
	else {
		memset(temp, 0, sizeof(struct mid_q_entry));
		temp->mid = smb_buffer->Mid;	/* always LE */
		temp->pid = current->pid;
		temp->command = cpu_to_le16(smb_buffer->Command);
		cFYI(1, "For smb_command %d", smb_buffer->Command);
	/*	do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
		/* when mid allocated can be before when sent */
		temp->when_alloc = jiffies;

		/*
		 * The default is for the mid to be synchronous, so the
		 * default callback just wakes up the current task.
		 */
		temp->callback = wake_up_task;
		temp->callback_data = current;
	}

	atomic_inc(&midCount);
	temp->mid_state = MID_REQUEST_ALLOCATED;
	return temp;
}

void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) &&
		    (midEntry->command != cpu_to_le16(SMB_COM_LOCKING_ANDX))) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
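
/*
 * Illustrative example of the CONFIG_CIFS_STATS2 diagnostics above (not part
 * of the original file): with the CIFS_TIMER bit set in the cifsFYI debug
 * flags (typically via /proc/fs/cifs/cifsFYI; the exact bit value comes from
 * cifs_debug.h), a response that took more than a second produces a line
 * roughly like:
 *
 *	CIFS slow rsp: cmd 46 mid 42 A: 0x3e9 S: 0x3e8 R: 0x1
 *
 * i.e. the elapsed jiffies since the mid was allocated (A), since it was
 * sent (S) and since the response arrived (R), here for a hypothetical
 * SMB_COM_READ_ANDX (0x2e) request.
 */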

static void
delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}

static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	int rc = 0;
	int i = 0;
	struct msghdr smb_msg;
	__be32 *buf_len = (__be32 *)(iov[0].iov_base);
	unsigned int len = iov[0].iov_len;
	unsigned int total_len;
	int first_vec = 0;
	unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
	struct socket *ssocket = server->ssocket;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	total_len = 0;
	for (i = 0; i < n_vec; i++)
		total_len += iov[i].iov_len;

	cFYI(1, "Sending smb: total_len %d", total_len);
	dump_smb(iov[0].iov_base, len);

	i = 0;
	while (total_len) {
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, total_len);
		if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
			i++;
			/*
			 * If blocking send we try 3 times, since each can block
			 * for 5 seconds. For nonblocking we have to try more
			 * but wait increasing amounts of time allowing time for
			 * socket to clear. The overall time we wait in either
			 * case to send on the socket is about 15 seconds.
			 * Similarly we wait for 15 seconds for a response from
			 * the server in SendReceive[2] for the server to send
			 * a response back for most types of requests (except
			 * SMB Write past end of file which can be slow, and
			 * blocking lock operations). NFS waits slightly longer
			 * than CIFS, but this can make it take longer for
			 * nonresponsive servers to be detected and 15 seconds
			 * is more than enough time for modern networks to
			 * send a packet. In most cases if we fail to send
			 * after the retries we will kill the socket and
			 * reconnect which may clear the network problem.
			 */
			if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 seconds",
					  ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}
		if (rc < 0)
			break;

		if (rc == total_len) {
			total_len = 0;
			break;
		} else if (rc > total_len) {
			cERROR(1, "sent %d requested %d", rc, total_len);
			break;
		}
		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}
		total_len -= rc;
		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}
		i = 0; /* in case we get ENOSPC on the next send */
	}

	if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
		cFYI(1, "partial send (%d remaining), terminating session",
			total_len);
		/* If we have only sent part of an SMB then the next SMB
		   could be taken as the remainder of this one.  We need
		   to kill the socket so the server throws away the partial
		   SMB */
		server->tcpStatus = CifsNeedReconnect;
	}

	if (rc < 0 && rc != -EINTR)
		cERROR(1, "Error %d sending data on socket to server", rc);
	else
		rc = 0;

	/* Don't want to modify the buffer as a side effect of this call. */
	*buf_len = cpu_to_be32(smb_buf_length);

	return rc;
}
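
/*
 * Worked timing for the ENOSPC/EAGAIN retry loop above (informational only):
 * in the nonblocking case the loop sleeps 1 << i ms for i = 1..13 and gives
 * up once i reaches 14, i.e. roughly 2 + 4 + ... + 8192 ms, about 16 seconds,
 * which is the "about 15 seconds" the comment refers to.  In the blocking
 * case only three failed sendmsg attempts are made, since each call can
 * itself block in the socket layer for several seconds.
 */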

int
smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
	 unsigned int smb_buf_length)
{
	struct kvec iov;

	iov.iov_base = smb_buffer;
	iov.iov_len = smb_buf_length + 4;

	return smb_sendv(server, &iov, 1);
}
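
/*
 * Framing note (illustrative, not from the original file): smb_buf_length
 * counts only the SMB payload, while the frame on the wire is prefixed by a
 * 4-byte RFC1002 length field, hence the "+ 4" above.  For example, a request
 * whose SMB portion is 72 bytes is handed to smb_sendv() as a single 76-byte
 * kvec whose first four bytes hold the big-endian value 72.
 */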

static int
wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (optype == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Cannot count locking commands against the total
			 * as they are allowed to block on the server.
			 */

			/* update # of requests on the wire to server */
			if (optype != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}

static int
wait_for_free_request(struct TCP_Server_Info *server, const int optype)
{
	return wait_for_free_credits(server, optype, get_credits_field(server));
}
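
/*
 * Rough sketch of the accounting above (numbers illustrative): each server
 * starts with a fixed pool of credits (historically 50, see the "50
 * overlapping requests" comments below).  A normal synchronous sender takes
 * one credit and bumps in_flight before transmitting; once the pool hits
 * zero, later senders sleep killably on request_q until a completion path
 * calls cifs_add_credits() and wakes the queue.  CIFS_ASYNC_OP (oplock
 * breaks) always goes through immediately, and CIFS_BLOCKING_OP (blocking
 * byte-range locks) deliberately does not consume a credit.
 */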

static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
			struct mid_q_entry **ppmidQ)
{
	if (ses->server->tcpStatus == CifsExiting) {
		return -ENOENT;
	}

	if (ses->server->tcpStatus == CifsNeedReconnect) {
		cFYI(1, "tcp session dead - return to caller to retry");
		return -EAGAIN;
	}

	if (ses->status != CifsGood) {
		/* check if SMB session is bad because we are setting it up */
		if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
		    (in_buf->Command != SMB_COM_NEGOTIATE))
			return -EAGAIN;
		/* else ok - we are setting up session */
	}
	*ppmidQ = AllocMidQEntry(in_buf, ses->server);
	if (*ppmidQ == NULL)
		return -ENOMEM;
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);
	return 0;
}

static int
wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_freezekillable(server->response_q,
				midQ->mid_state != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;

	return 0;
}

static int
cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
			 unsigned int nvec, struct mid_q_entry **ret_mid)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
	struct mid_q_entry *mid;

	/* enable signing if server requires it */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
		hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;

	mid = AllocMidQEntry(hdr, server);
	if (mid == NULL)
		return -ENOMEM;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);

	rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
	if (rc)
		delete_mid(mid);
	*ret_mid = mid;
	return rc;
}

/*
 * Send an SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_receive_t *receive,
		mid_callback_t *callback, void *cbdata, bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;

	rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
	if (rc)
		return rc;

	mutex_lock(&server->srv_mutex);
	rc = cifs_setup_async_request(server, iov, nvec, &mid);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		cifs_add_credits(server, 1);
		wake_up(&server->request_q);
		return rc;
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(server);
	rc = smb_sendv(server, iov, nvec);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc)
		goto out_err;

	return rc;
out_err:
	delete_mid(mid);
	cifs_add_credits(server, 1);
	wake_up(&server->request_q);
	return rc;
}
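
/*
 * Illustrative caller sketch for cifs_call_async() (hypothetical names, not
 * part of this file): an asynchronous user supplies a callback that runs once
 * the response (or a reconnect) changes the mid state, and that callback is
 * expected to release the mid and return the credit, e.g.:
 *
 *	static void my_done(struct mid_q_entry *mid)
 *	{
 *		struct my_ctx *ctx = mid->callback_data;
 *
 *		ctx->result = mid->mid_state;	// inspect state/resp_buf here
 *		DeleteMidQEntry(mid);
 *		cifs_add_credits(ctx->server, 1);
 *		wake_up(&ctx->server->request_q);
 *	}
 *
 *	rc = cifs_call_async(server, iov, n_vec, NULL, my_done, ctx, false);
 */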

/*
 * Send an SMB request.  No response info (other than the return code)
 * needs to be parsed.
 *
 * flags indicate the type of request buffer, how long to wait, and whether
 * to log the NT STATUS code (error) before mapping it to a POSIX error.
 */
int
SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
		 char *in_buf, int flags)
{
	int rc;
	struct kvec iov[1];
	int resp_buf_type;

	iov[0].iov_base = in_buf;
	iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
	flags |= CIFS_NO_RESP;
	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
	cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);

	return rc;
}

static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}

/*
 * An NT cancel request header looks just like the original request except:
 *
 * The Command is SMB_COM_NT_CANCEL
 * The WordCount is zeroed out
 * The ByteCount is zeroed out
 *
 * This function mangles an existing request buffer into a
 * SMB_COM_NT_CANCEL request and then sends it.
 */
static int
send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
	       struct mid_q_entry *mid)
{
	int rc = 0;

	/* -4 for RFC1001 length and +2 for BCC field */
	in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
	in_buf->Command = SMB_COM_NT_CANCEL;
	in_buf->WordCount = 0;
	put_bcc(0, in_buf);

	mutex_lock(&server->srv_mutex);
	rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
	if (rc) {
		mutex_unlock(&server->srv_mutex);
		return rc;
	}
	rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	mutex_unlock(&server->srv_mutex);

	cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
	     in_buf->Mid, rc);

	return rc;
}
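
/*
 * Shape of the mangled cancel request (informational, not from the original
 * file): the buffer keeps the original Mid/Pid/Tid/Uid so the server can
 * match it to the in-flight operation, while Command becomes
 * SMB_COM_NT_CANCEL (0xA4), WordCount is 0 and the trailing ByteCount is 0,
 * giving a fixed-size frame of sizeof(struct smb_hdr) - 4 + 2 bytes after
 * the RFC1002 length prefix, as set up above.
 */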

int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(&iov, 1, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}

static int
cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
		   unsigned int nvec, struct mid_q_entry **ret_mid)
{
	int rc;
	struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
	struct mid_q_entry *mid;

	rc = allocate_mid(ses, hdr, &mid);
	if (rc)
		return rc;
	rc = cifs_sign_smb2(iov, nvec, ses->server, &mid->sequence_number);
	if (rc)
		delete_mid(mid);
	*ret_mid = mid;
	return rc;
}

int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
	     const int flags)
{
	int rc = 0;
	int long_op;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;

	long_op = flags & CIFS_TIMEOUT_MASK;

	*pRespBufType = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, long_op);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	rc = cifs_setup_request(ses, iov, n_vec, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	if (long_op == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			cifs_add_credits(ses->server, 1);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*pRespBufType = CIFS_LARGE_BUFFER;
	else
		*pRespBufType = CIFS_SMALL_BUFFER;

	rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	delete_mid(midQ);
	cifs_add_credits(ses->server, 1);

	return rc;
}
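
/*
 * Illustrative SendReceive2() caller sketch (hypothetical variables, not part
 * of this file): the caller points iov[0] at a request built in a small cifs
 * buffer; on return iov[0] has been swapped to describe the response, which
 * the caller frees according to resp_buf_type:
 *
 *	struct kvec iov[1];
 *	int resp_buf_type = CIFS_NO_BUFFER;
 *
 *	iov[0].iov_base = (char *)pSMB;
 *	iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4;
 *	rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, CIFS_LOG_ERROR);
 *	if (rc == 0) {
 *		... parse iov[0].iov_base ...
 *		if (resp_buf_type == CIFS_LARGE_BUFFER)
 *			cifs_buf_release(iov[0].iov_base);
 *		else if (resp_buf_type == CIFS_SMALL_BUFFER)
 *			cifs_small_buf_release(iov[0].iov_base);
 *	}
 */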

int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int long_op)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			  be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, long_op);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	if (long_op == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		send_nt_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_add_credits(ses->server, 1);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		cifs_add_credits(ses->server, 1);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	cifs_add_credits(ses->server, 1);

	return rc;
}

/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	pSMB->hdr.Mid = GetNextMid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}

int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
			  be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send an NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_nt_cancel(ses->server, in_buf, midQ);
			if (rc) {
				delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				delete_mid(midQ);
				return rc;
			}
		}

		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_nt_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}