Merge branch 'rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/mmarek/kbuild
[deliverable/linux.git] / fs / cifs / transport.c
1 /*
2 * fs/cifs/transport.c
3 *
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * Jeremy Allison (jra@samba.org) 2006.
7 *
8 * This library is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published
10 * by the Free Software Foundation; either version 2.1 of the License, or
11 * (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
16 * the GNU Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public License
19 * along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23 #include <linux/fs.h>
24 #include <linux/list.h>
25 #include <linux/gfp.h>
26 #include <linux/wait.h>
27 #include <linux/net.h>
28 #include <linux/delay.h>
29 #include <linux/freezer.h>
30 #include <linux/tcp.h>
31 #include <linux/highmem.h>
32 #include <asm/uaccess.h>
33 #include <asm/processor.h>
34 #include <linux/mempool.h>
35 #include "cifspdu.h"
36 #include "cifsglob.h"
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39
/*
 * Default mid callback: the mid was issued synchronously, so
 * callback_data holds the issuing task -- just wake it up.
 */
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
	wake_up_process(mid->callback_data);
}
45
46 struct mid_q_entry *
47 AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
48 {
49 struct mid_q_entry *temp;
50
51 if (server == NULL) {
52 cERROR(1, "Null TCP session in AllocMidQEntry");
53 return NULL;
54 }
55
56 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
57 if (temp == NULL)
58 return temp;
59 else {
60 memset(temp, 0, sizeof(struct mid_q_entry));
61 temp->mid = smb_buffer->Mid; /* always LE */
62 temp->pid = current->pid;
63 temp->command = cpu_to_le16(smb_buffer->Command);
64 cFYI(1, "For smb_command %d", smb_buffer->Command);
65 /* do_gettimeofday(&temp->when_sent);*/ /* easier to use jiffies */
66 /* when mid allocated can be before when sent */
67 temp->when_alloc = jiffies;
68 temp->server = server;
69
70 /*
71 * The default is for the mid to be synchronous, so the
72 * default callback just wakes up the current task.
73 */
74 temp->callback = cifs_wake_up_task;
75 temp->callback_data = current;
76 }
77
78 atomic_inc(&midCount);
79 temp->mid_state = MID_REQUEST_ALLOCATED;
80 return temp;
81 }
82
/*
 * Free a mid and release its response buffer, updating the global mid
 * count. The caller must already have unlinked the mid from any queue.
 */
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
#ifdef CONFIG_CIFS_STATS2
	/* the dialect's blocking-lock command is exempt from the
	   slow-response warning below, since it is expected to wait */
	__le16 command = midEntry->server->vals->lock_cmd;
	unsigned long now;
#endif
	midEntry->mid_state = MID_FREE;
	atomic_dec(&midCount);
	if (midEntry->large_buf)
		cifs_buf_release(midEntry->resp_buf);
	else
		cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
	now = jiffies;
	/* commands taking longer than one second are indications that
	   something is wrong, unless it is quite a slow link or server */
	if ((now - midEntry->when_alloc) > HZ) {
		if ((cifsFYI & CIFS_TIMER) && (midEntry->command != command)) {
			printk(KERN_DEBUG " CIFS slow rsp: cmd %d mid %llu",
			       midEntry->command, midEntry->mid);
			printk(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
			       now - midEntry->when_alloc,
			       now - midEntry->when_sent,
			       now - midEntry->when_received);
		}
	}
#endif
	mempool_free(midEntry, cifs_mid_poolp);
}
113
/*
 * Unlink a mid from its queue (under GlobalMid_Lock) and then free it,
 * including its response buffer, via DeleteMidQEntry().
 */
void
cifs_delete_mid(struct mid_q_entry *mid)
{
	spin_lock(&GlobalMid_Lock);
	list_del(&mid->qhead);
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
}
123
/*
 * smb_send_kvec - send an array of kvecs to the server
 * @server: Server to send the data to
 * @iov: Pointer to array of kvecs
 * @n_vec: length of kvec array
 * @sent: amount of data sent on socket is stored here
 *
 * Our basic "send data to server" function. Should be called with srv_mutex
 * held. The caller is responsible for handling the results. Note that the
 * kvec array is consumed: iov_base/iov_len of partially-sent entries are
 * advanced past the data already written.
 */
static int
smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
	      size_t *sent)
{
	int rc = 0;
	int i = 0;		/* backoff retry counter; reused as scan index */
	struct msghdr smb_msg;
	unsigned int remaining;
	size_t first_vec = 0;	/* first kvec not yet completely sent */
	struct socket *ssocket = server->ssocket;

	*sent = 0;

	if (ssocket == NULL)
		return -ENOTSOCK; /* BB eventually add reconnect code here */

	smb_msg.msg_name = (struct sockaddr *) &server->dstaddr;
	smb_msg.msg_namelen = sizeof(struct sockaddr);
	smb_msg.msg_control = NULL;
	smb_msg.msg_controllen = 0;
	/* nonblocking sends rely on the backoff loop below instead of
	   blocking inside tcp sendmsg */
	if (server->noblocksnd)
		smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
	else
		smb_msg.msg_flags = MSG_NOSIGNAL;

	/* total bytes still to be pushed onto the socket */
	remaining = 0;
	for (i = 0; i < n_vec; i++)
		remaining += iov[i].iov_len;

	i = 0;
	while (remaining) {
		/*
		 * If blocking send, we try 3 times, since each can block
		 * for 5 seconds. For nonblocking we have to try more
		 * but wait increasing amounts of time allowing time for
		 * socket to clear. The overall time we wait in either
		 * case to send on the socket is about 15 seconds.
		 * Similarly we wait for 15 seconds for a response from
		 * the server in SendReceive[2] for the server to send
		 * a response back for most types of requests (except
		 * SMB Write past end of file which can be slow, and
		 * blocking lock operations). NFS waits slightly longer
		 * than CIFS, but this can make it take longer for
		 * nonresponsive servers to be detected and 15 seconds
		 * is more than enough time for modern networks to
		 * send a packet. In most cases if we fail to send
		 * after the retries we will kill the socket and
		 * reconnect which may clear the network problem.
		 */
		rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
				    n_vec - first_vec, remaining);
		if (rc == -ENOSPC || rc == -EAGAIN) {
			/* transient backpressure: exponential backoff */
			i++;
			if (i >= 14 || (!server->noblocksnd && (i > 2))) {
				cERROR(1, "sends on sock %p stuck for 15 "
					  "seconds", ssocket);
				rc = -EAGAIN;
				break;
			}
			msleep(1 << i);
			continue;
		}

		if (rc < 0)
			break;

		/* send was at least partially successful */
		*sent += rc;

		if (rc == remaining) {
			remaining = 0;
			break;
		}

		if (rc > remaining) {
			cERROR(1, "sent %d requested %d", rc, remaining);
			break;
		}

		if (rc == 0) {
			/* should never happen, letting socket clear before
			   retrying is our only obvious option here */
			cERROR(1, "tcp sent no data");
			msleep(500);
			continue;
		}

		remaining -= rc;

		/* the line below resets i */
		for (i = first_vec; i < n_vec; i++) {
			if (iov[i].iov_len) {
				if (rc > iov[i].iov_len) {
					/* this kvec fully consumed */
					rc -= iov[i].iov_len;
					iov[i].iov_len = 0;
				} else {
					/* partially sent: advance within it */
					iov[i].iov_base += rc;
					iov[i].iov_len -= rc;
					first_vec = i;
					break;
				}
			}
		}

		i = 0; /* in case we get ENOSPC on the next send */
		rc = 0;
	}
	return rc;
}
243
244 /**
245 * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
246 * @rqst: pointer to smb_rqst
247 * @idx: index into the array of the page
248 * @iov: pointer to struct kvec that will hold the result
249 *
250 * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
251 * The page will be kmapped and the address placed into iov_base. The length
252 * will then be adjusted according to the ptailoff.
253 */
254 void
255 cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
256 struct kvec *iov)
257 {
258 /*
259 * FIXME: We could avoid this kmap altogether if we used
260 * kernel_sendpage instead of kernel_sendmsg. That will only
261 * work if signing is disabled though as sendpage inlines the
262 * page directly into the fraglist. If userspace modifies the
263 * page after we calculate the signature, then the server will
264 * reject it and may break the connection. kernel_sendmsg does
265 * an extra copy of the data and avoids that issue.
266 */
267 iov->iov_base = kmap(rqst->rq_pages[idx]);
268
269 /* if last page, don't send beyond this offset into page */
270 if (idx == (rqst->rq_npages - 1))
271 iov->iov_len = rqst->rq_tailsz;
272 else
273 iov->iov_len = rqst->rq_pagesz;
274 }
275
276 static int
277 smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
278 {
279 int rc;
280 struct kvec *iov = rqst->rq_iov;
281 int n_vec = rqst->rq_nvec;
282 unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
283 unsigned int i;
284 size_t total_len = 0, sent;
285 struct socket *ssocket = server->ssocket;
286 int val = 1;
287
288 cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
289 dump_smb(iov[0].iov_base, iov[0].iov_len);
290
291 /* cork the socket */
292 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
293 (char *)&val, sizeof(val));
294
295 rc = smb_send_kvec(server, iov, n_vec, &sent);
296 if (rc < 0)
297 goto uncork;
298
299 total_len += sent;
300
301 /* now walk the page array and send each page in it */
302 for (i = 0; i < rqst->rq_npages; i++) {
303 struct kvec p_iov;
304
305 cifs_rqst_page_to_kvec(rqst, i, &p_iov);
306 rc = smb_send_kvec(server, &p_iov, 1, &sent);
307 kunmap(rqst->rq_pages[i]);
308 if (rc < 0)
309 break;
310
311 total_len += sent;
312 }
313
314 uncork:
315 /* uncork it */
316 val = 0;
317 kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
318 (char *)&val, sizeof(val));
319
320 if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
321 cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
322 "session", smb_buf_length + 4, total_len);
323 /*
324 * If we have only sent part of an SMB then the next SMB could
325 * be taken as the remainder of this one. We need to kill the
326 * socket so the server throws away the partial SMB
327 */
328 server->tcpStatus = CifsNeedReconnect;
329 }
330
331 if (rc < 0 && rc != -EINTR)
332 cERROR(1, "Error %d sending data on socket to server", rc);
333 else
334 rc = 0;
335
336 return rc;
337 }
338
/*
 * Convenience wrapper: wrap a bare kvec array in an smb_rqst with no
 * page array and hand it to smb_send_rqst().
 */
static int
smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	return smb_send_rqst(server, &rqst);
}
347
348 int
349 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
350 unsigned int smb_buf_length)
351 {
352 struct kvec iov;
353
354 iov.iov_base = smb_buffer;
355 iov.iov_len = smb_buf_length + 4;
356
357 return smb_sendv(server, &iov, 1);
358 }
359
/*
 * Wait until a send credit is available on @server and consume it.
 *
 * CIFS_ASYNC_OP requests (e.g. oplock break handling) must never be
 * held up, so they take a slot immediately even if *credits is already
 * exhausted. CIFS_BLOCKING_OP requests are not counted against the
 * in-flight total at all, since they may legitimately block on the
 * server for a long time.
 *
 * Returns 0 on success, -ENOENT if the tcp session is exiting, or the
 * error from a fatal signal received while waiting.
 */
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int timeout,
		      int *credits)
{
	int rc;

	spin_lock(&server->req_lock);
	if (timeout == CIFS_ASYNC_OP) {
		/* oplock breaks must not be held up */
		server->in_flight++;
		*credits -= 1;
		spin_unlock(&server->req_lock);
		return 0;
	}

	while (1) {
		if (*credits <= 0) {
			/* out of credits: drop the lock and sleep
			   (killable) until credits are available again */
			spin_unlock(&server->req_lock);
			cifs_num_waiters_inc(server);
			rc = wait_event_killable(server->request_q,
						 has_credits(server, credits));
			cifs_num_waiters_dec(server);
			if (rc)
				return rc;
			/* re-take the lock and re-check under it */
			spin_lock(&server->req_lock);
		} else {
			if (server->tcpStatus == CifsExiting) {
				spin_unlock(&server->req_lock);
				return -ENOENT;
			}

			/*
			 * Can not count locking commands against total
			 * as they are allowed to block on server.
			 */

			/* update # of requests on the wire to server */
			if (timeout != CIFS_BLOCKING_OP) {
				*credits -= 1;
				server->in_flight++;
			}
			spin_unlock(&server->req_lock);
			break;
		}
	}
	return 0;
}
407
/*
 * Wrapper for wait_for_free_credits() that selects the credit counter
 * matching @optype through the protocol-specific ops table.
 */
static int
wait_for_free_request(struct TCP_Server_Info *server, const int timeout,
		      const int optype)
{
	return wait_for_free_credits(server, timeout,
				server->ops->get_credits_field(server, optype));
}
415
416 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
417 struct mid_q_entry **ppmidQ)
418 {
419 if (ses->server->tcpStatus == CifsExiting) {
420 return -ENOENT;
421 }
422
423 if (ses->server->tcpStatus == CifsNeedReconnect) {
424 cFYI(1, "tcp session dead - return to caller to retry");
425 return -EAGAIN;
426 }
427
428 if (ses->status != CifsGood) {
429 /* check if SMB session is bad because we are setting it up */
430 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
431 (in_buf->Command != SMB_COM_NEGOTIATE))
432 return -EAGAIN;
433 /* else ok - we are setting up session */
434 }
435 *ppmidQ = AllocMidQEntry(in_buf, ses->server);
436 if (*ppmidQ == NULL)
437 return -ENOMEM;
438 spin_lock(&GlobalMid_Lock);
439 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
440 spin_unlock(&GlobalMid_Lock);
441 return 0;
442 }
443
444 static int
445 wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
446 {
447 int error;
448
449 error = wait_event_freezekillable(server->response_q,
450 midQ->mid_state != MID_REQUEST_SUBMITTED);
451 if (error < 0)
452 return -ERESTARTSYS;
453
454 return 0;
455 }
456
457 struct mid_q_entry *
458 cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
459 {
460 int rc;
461 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
462 struct mid_q_entry *mid;
463
464 /* enable signing if server requires it */
465 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
466 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
467
468 mid = AllocMidQEntry(hdr, server);
469 if (mid == NULL)
470 return ERR_PTR(-ENOMEM);
471
472 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
473 if (rc) {
474 DeleteMidQEntry(mid);
475 return ERR_PTR(rc);
476 }
477
478 return mid;
479 }
480
/*
 * Send a SMB request and set the callback function in the mid to handle
 * the result. Caller is responsible for dealing with timeouts.
 *
 * A send credit is consumed by wait_for_free_request() and is returned
 * (with a wakeup of any credit waiters) on every error path here; on
 * success the credit is held until the response is processed.
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
		mid_receive_t *receive, mid_callback_t *callback,
		void *cbdata, const int flags)
{
	int rc, timeout, optype;
	struct mid_q_entry *mid;

	/* split flags into wait policy and operation type */
	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	rc = wait_for_free_request(server, timeout, optype);
	if (rc)
		return rc;

	/* srv_mutex keeps signing and sending in the same order */
	mutex_lock(&server->srv_mutex);
	mid = server->ops->setup_async_request(server, rqst);
	if (IS_ERR(mid)) {
		mutex_unlock(&server->srv_mutex);
		add_credits(server, 1, optype);
		wake_up(&server->request_q);
		return PTR_ERR(mid);
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->mid_state = MID_REQUEST_SUBMITTED;

	/* put it on the pending_mid_q */
	spin_lock(&GlobalMid_Lock);
	list_add_tail(&mid->qhead, &server->pending_mid_q);
	spin_unlock(&GlobalMid_Lock);


	cifs_in_send_inc(server);
	rc = smb_send_rqst(server, rqst);
	cifs_in_send_dec(server);
	cifs_save_when_sent(mid);
	mutex_unlock(&server->srv_mutex);

	if (rc == 0)
		return 0;

	/* send failed: unlink and free the mid, give the credit back */
	cifs_delete_mid(mid);
	add_credits(server, 1, optype);
	wake_up(&server->request_q);
	return rc;
}
534
535 /*
536 *
537 * Send an SMB Request. No response info (other than return code)
538 * needs to be parsed.
539 *
540 * flags indicate the type of request buffer and how long to wait
541 * and whether to log NT STATUS code (error) before mapping it to POSIX error
542 *
543 */
544 int
545 SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
546 char *in_buf, int flags)
547 {
548 int rc;
549 struct kvec iov[1];
550 int resp_buf_type;
551
552 iov[0].iov_base = in_buf;
553 iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
554 flags |= CIFS_NO_RESP;
555 rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
556 cFYI(DBG2, "SendRcvNoRsp flags %d rc %d", flags, rc);
557
558 return rc;
559 }
560
/*
 * Map the final state of a synchronous mid to an errno.
 *
 * When a response was received, returns 0 WITHOUT freeing the mid --
 * the caller still needs midQ->resp_buf. Every other state frees the
 * mid here and returns an error. An unexpected state additionally
 * unlinks the mid from its queue first (NOTE(review): in the expected
 * error states the mid appears to have been unlinked already by the
 * receive path -- confirm against the demultiplex thread).
 */
static int
cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
	int rc = 0;

	cFYI(1, "%s: cmd=%d mid=%llu state=%d", __func__,
	     le16_to_cpu(mid->command), mid->mid, mid->mid_state);

	spin_lock(&GlobalMid_Lock);
	switch (mid->mid_state) {
	case MID_RESPONSE_RECEIVED:
		spin_unlock(&GlobalMid_Lock);
		return rc;
	case MID_RETRY_NEEDED:
		rc = -EAGAIN;
		break;
	case MID_RESPONSE_MALFORMED:
		rc = -EIO;
		break;
	case MID_SHUTDOWN:
		rc = -EHOSTDOWN;
		break;
	default:
		list_del_init(&mid->qhead);
		cERROR(1, "%s: invalid mid state mid=%llu state=%d", __func__,
		       mid->mid, mid->mid_state);
		rc = -EIO;
	}
	spin_unlock(&GlobalMid_Lock);

	DeleteMidQEntry(mid);
	return rc;
}
594
595 static inline int
596 send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
597 {
598 return server->ops->send_cancel ?
599 server->ops->send_cancel(server, buf, mid) : 0;
600 }
601
/*
 * Validate a received SMB response: dump its first bytes for debugging,
 * verify the signature when the session is signed (a verification
 * failure is only logged, not returned), and map the status code in the
 * header to a POSIX error.
 *
 * @log_error is passed to map_smb_to_linux_error() to control logging
 * of the NT status before mapping.
 */
int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	/* frame length including the 4-byte RFC1002 preamble */
	unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;
		int rc = 0;
		struct smb_rqst rqst = { .rq_iov = &iov,
					 .rq_nvec = 1 };

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		/* responses use the request's sequence number plus one */
		rc = cifs_verify_signature(&rqst, server,
					   mid->sequence_number + 1);
		if (rc)
			cERROR(1, "SMB signature verification returned error = "
			       "%d", rc);
	}

	/* BB special case reconnect tid and uid here? */
	return map_smb_to_linux_error(mid->resp_buf, log_error);
}
630
631 struct mid_q_entry *
632 cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
633 {
634 int rc;
635 struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
636 struct mid_q_entry *mid;
637
638 rc = allocate_mid(ses, hdr, &mid);
639 if (rc)
640 return ERR_PTR(rc);
641 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
642 if (rc) {
643 cifs_delete_mid(mid);
644 return ERR_PTR(rc);
645 }
646 return mid;
647 }
648
/*
 * Send an SMB request built from a kvec array and wait for the
 * response. The request buffer in iov[0] is always released here via
 * cifs_small_buf_release(). On success the response buffer is handed
 * back to the caller in iov[0] with *resp_buf_type indicating small or
 * large buffer; unless CIFS_NO_RESP was set, the caller then owns it.
 */
int
SendReceive2(const unsigned int xid, struct cifs_ses *ses,
	     struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
	     const int flags)
{
	int rc = 0;
	int timeout, optype;
	struct mid_q_entry *midQ;
	char *buf = iov[0].iov_base;
	unsigned int credits = 1;
	struct smb_rqst rqst = { .rq_iov = iov,
				 .rq_nvec = n_vec };

	timeout = flags & CIFS_TIMEOUT_MASK;
	optype = flags & CIFS_OP_MASK;

	*resp_buf_type = CIFS_NO_BUFFER;  /* no response buf yet */

	if ((ses == NULL) || (ses->server == NULL)) {
		cifs_small_buf_release(buf);
		cERROR(1, "Null session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting) {
		cifs_small_buf_release(buf);
		return -ENOENT;
	}

	/*
	 * Ensure that we do not send more than 50 overlapping requests
	 * to the same server. We may make this configurable later or
	 * use ses->maxReq.
	 */

	rc = wait_for_free_request(ses->server, timeout, optype);
	if (rc) {
		cifs_small_buf_release(buf);
		return rc;
	}

	/*
	 * Make sure that we sign in the same order that we send on this socket
	 * and avoid races inside tcp sendmsg code that could cause corruption
	 * of smb data.
	 */

	mutex_lock(&ses->server->srv_mutex);

	midQ = ses->server->ops->setup_request(ses, &rqst);
	if (IS_ERR(midQ)) {
		mutex_unlock(&ses->server->srv_mutex);
		cifs_small_buf_release(buf);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, optype);
		return PTR_ERR(midQ);
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_sendv(ses->server, iov, n_vec);
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);

	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_small_buf_release(buf);
		goto out;
	}

	/* async callers collect the response themselves */
	if (timeout == CIFS_ASYNC_OP) {
		cifs_small_buf_release(buf);
		goto out;
	}

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/*
		 * Interrupted: try to cancel on the server. If the
		 * request is still in flight, hand mid cleanup to the
		 * receive path via the callback and bail out.
		 */
		send_cancel(ses->server, buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			cifs_small_buf_release(buf);
			add_credits(ses->server, 1, optype);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	cifs_small_buf_release(buf);

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, optype);
		return rc;
	}

	if (!midQ->resp_buf || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cFYI(1, "Bad MID state?");
		goto out;
	}

	/* hand the response buffer back to the caller in iov[0] */
	buf = (char *)midQ->resp_buf;
	iov[0].iov_base = buf;
	iov[0].iov_len = get_rfc1002_length(buf) + 4;
	if (midQ->large_buf)
		*resp_buf_type = CIFS_LARGE_BUFFER;
	else
		*resp_buf_type = CIFS_SMALL_BUFFER;

	credits = ses->server->ops->get_credits(midQ);

	rc = ses->server->ops->check_receive(midQ, ses->server,
					     flags & CIFS_LOG_ERROR);

	/* mark it so buf will not be freed by cifs_delete_mid */
	if ((flags & CIFS_NO_RESP) == 0)
		midQ->resp_buf = NULL;
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, credits, optype);

	return rc;
}
775
/*
 * Send a single-buffer SMB request and wait for the response, copying
 * it into @out_buf. *pbytes_returned is set to the RFC1002 length of
 * the response; the copy includes the 4-byte length preamble as well.
 */
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
	    struct smb_hdr *in_buf, struct smb_hdr *out_buf,
	    int *pbytes_returned, const int timeout)
{
	int rc = 0;
	struct mid_q_entry *midQ;

	if (ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	/* reject frames larger than the maximum we could ever send */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, timeout, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		/* Update # of requests on wire to server */
		add_credits(ses->server, 1, 0);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		goto out;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;

	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0)
		goto out;

	/* async callers collect the response themselves */
	if (timeout == CIFS_ASYNC_OP)
		goto out;

	rc = wait_for_response(ses->server, midQ);
	if (rc != 0) {
		/* interrupted: cancel on the server; if still in flight,
		   hand mid cleanup to the receive path via the callback */
		send_cancel(ses->server, in_buf, midQ);
		spin_lock(&GlobalMid_Lock);
		if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
			/* no longer considered to be "in-flight" */
			midQ->callback = DeleteMidQEntry;
			spin_unlock(&GlobalMid_Lock);
			add_credits(ses->server, 1, 0);
			return rc;
		}
		spin_unlock(&GlobalMid_Lock);
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0) {
		add_credits(ses->server, 1, 0);
		return rc;
	}

	if (!midQ->resp_buf || !out_buf ||
	    midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	add_credits(ses->server, 1, 0);

	return rc;
}
881
/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
   blocking lock to return. */

static int
send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
		 struct smb_hdr *in_buf,
		 struct smb_hdr *out_buf)
{
	int bytes_returned;
	struct cifs_ses *ses = tcon->ses;
	LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;

	/* We just modify the current in_buf to change
	   the type of lock from LOCKING_ANDX_SHARED_LOCK
	   or LOCKING_ANDX_EXCLUSIVE_LOCK to
	   LOCKING_ANDX_CANCEL_LOCK. */

	pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
	pSMB->Timeout = 0;
	/* the cancel is a new request, so it gets a fresh mid */
	pSMB->hdr.Mid = get_next_mid(ses->server);

	return SendReceive(xid, ses, in_buf, out_buf,
			   &bytes_returned, 0);
}
906
/*
 * Send a blocking lock request and wait (interruptibly) for the
 * response. Uses CIFS_BLOCKING_OP so the request is not counted
 * against the in-flight credit total (see wait_for_free_credits), and
 * consequently no credit is returned on completion.
 *
 * If a signal arrives while the lock is still pending, the lock is
 * cancelled on the server -- via NT_CANCEL for a trans2 (POSIX) lock,
 * or a LOCKINGX_CANCEL_LOCK request for a Windows lock -- and, once the
 * response does arrive, -ERESTARTSYS is returned (when the final rc is
 * -EACCES) so the system call restarts.
 */
int
SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
			struct smb_hdr *in_buf, struct smb_hdr *out_buf,
			int *pbytes_returned)
{
	int rc = 0;
	int rstart = 0;		/* set when the syscall should restart */
	struct mid_q_entry *midQ;
	struct cifs_ses *ses;

	if (tcon == NULL || tcon->ses == NULL) {
		cERROR(1, "Null smb session");
		return -EIO;
	}
	ses = tcon->ses;

	if (ses->server == NULL) {
		cERROR(1, "Null tcp session");
		return -EIO;
	}

	if (ses->server->tcpStatus == CifsExiting)
		return -ENOENT;

	/* Ensure that we do not send more than 50 overlapping requests
	   to the same server. We may make this configurable later or
	   use ses->maxReq */

	/* reject frames larger than the maximum we could ever send */
	if (be32_to_cpu(in_buf->smb_buf_length) > CIFSMaxBufSize +
			MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "Illegal length, greater than maximum frame, %d",
		       be32_to_cpu(in_buf->smb_buf_length));
		return -EIO;
	}

	rc = wait_for_free_request(ses->server, CIFS_BLOCKING_OP, 0);
	if (rc)
		return rc;

	/* make sure that we sign in the same order that we send on this socket
	   and avoid races inside tcp sendmsg code that could cause corruption
	   of smb data */

	mutex_lock(&ses->server->srv_mutex);

	rc = allocate_mid(ses, in_buf, &midQ);
	if (rc) {
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
	if (rc) {
		cifs_delete_mid(midQ);
		mutex_unlock(&ses->server->srv_mutex);
		return rc;
	}

	midQ->mid_state = MID_REQUEST_SUBMITTED;
	cifs_in_send_inc(ses->server);
	rc = smb_send(ses->server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
	cifs_in_send_dec(ses->server);
	cifs_save_when_sent(midQ);
	mutex_unlock(&ses->server->srv_mutex);

	if (rc < 0) {
		cifs_delete_mid(midQ);
		return rc;
	}

	/* Wait for a reply - allow signals to interrupt. */
	rc = wait_event_interruptible(ses->server->response_q,
		(!(midQ->mid_state == MID_REQUEST_SUBMITTED)) ||
		((ses->server->tcpStatus != CifsGood) &&
		 (ses->server->tcpStatus != CifsNew)));

	/* Were we interrupted by a signal ? */
	if ((rc == -ERESTARTSYS) &&
	    (midQ->mid_state == MID_REQUEST_SUBMITTED) &&
	    ((ses->server->tcpStatus == CifsGood) ||
	     (ses->server->tcpStatus == CifsNew))) {

		if (in_buf->Command == SMB_COM_TRANSACTION2) {
			/* POSIX lock. We send a NT_CANCEL SMB to cause the
			   blocking lock to return. */
			rc = send_cancel(ses->server, in_buf, midQ);
			if (rc) {
				cifs_delete_mid(midQ);
				return rc;
			}
		} else {
			/* Windows lock. We send a LOCKINGX_CANCEL_LOCK
			   to cause the blocking lock to return. */

			rc = send_lock_cancel(xid, tcon, in_buf, out_buf);

			/* If we get -ENOLCK back the lock may have
			   already been removed. Don't exit in this case. */
			if (rc && rc != -ENOLCK) {
				cifs_delete_mid(midQ);
				return rc;
			}
		}

		/* wait (uninterruptibly this time) for the response to
		   the now-cancelled lock */
		rc = wait_for_response(ses->server, midQ);
		if (rc) {
			send_cancel(ses->server, in_buf, midQ);
			spin_lock(&GlobalMid_Lock);
			if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
				/* no longer considered to be "in-flight" */
				midQ->callback = DeleteMidQEntry;
				spin_unlock(&GlobalMid_Lock);
				return rc;
			}
			spin_unlock(&GlobalMid_Lock);
		}

		/* We got the response - restart system call. */
		rstart = 1;
	}

	rc = cifs_sync_mid_result(midQ, ses->server);
	if (rc != 0)
		return rc;

	/* rcvd frame is ok */
	if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_RECEIVED) {
		rc = -EIO;
		cERROR(1, "Bad MID state?");
		goto out;
	}

	*pbytes_returned = get_rfc1002_length(midQ->resp_buf);
	memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
	rc = cifs_check_receive(midQ, ses->server, 0);
out:
	cifs_delete_mid(midQ);
	if (rstart && rc == -EACCES)
		return -ERESTARTSYS;
	return rc;
}
This page took 0.053202 seconds and 5 git commands to generate.