fs: cifs: use kmemdup instead of kmalloc + memcpy
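The commit title refers to the common kernel cleanup of collapsing an open-coded kmalloc() followed by memcpy() into a single kmemdup() call. The sketch below illustrates that general pattern only; the names dst, src, and len are hypothetical and are not identifiers taken from this file.

	/* Illustrative pattern only: dst, src, and len are hypothetical names. */

	/* Before: allocate, check, then copy by hand. */
	dst = kmalloc(len, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;
	memcpy(dst, src, len);

	/* After: kmemdup() allocates and copies in one call. */
	dst = kmemdup(src, len, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

The conversion keeps the same GFP flags and error handling while removing one step that is easy to get wrong (for example, copying a different length than was allocated).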
fs/cifs/misc.c
/*
 * fs/cifs/misc.c
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#ifdef CONFIG_CIFS_SMB2
#include "smb2pdu.h"
#endif

extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cFYI(1, "warning: more than 65000 requests active");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		ret_buf->status = CifsNew;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to sesInfoFree");
		return;
	}

	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree(buf_to_free);
}

struct cifs_tcon *
tconInfoAlloc(void)
{
	struct cifs_tcon *ret_buf;
	ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&tconInfoAllocCount);
		ret_buf->tidStatus = CifsNew;
		++ret_buf->tc_count;
		INIT_LIST_HEAD(&ret_buf->openFileList);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
#ifdef CONFIG_CIFS_STATS
		spin_lock_init(&ret_buf->stat_lock);
#endif
	}
	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *buf_to_free)
{
	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to tconInfoFree");
		return;
	}
	atomic_dec(&tconInfoAllocCount);
	kfree(buf_to_free->nativeFileSystem);
	if (buf_to_free->password) {
		memset(buf_to_free->password, 0, strlen(buf_to_free->password));
		kfree(buf_to_free->password);
	}
	kfree(buf_to_free);
}

struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;
	size_t buf_size = sizeof(struct smb_hdr);

#ifdef CONFIG_CIFS_SMB2
	/*
	 * The SMB2 header is bigger than the CIFS one, so clearing the
	 * extra bytes does no harm for CIFS.
	 */
	buf_size = sizeof(struct smb2_hdr);
#endif
	/*
	 * We could use the negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc the same size,
	 * albeit slightly larger than necessary, and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	if (ret_buf) {
		memset(ret_buf, 0, buf_size + 3);
		atomic_inc(&bufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */
	}

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cFYI(1, "Null buffer passed to cifs_buf_release");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&bufAllocCount);
	return;
}

struct smb_hdr *
cifs_small_buf_get(void)
{
	struct smb_hdr *ret_buf = NULL;

/* We could use the negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc the same size,
   albeit slightly larger than necessary, and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	if (ret_buf) {
		/* No need to clear memory here, cleared in header assemble */
		/* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
		atomic_inc(&smBufAllocCount);
#ifdef CONFIG_CIFS_STATS2
		atomic_inc(&totSmBufAllocCount);
#endif /* CONFIG_CIFS_STATS2 */

	}
	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cFYI(1, "Null buffer passed to cifs_small_buf_release");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&smBufAllocCount);
	return;
}

/* NB: MID can not be set if treeCon not passed in, in that
   case it is the responsibility of the caller to set the mid */
void
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
		const struct cifs_tcon *treeCon, int word_count
		/* length of fixed section (word count) in two byte units */)
{
	char *temp = (char *) buffer;

	memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */

	buffer->smb_buf_length = cpu_to_be32(
	    (2 * word_count) + sizeof(struct smb_hdr) -
	    4 /* RFC 1001 length field does not count */ +
	    2 /* for bcc field itself */);

	buffer->Protocol[0] = 0xFF;
	buffer->Protocol[1] = 'S';
	buffer->Protocol[2] = 'M';
	buffer->Protocol[3] = 'B';
	buffer->Command = smb_command;
	buffer->Flags = 0x00;	/* case sensitive */
	buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
	buffer->Pid = cpu_to_le16((__u16)current->tgid);
	buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
	if (treeCon) {
		buffer->Tid = treeCon->tid;
		if (treeCon->ses) {
			if (treeCon->ses->capabilities & CAP_UNICODE)
				buffer->Flags2 |= SMBFLG2_UNICODE;
			if (treeCon->ses->capabilities & CAP_STATUS32)
				buffer->Flags2 |= SMBFLG2_ERR_STATUS;

			/* Uid is not converted */
			buffer->Uid = treeCon->ses->Suid;
			buffer->Mid = get_next_mid(treeCon->ses->server);
		}
		if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
			buffer->Flags2 |= SMBFLG2_DFS;
		if (treeCon->nocase)
			buffer->Flags |= SMBFLG_CASELESS;
		if ((treeCon->ses) && (treeCon->ses->server))
			if (treeCon->ses->server->sec_mode &
			  (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
				buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
	}

/* endian conversion of flags is now done just before sending */
	buffer->WordCount = (char) word_count;
	return;
}

static int
check_smb_hdr(struct smb_hdr *smb, __u16 mid)
{
	/* does it have the right SMB "signature" ? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cERROR(1, "Bad protocol string signature header 0x%x",
			*(unsigned int *)smb->Protocol);
		return 1;
	}

	/* Make sure that message ids match */
	if (mid != smb->Mid) {
		cERROR(1, "Mids do not match. received=%u expected=%u",
			smb->Mid, mid);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	cERROR(1, "Server sent request, not response. mid=%u", smb->Mid);
	return 1;
}

int
checkSMB(char *buf, unsigned int total_read)
{
	struct smb_hdr *smb = (struct smb_hdr *)buf;
	__u16 mid = smb->Mid;
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len;  /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
		total_read, rfclen);

	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
			    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
			/* First, check if the part of bcc they sent was zero */
			if (tmp[sizeof(struct smb_hdr)] == 0) {
				/* some servers return only half of bcc
				 * on simple responses (wct, bcc both zero)
				 * in particular have seen this on
				 * ulogoffX and FindClose. This leaves
				 * one byte of bcc potentially uninitialized
				 */
				/* zero rest of bcc */
				tmp[sizeof(struct smb_hdr)+1] = 0;
				return 0;
			}
			cERROR(1, "rcvd invalid byte count (bcc)");
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb, mid))
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + rfclen != total_read) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			rfclen);
		return -EIO;
	}

	if (4 + rfclen != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
			clc_len, 4 + rfclen, smb->Mid);

		if (4 + rfclen < clc_len) {
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
				rfclen, smb->Mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
			 * indicate on certain calls (byte range locks and
			 * trans2 find first calls in particular). While the
			 * client can handle such a frame by ignoring the
			 * trailing data, we choose to limit the amount of
			 * extra data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
				"than SMB for mid=%u", rfclen, smb->Mid);
			return -EIO;
		}
	}
	return 0;
}

bool
is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
{
	struct smb_hdr *buf = (struct smb_hdr *)buffer;
	struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
	struct list_head *tmp, *tmp1, *tmp2;
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *pCifsInode;
	struct cifsFileInfo *netfile;

	cFYI(1, "Checking for oplock break or dnotify response");
	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
	   (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
		struct smb_com_transaction_change_notify_rsp *pSMBr =
			(struct smb_com_transaction_change_notify_rsp *)buf;
		struct file_notify_information *pnotify;
		__u32 data_offset = 0;
		if (get_bcc(buf) > sizeof(struct file_notify_information)) {
			data_offset = le32_to_cpu(pSMBr->DataOffset);

			pnotify = (struct file_notify_information *)
				((char *)&pSMBr->hdr.Protocol + data_offset);
			cFYI(1, "dnotify on %s Action: 0x%x",
				 pnotify->FileName, pnotify->Action);
			/* cifs_dump_mem("Rcvd notify Data: ",buf,
				sizeof(struct smb_hdr)+60); */
			return true;
		}
		if (pSMBr->hdr.Status.CifsError) {
			cFYI(1, "notify err 0x%d",
				pSMBr->hdr.Status.CifsError);
			return true;
		}
		return false;
	}
	if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
		return false;
	if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
		/* no sense logging an error on an invalid handle on oplock
		   break - a harmless race between a close request and an
		   oplock break response is expected from time to time when
		   writing out large dirty files cached on the client */
		if ((NT_STATUS_INVALID_HANDLE) ==
		   le32_to_cpu(pSMB->hdr.Status.CifsError)) {
			cFYI(1, "invalid handle on oplock break");
			return true;
		} else if (ERRbadfid ==
		   le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
			return true;
		} else {
			return false; /* on valid oplock brk we get "request" */
		}
	}
	if (pSMB->hdr.WordCount != 8)
		return false;

	cFYI(1, "oplock type 0x%d level 0x%d",
		 pSMB->LockType, pSMB->OplockLevel);
	if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
		return false;

	/* look up tcon based on tid & uid */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each(tmp, &srv->smb_ses_list) {
		ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
		list_for_each(tmp1, &ses->tcon_list) {
			tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
			if (tcon->tid != buf->Tid)
				continue;

			cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
			spin_lock(&cifs_file_list_lock);
			list_for_each(tmp2, &tcon->openFileList) {
				netfile = list_entry(tmp2, struct cifsFileInfo,
						     tlist);
				if (pSMB->Fid != netfile->fid.netfid)
					continue;

				cFYI(1, "file id match, oplock break");
				pCifsInode = CIFS_I(netfile->dentry->d_inode);

				cifs_set_oplock_level(pCifsInode,
					pSMB->OplockLevel ? OPLOCK_READ : 0);
				queue_work(cifsiod_wq,
					   &netfile->oplock_break);
				netfile->oplock_break_cancelled = false;

				spin_unlock(&cifs_file_list_lock);
				spin_unlock(&cifs_tcp_ses_lock);
				return true;
			}
			spin_unlock(&cifs_file_list_lock);
			spin_unlock(&cifs_tcp_ses_lock);
			cFYI(1, "No matching file for oplock break");
			return true;
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
	cFYI(1, "Can not process oplock break for non-existent connection");
	return true;
}

void
dump_smb(void *buf, int smb_buf_length)
{
	int i, j;
	char debug_line[17];
	unsigned char *buffer = buf;

	if (traceSMB == 0)
		return;

	for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
		if (i % 8 == 0) {
			/* have reached the beginning of line */
			printk(KERN_DEBUG "| ");
			j = 0;
		}
		printk("%0#4x ", buffer[i]);
		debug_line[2 * j] = ' ';
		if (isprint(buffer[i]))
			debug_line[1 + (2 * j)] = buffer[i];
		else
			debug_line[1 + (2 * j)] = '_';

		if (i % 8 == 7) {
			/* reached end of line, time to print ascii */
			debug_line[16] = 0;
			printk(" | %s\n", debug_line);
		}
	}
	for (; j < 8; j++) {
		printk(" ");
		debug_line[2 * j] = ' ';
		debug_line[1 + (2 * j)] = ' ';
	}
	printk(" | %s\n", debug_line);
	return;
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cERROR(1, "Autodisabling the use of server inode numbers on "
			   "%s. This server doesn't seem to support them "
			   "properly. Hardlinks will not be recognized on this "
			   "mount. Consider mounting with the \"noserverino\" "
			   "option to silence this message.",
			   cifs_sb_master_tcon(cifs_sb)->treeName);
	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->clientCanCacheAll = true;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Exclusive Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = true;
		cFYI(1, "Level II Oplock granted on inode %p",
		     &cinode->vfs_inode);
	} else {
		cinode->clientCanCacheAll = false;
		cinode->clientCanCacheRead = false;
	}
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->mnt_backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&cifs_file_list_lock);
	list_del(&open->olist);
	spin_unlock(&cifs_file_list_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
#ifdef CONFIG_CIFS_SMB2
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
#endif
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&cifs_file_list_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&cifs_file_list_lock);
}