/*
 *   fs/cifs/cifsacl.c
 *
 *   Copyright (C) International Business Machines  Corp., 2007,2008
 *   Author(s): Steve French (sfrench@us.ibm.com)
 *
 *   Contains the routines for mapping CIFS/NTFS ACLs
 *
 *   This library is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published
 *   by the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this library; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"

/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
	1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
	1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2, {0, 0, 0, 0, 0, 5}, {} };

const struct cred *root_cred;

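/*
 * Walk one id-mapping rbtree and erase entries that are past SID_MAP_EXPIRE
 * and unreferenced; entries that are kept (still referenced, not yet
 * expired, or skipped because nr_to_scan was exhausted) are counted in
 * *nr_rem and erased ones in *nr_del.  Callers hold the spinlock that
 * protects @root.
 */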
static void
shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
			int *nr_del)
{
	struct rb_node *node;
	struct rb_node *tmp;
	struct cifs_sid_id *psidid;

	node = rb_first(root);
	while (node) {
		tmp = node;
		node = rb_next(tmp);
		psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
		if (nr_to_scan == 0 || *nr_del == nr_to_scan)
			++(*nr_rem);
		else {
			if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
						&& psidid->refcount == 0) {
				rb_erase(tmp, root);
				++(*nr_del);
			} else
				++(*nr_rem);
		}
	}
}

/*
 * Run idmap cache shrinker.
 */
static int
cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
{
	int nr_to_scan = sc->nr_to_scan;
	int nr_del = 0;
	int nr_rem = 0;
	struct rb_root *root;

	root = &uidtree;
	spin_lock(&siduidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&sidgidlock);

	root = &siduidtree;
	spin_lock(&uidsidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&uidsidlock);

	root = &sidgidtree;
	spin_lock(&gidsidlock);
	shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
	spin_unlock(&gidsidlock);

	return nr_rem;
}

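/*
 * Insert a preallocated node for numeric id @cid into an rbtree keyed by id.
 * The tree is deliberately ordered with larger ids to the left, matching
 * sid_rb_search() below.  The node's key string ("oi:<id>" or "gi:<id>")
 * is what later gets handed to request_key() for the upcall.
 */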
static void
sid_rb_insert(struct rb_root *root, unsigned long cid,
		struct cifs_sid_id **psidid, char *typestr)
{
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		if (cid > lsidid->id) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (cid < lsidid->id) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
	}

	(*psidid)->id = cid;
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sprintf(strptr, "%ld", cid);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}

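/*
 * Look up the cached node for numeric id @cid; returns NULL on a miss.
 * Uses the same (reversed) ordering as sid_rb_insert().
 */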
static struct cifs_sid_id *
sid_rb_search(struct rb_root *root, unsigned long cid)
{
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		if (cid > lsidid->id)
			node = node->rb_left;
		else if (cid < lsidid->id)
			node = node->rb_right;
		else /* node found */
			return lsidid;
	}

	return NULL;
}

static struct shrinker cifs_shrinker = {
	.shrink = cifs_idmap_shrinker,
	.seeks = DEFAULT_SEEKS,
};

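/*
 * Instantiate a cifs.idmap key: stash a kmalloc'd copy of the payload
 * that the userspace request-key handler supplied via add_key().
 */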
static int
cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	char *payload;

	payload = kmalloc(datalen, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;

	memcpy(payload, data, datalen);
	key->payload.data = payload;
	key->datalen = datalen;
	return 0;
}

static inline void
cifs_idmap_key_destroy(struct key *key)
{
	kfree(key->payload.data);
}

struct key_type cifs_idmap_key_type = {
	.name = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy = cifs_idmap_key_destroy,
	.describe = user_describe,
	.match = user_match,
};

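/*
 * Render a binary SID in the conventional "S-<rev>-<auth>-<subauth>-..."
 * form, e.g. "S-1-5-21-2470586399-1079700049-1449169490-1000" (an
 * illustrative value, not one from this code).  Note this is a simplified
 * formatter: it prints each nonzero byte of the 48-bit identifier
 * authority separately rather than as one combined value.
 */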
static void
sid_to_str(struct cifs_sid *sidptr, char *sidstr)
{
	int i;
	unsigned long saval;
	char *strptr;

	strptr = sidstr;

	sprintf(strptr, "%s", "S");
	strptr = sidstr + strlen(sidstr);

	sprintf(strptr, "-%d", sidptr->revision);
	strptr = sidstr + strlen(sidstr);

	for (i = 0; i < 6; ++i) {
		if (sidptr->authority[i]) {
			sprintf(strptr, "-%d", sidptr->authority[i]);
			strptr = sidstr + strlen(sidstr);
		}
	}

	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		sprintf(strptr, "-%ld", saval);
		strptr = sidstr + strlen(sidstr);
	}
}

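/*
 * Insert a preallocated node keyed by binary SID (ordered via
 * compare_sids()) into an rbtree.  The node's key string is the type
 * prefix ("os:" or "gs:") followed by the string form of the SID, ready
 * for the request_key() upcall.
 */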
static void
id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
		struct cifs_sid_id **psidid, char *typestr)
{
	int rc;
	char *strptr;
	struct rb_node *node = root->rb_node;
	struct rb_node *parent = NULL;
	struct rb_node **linkto = &(root->rb_node);
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		parent = node;
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			linkto = &(node->rb_left);
			node = node->rb_left;
		} else if (rc < 0) {
			linkto = &(node->rb_right);
			node = node->rb_right;
		}
	}

	memcpy(&(*psidid)->sid, sidptr, sizeof(struct cifs_sid));
	(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
	(*psidid)->refcount = 0;

	sprintf((*psidid)->sidstr, "%s", typestr);
	strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
	sid_to_str(&(*psidid)->sid, strptr);

	clear_bit(SID_ID_PENDING, &(*psidid)->state);
	clear_bit(SID_ID_MAPPED, &(*psidid)->state);

	rb_link_node(&(*psidid)->rbnode, parent, linkto);
	rb_insert_color(&(*psidid)->rbnode, root);
}

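/*
 * Look up a cached node by binary SID; returns NULL on a miss.
 */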
static struct cifs_sid_id *
id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
{
	int rc;
	struct rb_node *node = root->rb_node;
	struct cifs_sid_id *lsidid;

	while (node) {
		lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
		rc = compare_sids(sidptr, &((lsidid)->sid));
		if (rc > 0) {
			node = node->rb_left;
		} else if (rc < 0) {
			node = node->rb_right;
		} else /* node found */
			return lsidid;
	}

	return NULL;
}

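/*
 * wait_on_bit() action: sleep until the task holding SID_ID_PENDING wakes
 * us, returning -ERESTARTSYS if a signal arrived instead.
 */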
static int
sidid_pending_wait(void *unused)
{
	schedule();
	return signal_pending(current) ? -ERESTARTSYS : 0;
}

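/*
 * Map a numeric uid/gid to a SID for a chown/chgrp request.  The
 * appropriate id-to-SID cache is consulted first; on a miss (or stale
 * entry) one task performs the request_key() upcall while any racing
 * tasks wait on the SID_ID_PENDING bit.  On failure the entry is left
 * unmapped and is not retried until SID_MAP_RETRY jiffies have passed.
 */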
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc = 0;
	struct key *sidkey;
	const struct cred *saved_cred;
	struct cifs_sid *lsid;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -EINVAL;

	spin_lock(cidlock);
	psidid = sid_rb_search(cidtree, cid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = sid_rb_search(cidtree, cid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			sid_rb_insert(cidtree, cid, &psidid,
					sidtype == SIDOWNER ? "oi:" : "gi:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case; the shrinker will not erase
	 * this node until all references are put, and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		psidid->time = jiffies; /* update ts for accessing */
		goto id_sid_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
		rc = -EINVAL;
		goto id_sid_out;
	}

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(sidkey)) {
			rc = -EINVAL;
			cFYI(1, "%s: Can't map an id to a SID", __func__);
		} else {
			lsid = (struct cifs_sid *)sidkey->payload.data;
			memcpy(&psidid->sid, lsid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			memcpy(ssid, &psidid->sid,
				sidkey->datalen < sizeof(struct cifs_sid) ?
				sidkey->datalen : sizeof(struct cifs_sid));
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(sidkey);
			kfree(psidid->sidstr);
		}
		psidid->time = jiffies; /* update ts for accessing */
		revert_creds(saved_cred);
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount;
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
		else
			rc = -EINVAL;
	}
id_sid_out:
	--psidid->refcount;
	return rc;
}

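/*
 * Map a SID from a security descriptor to a local uid or gid in @fattr,
 * falling back to the mount's default uid/gid when no mapping can be
 * obtained.  Uses the same cache-then-upcall scheme as id_to_sid().
 * Note: the "*(unsigned long *)idkey->payload.value" read below relies
 * on payload.value aliasing the payload.data pointer stored at key
 * instantiation, so it reads the id out of the key's payload buffer.
 */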
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
		struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	unsigned long cid;
	struct key *idkey;
	const struct cred *saved_cred;
	struct cifs_sid_id *psidid, *npsidid;
	struct rb_root *cidtree;
	spinlock_t *cidlock;

	if (sidtype == SIDOWNER) {
		cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
		cidlock = &siduidlock;
		cidtree = &uidtree;
	} else if (sidtype == SIDGROUP) {
		cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
		cidlock = &sidgidlock;
		cidtree = &gidtree;
	} else
		return -ENOENT;

	spin_lock(cidlock);
	psidid = id_rb_search(cidtree, psid);

	if (!psidid) { /* node does not exist, allocate one & attempt adding */
		spin_unlock(cidlock);
		npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
		if (!npsidid)
			return -ENOMEM;

		npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
		if (!npsidid->sidstr) {
			kfree(npsidid);
			return -ENOMEM;
		}

		spin_lock(cidlock);
		psidid = id_rb_search(cidtree, psid);
		if (psidid) { /* node happened to get inserted meanwhile */
			++psidid->refcount;
			spin_unlock(cidlock);
			kfree(npsidid->sidstr);
			kfree(npsidid);
		} else {
			psidid = npsidid;
			id_rb_insert(cidtree, psid, &psidid,
					sidtype == SIDOWNER ? "os:" : "gs:");
			++psidid->refcount;
			spin_unlock(cidlock);
		}
	} else {
		++psidid->refcount;
		spin_unlock(cidlock);
	}

	/*
	 * If we are here, it is safe to access psidid and its fields
	 * since a reference was taken earlier while holding the spinlock.
	 * A reference on the node is put without holding the spinlock
	 * and it is OK to do so in this case; the shrinker will not erase
	 * this node until all references are put, and we do not access
	 * any fields of the node after a reference is put.
	 */
	if (test_bit(SID_ID_MAPPED, &psidid->state)) {
		cid = psidid->id;
		psidid->time = jiffies; /* update ts for accessing */
		goto sid_to_id_out;
	}

	if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
		goto sid_to_id_out;

	if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
		saved_cred = override_creds(root_cred);
		idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
		if (IS_ERR(idkey))
			cFYI(1, "%s: Can't map SID to an id", __func__);
		else {
			cid = *(unsigned long *)idkey->payload.value;
			psidid->id = cid;
			set_bit(SID_ID_MAPPED, &psidid->state);
			key_put(idkey);
			kfree(psidid->sidstr);
		}
		revert_creds(saved_cred);
		psidid->time = jiffies; /* update ts for accessing */
		clear_bit(SID_ID_PENDING, &psidid->state);
		wake_up_bit(&psidid->state, SID_ID_PENDING);
	} else {
		rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
				sidid_pending_wait, TASK_INTERRUPTIBLE);
		if (rc) {
			cFYI(1, "%s: sidid_pending_wait interrupted %d",
					__func__, rc);
			--psidid->refcount; /* decremented without spinlock */
			return rc;
		}
		if (test_bit(SID_ID_MAPPED, &psidid->state))
			cid = psidid->id;
	}

sid_to_id_out:
	--psidid->refcount; /* decremented without spinlock */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = cid;
	else
		fattr->cf_gid = cid;

	return 0;
}

int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	cFYI(1, "Registering the %s key type\n", cifs_idmap_key_type.name);

	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
			    (KEY_POS_ALL & ~KEY_POS_SETATTR) |
			    KEY_USR_VIEW | KEY_USR_READ,
			    KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	root_cred = cred;

	spin_lock_init(&siduidlock);
	uidtree = RB_ROOT;
	spin_lock_init(&sidgidlock);
	gidtree = RB_ROOT;

	spin_lock_init(&uidsidlock);
	siduidtree = RB_ROOT;
	spin_lock_init(&gidsidlock);
	sidgidtree = RB_ROOT;
	register_shrinker(&cifs_shrinker);

	cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}

void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	unregister_shrinker(&cifs_shrinker);
	cFYI(1, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}

void
cifs_destroy_idmaptrees(void)
{
	struct rb_root *root;
	struct rb_node *node;

	root = &uidtree;
	spin_lock(&siduidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&siduidlock);

	root = &gidtree;
	spin_lock(&sidgidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&sidgidlock);

	root = &siduidtree;
	spin_lock(&uidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&uidsidlock);

	root = &sidgidtree;
	spin_lock(&gidsidlock);
	while ((node = rb_first(root)))
		rb_erase(node, root);
	spin_unlock(&gidsidlock);
}

/*
 * compare_sids - memcmp-style comparison of two SIDs (roughly equivalent
 * to a UUID for a user or group).  Returns 0 when the SIDs match, 1 when
 * ctsid compares greater (or either pointer is NULL), and -1 when it
 * compares less.
 */
int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i;
	int num_subauth, num_sat, num_saw;

	if ((!ctsid) || (!cwsid))
		return 1;

	/* compare the revision */
	if (ctsid->revision != cwsid->revision) {
		if (ctsid->revision > cwsid->revision)
			return 1;
		else
			return -1;
	}

	/* compare all of the six auth values */
	for (i = 0; i < 6; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i]) {
			if (ctsid->authority[i] > cwsid->authority[i])
				return 1;
			else
				return -1;
		}
	}

	/* compare all of the subauth values if any */
	num_sat = ctsid->num_subauth;
	num_saw = cwsid->num_subauth;
	num_subauth = num_sat < num_saw ? num_sat : num_saw;
	if (num_subauth) {
		for (i = 0; i < num_subauth; ++i) {
			if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
				if (le32_to_cpu(ctsid->sub_auth[i]) >
					le32_to_cpu(cwsid->sub_auth[i]))
					return 1;
				else
					return -1;
			}
		}
	}

	return 0; /* sids compare/match */
}


/* copy ntsd, owner sid, and group sid from a security descriptor to another */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
	int i;

	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;

	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0;
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));

	/* copy owner sid */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);

	nowner_sid_ptr->revision = owner_sid_ptr->revision;
	nowner_sid_ptr->num_subauth = owner_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		nowner_sid_ptr->authority[i] = owner_sid_ptr->authority[i];
	for (i = 0; i < 5; i++) /* copy all five sub_auth slots */
		nowner_sid_ptr->sub_auth[i] = owner_sid_ptr->sub_auth[i];

	/* copy group sid */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));

	ngroup_sid_ptr->revision = group_sid_ptr->revision;
	ngroup_sid_ptr->num_subauth = group_sid_ptr->num_subauth;
	for (i = 0; i < 6; i++)
		ngroup_sid_ptr->authority[i] = group_sid_ptr->authority[i];
	for (i = 0; i < 5; i++)
		ngroup_sid_ptr->sub_auth[i] = group_sid_ptr->sub_auth[i];

	return;
}


/*
 * Change posix mode to reflect permissions.
 * pmode is the existing mode (we only want to overwrite part of this).
 * bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
 */
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
{
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important.  The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superfluous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */

	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;

		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;
		return;
	} else if (type != ACCESS_ALLOWED) {
		cERROR(1, "unknown access control type %d", type);
		return;
	}
	/* else ACCESS_ALLOWED type */

	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cFYI(DBG2, "all perms");
		return;
	}
	if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));

	cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
	return;
}

/*
 * Generate access flags to reflect permissions; mode is the existing mode.
 * This function is called for every ACE in the DACL whose SID matches
 * with either owner or group or everyone.
 */
static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
				__u32 *pace_flags)
{
	/* reset access mask */
	*pace_flags = 0x0;

	/* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
	mode &= bits_to_use;

	/* check for R/W/X UGO since we do not know whose flags
	   these are, but we have cleared all the bits sans RWX for
	   either user or group or other as per bits_to_use */
	if (mode & S_IRUGO)
		*pace_flags |= SET_FILE_READ_RIGHTS;
	if (mode & S_IWUGO)
		*pace_flags |= SET_FILE_WRITE_RIGHTS;
	if (mode & S_IXUGO)
		*pace_flags |= SET_FILE_EXEC_RIGHTS;

	cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
	return;
}

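/*
 * Fill in one ACCESS_ALLOWED ACE granting @psid the rights implied by the
 * relevant third of @nmode, and return the ACE's on-the-wire size:
 * type(1) + flags(1) + size(2) + access_req(4) + SID revision(1) +
 * num_subauth(1) + authority(6) + 4 bytes per sub-authority.
 */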
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
	int i;
	__u16 size = 0;
	__u32 access_req = 0;

	pntace->type = ACCESS_ALLOWED;
	pntace->flags = 0x0;
	mode_to_access_flags(nmode, bits, &access_req);
	if (!access_req)
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);

	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < 6; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];

	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);

	return size;
}


#ifdef CONFIG_CIFS_DEBUG2
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
		   num auths and therefore go off the end */
	}

	return;
}
#endif


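/*
 * Walk the DACL's ACEs and fold each one that matches the owner, group,
 * everyone, or authenticated-users SID into fattr->cf_mode.  A missing
 * DACL means no restrictions (mode 0777); a DACL with zero ACEs means
 * no access at all.
 */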
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;

	/* BB need to add parm so we can store the SID BB */

	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}

	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cERROR(1, "ACL too small to parse DACL");
		return;
	}

	cFYI(DBG2, "DACL revision %d size %d num aces %d",
		le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		le32_to_cpu(pdacl->num_aces));

	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);

	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);

	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;

		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace) {
			cERROR(1, "DACL memory allocation error");
			return;
		}

		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);

/*			memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */

			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}

		kfree(ppace);
	}

	return;
}


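/*
 * Build the replacement DACL used for chmod: exactly three ACCESS_ALLOWED
 * ACEs, one each for the owner SID (user bits), group SID (group bits),
 * and everyone (other bits).
 */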
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	u16 size = 0;
	struct cifs_acl *pnndacl;

	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));

	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
					pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
					&sid_everyone, nmode, S_IRWXO);

	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);

	return 0;
}


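/*
 * Sanity-check that a SID lies within the buffer; a SID is at least 8
 * bytes (revision + num_subauth + 6 authority bytes) even with no
 * sub-authorities.  Debug builds also dump the SID fields.
 */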
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */

	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID) */
	if (end_of_acl < (char *)psid + 8) {
		cERROR(1, "ACL too small to parse SID %p", psid);
		return -EINVAL;
	}

	if (psid->num_subauth) {
#ifdef CONFIG_CIFS_DEBUG2
		int i;
		cFYI(1, "SID revision %d num_auth %d",
			psid->revision, psid->num_subauth);

		for (i = 0; i < psid->num_subauth; i++) {
			cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
				le32_to_cpu(psid->sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
		   num auths and therefore go off the end */
		cFYI(1, "RID 0x%x",
			le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
#endif
	}

	return 0;
}


/* Convert CIFS ACL to POSIX form */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
	int rc = 0;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
	char *end_of_acl = ((char *)pntsd) + acl_len;
	__u32 dacloffset;

	if (pntsd == NULL)
		return -EIO;

	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	dacloffset = le32_to_cpu(pntsd->dacloffset);
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
	cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
		 "sacloffset 0x%x dacloffset 0x%x",
		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
		 le32_to_cpu(pntsd->gsidoffset),
		 le32_to_cpu(pntsd->sacloffset), dacloffset);
/*	cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
	rc = parse_sid(owner_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
		return rc;
	}

	rc = parse_sid(group_sid_ptr, end_of_acl);
	if (rc) {
		cFYI(1, "%s: Error %d parsing Group SID", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
	if (rc) {
		cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
		return rc;
	}

	if (dacloffset)
		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
			   group_sid_ptr, fattr);
	else
		cFYI(1, "no ACL"); /* BB grant all or default perms? */

	return rc;
}

/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
	__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */

	if (nmode != NO_CHANGE_64) { /* chmod */
		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
		dacloffset = le32_to_cpu(pntsd->dacloffset);
		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
		ndacloffset = sizeof(struct cifs_ntsd);
		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
		ndacl_ptr->revision = dacl_ptr->revision;
		ndacl_ptr->size = 0;
		ndacl_ptr->num_aces = 0;

		rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
				    nmode);
		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
		/* copy sec desc control portion & owner and group sids */
		copy_sec_desc(pntsd, pnntsd, sidsoffset);
		*aclflag = CIFS_ACL_DACL;
	} else {
		memcpy(pnntsd, pntsd, secdesclen);
		if (uid != NO_CHANGE_32) { /* chown */
			owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->osidoffset));
			nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!nowner_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for owner id %d",
						__func__, rc, uid);
				kfree(nowner_sid_ptr);
				return rc;
			}
			memcpy(owner_sid_ptr, nowner_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(nowner_sid_ptr);
			*aclflag = CIFS_ACL_OWNER;
		}
		if (gid != NO_CHANGE_32) { /* chgrp */
			group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->gsidoffset));
			ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
								GFP_KERNEL);
			if (!ngroup_sid_ptr)
				return -ENOMEM;
			rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
			if (rc) {
				cFYI(1, "%s: Mapping error %d for group id %d",
						__func__, rc, gid);
				kfree(ngroup_sid_ptr);
				return rc;
			}
			memcpy(group_sid_ptr, ngroup_sid_ptr,
					sizeof(struct cifs_sid));
			kfree(ngroup_sid_ptr);
			*aclflag = CIFS_ACL_GROUP;
		}
	}

	return rc;
}

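/*
 * Fetch the raw security descriptor for an already-open file handle.
 * Returns the kmalloc'd descriptor (caller frees) or an ERR_PTR.
 */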
static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		__u16 fid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int xid, rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	xid = GetXid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
	FreeXid(xid);

	cifs_put_tlink(tlink);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	int xid, rc, create_options = 0;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return ERR_CAST(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid);
	}

	cifs_put_tlink(tlink);
	FreeXid(xid);

	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}

/* Retrieve an ACL from the server */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	struct cifsFileInfo *open_file = NULL;

	if (inode)
		open_file = find_readable_file(CIFS_I(inode), true);
	if (!open_file)
		return get_cifs_acl_by_path(cifs_sb, path, pacllen);

	pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->netfid, pacllen);
	cifsFileInfo_put(open_file);
	return pntsd;
}

/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path, int aclflag)
{
	int oplock = 0;
	int xid, rc, access_flags, create_options = 0;
	__u16 fid;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);

	tcon = tlink_tcon(tlink);
	xid = GetXid();

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
		access_flags = WRITE_OWNER;
	else
		access_flags = WRITE_DAC;

	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
			create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
			cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
	if (rc) {
		cERROR(1, "Unable to open file to set ACL");
		goto out;
	}

	rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
	cFYI(DBG2, "SetCIFSACL rc = %d", rc);

	CIFSSMBClose(xid, tcon, fid);
out:
	FreeXid(xid);
	cifs_put_tlink(tlink);
	return rc;
}

/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path, const __u16 *pfid)
{
	struct cifs_ntsd *pntsd = NULL;
	u32 acllen = 0;
	int rc = 0;

	cFYI(DBG2, "converting ACL to mode for %s", path);

	if (pfid)
		pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
	else
		pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);

	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
		kfree(pntsd);
		if (rc)
			cERROR(1, "parse sec desc failed rc = %d", rc);
	}

	return rc;
}

/* Convert mode bits to an ACL so we can update the ACL on the server */
int
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
			uid_t uid, gid_t gid)
{
	int rc = 0;
	int aclflag = CIFS_ACL_DACL; /* default flag to set */
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */

	cFYI(DBG2, "set ACL from mode for %s", path);

	/* Get the security descriptor */
	pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);

	/* Add three ACEs for owner, group, everyone, discarding any existing
	   ACEs (chmod replaces the DACL), and set the security descriptor */

	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cERROR(1, "%s: error %d getting sec desc", __func__, rc);
	} else {
		/* allocate memory for the smb header,
		   set security descriptor request security descriptor
		   parameters, and security descriptor itself */

		secdesclen = secdesclen < DEFSECDESCLEN ?
					DEFSECDESCLEN : secdesclen;
		pnntsd = kmalloc(secdesclen, GFP_KERNEL);
		if (!pnntsd) {
			cERROR(1, "Unable to allocate security descriptor");
			kfree(pntsd);
			return -ENOMEM;
		}

		rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
					&aclflag);

		cFYI(DBG2, "build_sec_desc rc: %d", rc);

		if (!rc) {
			/* Set the security descriptor */
			rc = set_cifs_acl(pnntsd, secdesclen, inode,
					  path, aclflag);
			cFYI(DBG2, "set_cifs_acl rc: %d", rc);
		}

		kfree(pnntsd);
		kfree(pntsd);
	}

	return rc;
}