4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
36 /* security id for everyone/world system group */
37 static const struct cifs_sid sid_everyone
= {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
40 static const struct cifs_sid sid_authusers
= {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
43 static const struct cifs_sid sid_user
= {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/*
 * Override credentials used (via override_creds()) when upcalling to the
 * userspace cifs.idmap key daemon, so requests are cached in a private
 * thread keyring rather than the calling task's keyrings.
 */
static const struct cred *root_cred;
48 shrink_idmap_tree(struct rb_root
*root
, int nr_to_scan
, int *nr_rem
,
53 struct cifs_sid_id
*psidid
;
55 node
= rb_first(root
);
59 psidid
= rb_entry(tmp
, struct cifs_sid_id
, rbnode
);
60 if (nr_to_scan
== 0 || *nr_del
== nr_to_scan
)
63 if (time_after(jiffies
, psidid
->time
+ SID_MAP_EXPIRE
)
64 && psidid
->refcount
== 0) {
74 * Run idmap cache shrinker.
77 cifs_idmap_shrinker(struct shrinker
*shrink
, struct shrink_control
*sc
)
79 int nr_to_scan
= sc
->nr_to_scan
;
85 spin_lock(&siduidlock
);
86 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
87 spin_unlock(&siduidlock
);
90 spin_lock(&sidgidlock
);
91 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
92 spin_unlock(&sidgidlock
);
95 spin_lock(&uidsidlock
);
96 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
97 spin_unlock(&uidsidlock
);
100 spin_lock(&gidsidlock
);
101 shrink_idmap_tree(root
, nr_to_scan
, &nr_rem
, &nr_del
);
102 spin_unlock(&gidsidlock
);
108 sid_rb_insert(struct rb_root
*root
, unsigned long cid
,
109 struct cifs_sid_id
**psidid
, char *typestr
)
112 struct rb_node
*node
= root
->rb_node
;
113 struct rb_node
*parent
= NULL
;
114 struct rb_node
**linkto
= &(root
->rb_node
);
115 struct cifs_sid_id
*lsidid
;
118 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
120 if (cid
> lsidid
->id
) {
121 linkto
= &(node
->rb_left
);
122 node
= node
->rb_left
;
124 if (cid
< lsidid
->id
) {
125 linkto
= &(node
->rb_right
);
126 node
= node
->rb_right
;
131 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
132 (*psidid
)->refcount
= 0;
134 sprintf((*psidid
)->sidstr
, "%s", typestr
);
135 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
136 sprintf(strptr
, "%ld", cid
);
138 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
139 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
141 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
142 rb_insert_color(&(*psidid
)->rbnode
, root
);
145 static struct cifs_sid_id
*
146 sid_rb_search(struct rb_root
*root
, unsigned long cid
)
148 struct rb_node
*node
= root
->rb_node
;
149 struct cifs_sid_id
*lsidid
;
152 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
153 if (cid
> lsidid
->id
)
154 node
= node
->rb_left
;
155 else if (cid
< lsidid
->id
)
156 node
= node
->rb_right
;
157 else /* node found */
164 static struct shrinker cifs_shrinker
= {
165 .shrink
= cifs_idmap_shrinker
,
166 .seeks
= DEFAULT_SEEKS
,
170 cifs_idmap_key_instantiate(struct key
*key
, struct key_preparsed_payload
*prep
)
174 payload
= kmalloc(prep
->datalen
, GFP_KERNEL
);
178 memcpy(payload
, prep
->data
, prep
->datalen
);
179 key
->payload
.data
= payload
;
180 key
->datalen
= prep
->datalen
;
185 cifs_idmap_key_destroy(struct key
*key
)
187 kfree(key
->payload
.data
);
190 static struct key_type cifs_idmap_key_type
= {
191 .name
= "cifs.idmap",
192 .instantiate
= cifs_idmap_key_instantiate
,
193 .destroy
= cifs_idmap_key_destroy
,
194 .describe
= user_describe
,
199 sid_to_str(struct cifs_sid
*sidptr
, char *sidstr
)
207 sprintf(strptr
, "S-%hhu", sidptr
->revision
);
208 strptr
= sidstr
+ strlen(sidstr
);
210 for (i
= 0; i
< NUM_AUTHS
; ++i
) {
211 if (sidptr
->authority
[i
]) {
212 sprintf(strptr
, "-%hhu", sidptr
->authority
[i
]);
213 strptr
= sidstr
+ strlen(sidstr
);
217 for (i
= 0; i
< sidptr
->num_subauth
; ++i
) {
218 saval
= le32_to_cpu(sidptr
->sub_auth
[i
]);
219 sprintf(strptr
, "-%u", saval
);
220 strptr
= sidstr
+ strlen(sidstr
);
225 * if the two SIDs (roughly equivalent to a UUID for a user or group) are
226 * the same returns zero, if they do not match returns non-zero.
229 compare_sids(const struct cifs_sid
*ctsid
, const struct cifs_sid
*cwsid
)
232 int num_subauth
, num_sat
, num_saw
;
234 if ((!ctsid
) || (!cwsid
))
237 /* compare the revision */
238 if (ctsid
->revision
!= cwsid
->revision
) {
239 if (ctsid
->revision
> cwsid
->revision
)
245 /* compare all of the six auth values */
246 for (i
= 0; i
< NUM_AUTHS
; ++i
) {
247 if (ctsid
->authority
[i
] != cwsid
->authority
[i
]) {
248 if (ctsid
->authority
[i
] > cwsid
->authority
[i
])
255 /* compare all of the subauth values if any */
256 num_sat
= ctsid
->num_subauth
;
257 num_saw
= cwsid
->num_subauth
;
258 num_subauth
= num_sat
< num_saw
? num_sat
: num_saw
;
260 for (i
= 0; i
< num_subauth
; ++i
) {
261 if (ctsid
->sub_auth
[i
] != cwsid
->sub_auth
[i
]) {
262 if (le32_to_cpu(ctsid
->sub_auth
[i
]) >
263 le32_to_cpu(cwsid
->sub_auth
[i
]))
271 return 0; /* sids compare/match */
275 cifs_copy_sid(struct cifs_sid
*dst
, const struct cifs_sid
*src
)
279 dst
->revision
= src
->revision
;
280 dst
->num_subauth
= min_t(u8
, src
->num_subauth
, SID_MAX_SUB_AUTHORITIES
);
281 for (i
= 0; i
< NUM_AUTHS
; ++i
)
282 dst
->authority
[i
] = src
->authority
[i
];
283 for (i
= 0; i
< dst
->num_subauth
; ++i
)
284 dst
->sub_auth
[i
] = src
->sub_auth
[i
];
288 id_rb_insert(struct rb_root
*root
, struct cifs_sid
*sidptr
,
289 struct cifs_sid_id
**psidid
, char *typestr
)
293 struct rb_node
*node
= root
->rb_node
;
294 struct rb_node
*parent
= NULL
;
295 struct rb_node
**linkto
= &(root
->rb_node
);
296 struct cifs_sid_id
*lsidid
;
299 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
301 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
303 linkto
= &(node
->rb_left
);
304 node
= node
->rb_left
;
306 linkto
= &(node
->rb_right
);
307 node
= node
->rb_right
;
311 cifs_copy_sid(&(*psidid
)->sid
, sidptr
);
312 (*psidid
)->time
= jiffies
- (SID_MAP_RETRY
+ 1);
313 (*psidid
)->refcount
= 0;
315 sprintf((*psidid
)->sidstr
, "%s", typestr
);
316 strptr
= (*psidid
)->sidstr
+ strlen((*psidid
)->sidstr
);
317 sid_to_str(&(*psidid
)->sid
, strptr
);
319 clear_bit(SID_ID_PENDING
, &(*psidid
)->state
);
320 clear_bit(SID_ID_MAPPED
, &(*psidid
)->state
);
322 rb_link_node(&(*psidid
)->rbnode
, parent
, linkto
);
323 rb_insert_color(&(*psidid
)->rbnode
, root
);
326 static struct cifs_sid_id
*
327 id_rb_search(struct rb_root
*root
, struct cifs_sid
*sidptr
)
330 struct rb_node
*node
= root
->rb_node
;
331 struct cifs_sid_id
*lsidid
;
334 lsidid
= rb_entry(node
, struct cifs_sid_id
, rbnode
);
335 rc
= compare_sids(sidptr
, &((lsidid
)->sid
));
337 node
= node
->rb_left
;
339 node
= node
->rb_right
;
340 } else /* node found */
348 sidid_pending_wait(void *unused
)
351 return signal_pending(current
) ? -ERESTARTSYS
: 0;
355 id_to_sid(unsigned long cid
, uint sidtype
, struct cifs_sid
*ssid
)
359 const struct cred
*saved_cred
;
360 struct cifs_sid
*lsid
;
361 struct cifs_sid_id
*psidid
, *npsidid
;
362 struct rb_root
*cidtree
;
365 if (sidtype
== SIDOWNER
) {
366 cidlock
= &siduidlock
;
368 } else if (sidtype
== SIDGROUP
) {
369 cidlock
= &sidgidlock
;
375 psidid
= sid_rb_search(cidtree
, cid
);
377 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
378 spin_unlock(cidlock
);
379 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
383 npsidid
->sidstr
= kmalloc(SID_STRING_MAX
, GFP_KERNEL
);
384 if (!npsidid
->sidstr
) {
390 psidid
= sid_rb_search(cidtree
, cid
);
391 if (psidid
) { /* node happened to get inserted meanwhile */
393 spin_unlock(cidlock
);
394 kfree(npsidid
->sidstr
);
398 sid_rb_insert(cidtree
, cid
, &psidid
,
399 sidtype
== SIDOWNER
? "oi:" : "gi:");
401 spin_unlock(cidlock
);
405 spin_unlock(cidlock
);
409 * If we are here, it is safe to access psidid and its fields
410 * since a reference was taken earlier while holding the spinlock.
411 * A reference on the node is put without holding the spinlock
412 * and it is OK to do so in this case, shrinker will not erase
413 * this node until all references are put and we do not access
414 * any fields of the node after a reference is put .
416 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
417 cifs_copy_sid(ssid
, &psidid
->sid
);
418 psidid
->time
= jiffies
; /* update ts for accessing */
422 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
)) {
427 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
428 saved_cred
= override_creds(root_cred
);
429 sidkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
430 if (IS_ERR(sidkey
)) {
432 cFYI(1, "%s: Can't map and id to a SID", __func__
);
433 } else if (sidkey
->datalen
< CIFS_SID_BASE_SIZE
) {
435 cFYI(1, "%s: Downcall contained malformed key "
436 "(datalen=%hu)", __func__
, sidkey
->datalen
);
438 lsid
= (struct cifs_sid
*)sidkey
->payload
.data
;
439 cifs_copy_sid(&psidid
->sid
, lsid
);
440 cifs_copy_sid(ssid
, &psidid
->sid
);
441 set_bit(SID_ID_MAPPED
, &psidid
->state
);
443 kfree(psidid
->sidstr
);
445 psidid
->time
= jiffies
; /* update ts for accessing */
446 revert_creds(saved_cred
);
447 clear_bit(SID_ID_PENDING
, &psidid
->state
);
448 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
450 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
451 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
453 cFYI(1, "%s: sidid_pending_wait interrupted %d",
458 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
459 cifs_copy_sid(ssid
, &psidid
->sid
);
469 sid_to_id(struct cifs_sb_info
*cifs_sb
, struct cifs_sid
*psid
,
470 struct cifs_fattr
*fattr
, uint sidtype
)
475 const struct cred
*saved_cred
;
476 struct cifs_sid_id
*psidid
, *npsidid
;
477 struct rb_root
*cidtree
;
480 if (sidtype
== SIDOWNER
) {
481 cid
= cifs_sb
->mnt_uid
; /* default uid, in case upcall fails */
482 cidlock
= &siduidlock
;
484 } else if (sidtype
== SIDGROUP
) {
485 cid
= cifs_sb
->mnt_gid
; /* default gid, in case upcall fails */
486 cidlock
= &sidgidlock
;
492 psidid
= id_rb_search(cidtree
, psid
);
494 if (!psidid
) { /* node does not exist, allocate one & attempt adding */
495 spin_unlock(cidlock
);
496 npsidid
= kzalloc(sizeof(struct cifs_sid_id
), GFP_KERNEL
);
500 npsidid
->sidstr
= kmalloc(SID_STRING_MAX
, GFP_KERNEL
);
501 if (!npsidid
->sidstr
) {
507 psidid
= id_rb_search(cidtree
, psid
);
508 if (psidid
) { /* node happened to get inserted meanwhile */
510 spin_unlock(cidlock
);
511 kfree(npsidid
->sidstr
);
515 id_rb_insert(cidtree
, psid
, &psidid
,
516 sidtype
== SIDOWNER
? "os:" : "gs:");
518 spin_unlock(cidlock
);
522 spin_unlock(cidlock
);
526 * If we are here, it is safe to access psidid and its fields
527 * since a reference was taken earlier while holding the spinlock.
528 * A reference on the node is put without holding the spinlock
529 * and it is OK to do so in this case, shrinker will not erase
530 * this node until all references are put and we do not access
531 * any fields of the node after a reference is put .
533 if (test_bit(SID_ID_MAPPED
, &psidid
->state
)) {
535 psidid
->time
= jiffies
; /* update ts for accessing */
539 if (time_after(psidid
->time
+ SID_MAP_RETRY
, jiffies
))
542 if (!test_and_set_bit(SID_ID_PENDING
, &psidid
->state
)) {
543 saved_cred
= override_creds(root_cred
);
544 idkey
= request_key(&cifs_idmap_key_type
, psidid
->sidstr
, "");
546 cFYI(1, "%s: Can't map SID to an id", __func__
);
548 cid
= *(unsigned long *)idkey
->payload
.value
;
550 set_bit(SID_ID_MAPPED
, &psidid
->state
);
552 kfree(psidid
->sidstr
);
554 revert_creds(saved_cred
);
555 psidid
->time
= jiffies
; /* update ts for accessing */
556 clear_bit(SID_ID_PENDING
, &psidid
->state
);
557 wake_up_bit(&psidid
->state
, SID_ID_PENDING
);
559 rc
= wait_on_bit(&psidid
->state
, SID_ID_PENDING
,
560 sidid_pending_wait
, TASK_INTERRUPTIBLE
);
562 cFYI(1, "%s: sidid_pending_wait interrupted %d",
564 --psidid
->refcount
; /* decremented without spinlock */
567 if (test_bit(SID_ID_MAPPED
, &psidid
->state
))
572 --psidid
->refcount
; /* decremented without spinlock */
573 if (sidtype
== SIDOWNER
)
582 init_cifs_idmap(void)
588 cFYI(1, "Registering the %s key type", cifs_idmap_key_type
.name
);
590 /* create an override credential set with a special thread keyring in
591 * which requests are cached
593 * this is used to prevent malicious redirections from being installed
596 cred
= prepare_kernel_cred(NULL
);
600 keyring
= key_alloc(&key_type_keyring
, ".cifs_idmap", 0, 0, cred
,
601 (KEY_POS_ALL
& ~KEY_POS_SETATTR
) |
602 KEY_USR_VIEW
| KEY_USR_READ
,
603 KEY_ALLOC_NOT_IN_QUOTA
);
604 if (IS_ERR(keyring
)) {
605 ret
= PTR_ERR(keyring
);
606 goto failed_put_cred
;
609 ret
= key_instantiate_and_link(keyring
, NULL
, 0, NULL
, NULL
);
613 ret
= register_key_type(&cifs_idmap_key_type
);
617 /* instruct request_key() to use this special keyring as a cache for
618 * the results it looks up */
619 set_bit(KEY_FLAG_ROOT_CAN_CLEAR
, &keyring
->flags
);
620 cred
->thread_keyring
= keyring
;
621 cred
->jit_keyring
= KEY_REQKEY_DEFL_THREAD_KEYRING
;
624 spin_lock_init(&siduidlock
);
626 spin_lock_init(&sidgidlock
);
629 spin_lock_init(&uidsidlock
);
630 siduidtree
= RB_ROOT
;
631 spin_lock_init(&gidsidlock
);
632 sidgidtree
= RB_ROOT
;
633 register_shrinker(&cifs_shrinker
);
635 cFYI(1, "cifs idmap keyring: %d", key_serial(keyring
));
646 exit_cifs_idmap(void)
648 key_revoke(root_cred
->thread_keyring
);
649 unregister_key_type(&cifs_idmap_key_type
);
651 unregister_shrinker(&cifs_shrinker
);
652 cFYI(1, "Unregistered %s key type", cifs_idmap_key_type
.name
);
656 cifs_destroy_idmaptrees(void)
658 struct rb_root
*root
;
659 struct rb_node
*node
;
662 spin_lock(&siduidlock
);
663 while ((node
= rb_first(root
)))
664 rb_erase(node
, root
);
665 spin_unlock(&siduidlock
);
668 spin_lock(&sidgidlock
);
669 while ((node
= rb_first(root
)))
670 rb_erase(node
, root
);
671 spin_unlock(&sidgidlock
);
674 spin_lock(&uidsidlock
);
675 while ((node
= rb_first(root
)))
676 rb_erase(node
, root
);
677 spin_unlock(&uidsidlock
);
680 spin_lock(&gidsidlock
);
681 while ((node
= rb_first(root
)))
682 rb_erase(node
, root
);
683 spin_unlock(&gidsidlock
);
686 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
687 static void copy_sec_desc(const struct cifs_ntsd
*pntsd
,
688 struct cifs_ntsd
*pnntsd
, __u32 sidsoffset
)
690 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
691 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
693 /* copy security descriptor control portion */
694 pnntsd
->revision
= pntsd
->revision
;
695 pnntsd
->type
= pntsd
->type
;
696 pnntsd
->dacloffset
= cpu_to_le32(sizeof(struct cifs_ntsd
));
697 pnntsd
->sacloffset
= 0;
698 pnntsd
->osidoffset
= cpu_to_le32(sidsoffset
);
699 pnntsd
->gsidoffset
= cpu_to_le32(sidsoffset
+ sizeof(struct cifs_sid
));
702 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
703 le32_to_cpu(pntsd
->osidoffset
));
704 nowner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
);
705 cifs_copy_sid(nowner_sid_ptr
, owner_sid_ptr
);
708 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
709 le32_to_cpu(pntsd
->gsidoffset
));
710 ngroup_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+ sidsoffset
+
711 sizeof(struct cifs_sid
));
712 cifs_copy_sid(ngroup_sid_ptr
, group_sid_ptr
);
719 change posix mode to reflect permissions
720 pmode is the existing mode (we only want to overwrite part of this
721 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
723 static void access_flags_to_mode(__le32 ace_flags
, int type
, umode_t
*pmode
,
724 umode_t
*pbits_to_set
)
726 __u32 flags
= le32_to_cpu(ace_flags
);
727 /* the order of ACEs is important. The canonical order is to begin with
728 DENY entries followed by ALLOW, otherwise an allow entry could be
729 encountered first, making the subsequent deny entry like "dead code"
730 which would be superflous since Windows stops when a match is made
731 for the operation you are trying to perform for your user */
733 /* For deny ACEs we change the mask so that subsequent allow access
734 control entries do not turn on the bits we are denying */
735 if (type
== ACCESS_DENIED
) {
736 if (flags
& GENERIC_ALL
)
737 *pbits_to_set
&= ~S_IRWXUGO
;
739 if ((flags
& GENERIC_WRITE
) ||
740 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
741 *pbits_to_set
&= ~S_IWUGO
;
742 if ((flags
& GENERIC_READ
) ||
743 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
744 *pbits_to_set
&= ~S_IRUGO
;
745 if ((flags
& GENERIC_EXECUTE
) ||
746 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
747 *pbits_to_set
&= ~S_IXUGO
;
749 } else if (type
!= ACCESS_ALLOWED
) {
750 cERROR(1, "unknown access control type %d", type
);
753 /* else ACCESS_ALLOWED type */
755 if (flags
& GENERIC_ALL
) {
756 *pmode
|= (S_IRWXUGO
& (*pbits_to_set
));
757 cFYI(DBG2
, "all perms");
760 if ((flags
& GENERIC_WRITE
) ||
761 ((flags
& FILE_WRITE_RIGHTS
) == FILE_WRITE_RIGHTS
))
762 *pmode
|= (S_IWUGO
& (*pbits_to_set
));
763 if ((flags
& GENERIC_READ
) ||
764 ((flags
& FILE_READ_RIGHTS
) == FILE_READ_RIGHTS
))
765 *pmode
|= (S_IRUGO
& (*pbits_to_set
));
766 if ((flags
& GENERIC_EXECUTE
) ||
767 ((flags
& FILE_EXEC_RIGHTS
) == FILE_EXEC_RIGHTS
))
768 *pmode
|= (S_IXUGO
& (*pbits_to_set
));
770 cFYI(DBG2
, "access flags 0x%x mode now 0x%x", flags
, *pmode
);
775 Generate access flags to reflect permissions mode is the existing mode.
776 This function is called for every ACE in the DACL whose SID matches
777 with either owner or group or everyone.
780 static void mode_to_access_flags(umode_t mode
, umode_t bits_to_use
,
783 /* reset access mask */
786 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
789 /* check for R/W/X UGO since we do not know whose flags
790 is this but we have cleared all the bits sans RWX for
791 either user or group or other as per bits_to_use */
793 *pace_flags
|= SET_FILE_READ_RIGHTS
;
795 *pace_flags
|= SET_FILE_WRITE_RIGHTS
;
797 *pace_flags
|= SET_FILE_EXEC_RIGHTS
;
799 cFYI(DBG2
, "mode: 0x%x, access flags now 0x%x", mode
, *pace_flags
);
803 static __u16
fill_ace_for_sid(struct cifs_ace
*pntace
,
804 const struct cifs_sid
*psid
, __u64 nmode
, umode_t bits
)
808 __u32 access_req
= 0;
810 pntace
->type
= ACCESS_ALLOWED
;
812 mode_to_access_flags(nmode
, bits
, &access_req
);
814 access_req
= SET_MINIMUM_RIGHTS
;
815 pntace
->access_req
= cpu_to_le32(access_req
);
817 pntace
->sid
.revision
= psid
->revision
;
818 pntace
->sid
.num_subauth
= psid
->num_subauth
;
819 for (i
= 0; i
< NUM_AUTHS
; i
++)
820 pntace
->sid
.authority
[i
] = psid
->authority
[i
];
821 for (i
= 0; i
< psid
->num_subauth
; i
++)
822 pntace
->sid
.sub_auth
[i
] = psid
->sub_auth
[i
];
824 size
= 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid
->num_subauth
* 4);
825 pntace
->size
= cpu_to_le16(size
);
#ifdef CONFIG_CIFS_DEBUG2
/* debug helper: sanity-check and log one ACE; never modifies anything */
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;

	/* validate that we do not go past end of acl */

	if (le16_to_cpu(pace->size) < 16) {
		cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
		return;
	}

	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cERROR(1, "ACL too small to parse ACE");
		return;
	}

	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
			pace->sid.revision, pace->sid.num_subauth, pace->type,
			pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
				le32_to_cpu(pace->sid.sub_auth[i]));
		}

		/* BB add length check to make sure that we do not have huge
			num auths and therefore go off the end */
	}

	return;
}
#endif
868 static void parse_dacl(struct cifs_acl
*pdacl
, char *end_of_acl
,
869 struct cifs_sid
*pownersid
, struct cifs_sid
*pgrpsid
,
870 struct cifs_fattr
*fattr
)
876 struct cifs_ace
**ppace
;
878 /* BB need to add parm so we can store the SID BB */
881 /* no DACL in the security descriptor, set
882 all the permissions for user/group/other */
883 fattr
->cf_mode
|= S_IRWXUGO
;
887 /* validate that we do not go past end of acl */
888 if (end_of_acl
< (char *)pdacl
+ le16_to_cpu(pdacl
->size
)) {
889 cERROR(1, "ACL too small to parse DACL");
893 cFYI(DBG2
, "DACL revision %d size %d num aces %d",
894 le16_to_cpu(pdacl
->revision
), le16_to_cpu(pdacl
->size
),
895 le32_to_cpu(pdacl
->num_aces
));
897 /* reset rwx permissions for user/group/other.
898 Also, if num_aces is 0 i.e. DACL has no ACEs,
899 user/group/other have no permissions */
900 fattr
->cf_mode
&= ~(S_IRWXUGO
);
902 acl_base
= (char *)pdacl
;
903 acl_size
= sizeof(struct cifs_acl
);
905 num_aces
= le32_to_cpu(pdacl
->num_aces
);
907 umode_t user_mask
= S_IRWXU
;
908 umode_t group_mask
= S_IRWXG
;
909 umode_t other_mask
= S_IRWXU
| S_IRWXG
| S_IRWXO
;
911 if (num_aces
> ULONG_MAX
/ sizeof(struct cifs_ace
*))
913 ppace
= kmalloc(num_aces
* sizeof(struct cifs_ace
*),
916 cERROR(1, "DACL memory allocation error");
920 for (i
= 0; i
< num_aces
; ++i
) {
921 ppace
[i
] = (struct cifs_ace
*) (acl_base
+ acl_size
);
922 #ifdef CONFIG_CIFS_DEBUG2
923 dump_ace(ppace
[i
], end_of_acl
);
925 if (compare_sids(&(ppace
[i
]->sid
), pownersid
) == 0)
926 access_flags_to_mode(ppace
[i
]->access_req
,
930 if (compare_sids(&(ppace
[i
]->sid
), pgrpsid
) == 0)
931 access_flags_to_mode(ppace
[i
]->access_req
,
935 if (compare_sids(&(ppace
[i
]->sid
), &sid_everyone
) == 0)
936 access_flags_to_mode(ppace
[i
]->access_req
,
940 if (compare_sids(&(ppace
[i
]->sid
), &sid_authusers
) == 0)
941 access_flags_to_mode(ppace
[i
]->access_req
,
947 /* memcpy((void *)(&(cifscred->aces[i])),
949 sizeof(struct cifs_ace)); */
951 acl_base
= (char *)ppace
[i
];
952 acl_size
= le16_to_cpu(ppace
[i
]->size
);
962 static int set_chmod_dacl(struct cifs_acl
*pndacl
, struct cifs_sid
*pownersid
,
963 struct cifs_sid
*pgrpsid
, __u64 nmode
)
966 struct cifs_acl
*pnndacl
;
968 pnndacl
= (struct cifs_acl
*)((char *)pndacl
+ sizeof(struct cifs_acl
));
970 size
+= fill_ace_for_sid((struct cifs_ace
*) ((char *)pnndacl
+ size
),
971 pownersid
, nmode
, S_IRWXU
);
972 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
973 pgrpsid
, nmode
, S_IRWXG
);
974 size
+= fill_ace_for_sid((struct cifs_ace
*)((char *)pnndacl
+ size
),
975 &sid_everyone
, nmode
, S_IRWXO
);
977 pndacl
->size
= cpu_to_le16(size
+ sizeof(struct cifs_acl
));
978 pndacl
->num_aces
= cpu_to_le32(3);
984 static int parse_sid(struct cifs_sid
*psid
, char *end_of_acl
)
986 /* BB need to add parm so we can store the SID BB */
988 /* validate that we do not go past end of ACL - sid must be at least 8
989 bytes long (assuming no sub-auths - e.g. the null SID */
990 if (end_of_acl
< (char *)psid
+ 8) {
991 cERROR(1, "ACL too small to parse SID %p", psid
);
995 #ifdef CONFIG_CIFS_DEBUG2
996 if (psid
->num_subauth
) {
998 cFYI(1, "SID revision %d num_auth %d",
999 psid
->revision
, psid
->num_subauth
);
1001 for (i
= 0; i
< psid
->num_subauth
; i
++) {
1002 cFYI(1, "SID sub_auth[%d]: 0x%x ", i
,
1003 le32_to_cpu(psid
->sub_auth
[i
]));
1006 /* BB add length check to make sure that we do not have huge
1007 num auths and therefore go off the end */
1009 le32_to_cpu(psid
->sub_auth
[psid
->num_subauth
-1]));
1017 /* Convert CIFS ACL to POSIX form */
1018 static int parse_sec_desc(struct cifs_sb_info
*cifs_sb
,
1019 struct cifs_ntsd
*pntsd
, int acl_len
, struct cifs_fattr
*fattr
)
1022 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1023 struct cifs_acl
*dacl_ptr
; /* no need for SACL ptr */
1024 char *end_of_acl
= ((char *)pntsd
) + acl_len
;
1030 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1031 le32_to_cpu(pntsd
->osidoffset
));
1032 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1033 le32_to_cpu(pntsd
->gsidoffset
));
1034 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1035 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1036 cFYI(DBG2
, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1037 "sacloffset 0x%x dacloffset 0x%x",
1038 pntsd
->revision
, pntsd
->type
, le32_to_cpu(pntsd
->osidoffset
),
1039 le32_to_cpu(pntsd
->gsidoffset
),
1040 le32_to_cpu(pntsd
->sacloffset
), dacloffset
);
1041 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1042 rc
= parse_sid(owner_sid_ptr
, end_of_acl
);
1044 cFYI(1, "%s: Error %d parsing Owner SID", __func__
, rc
);
1047 rc
= sid_to_id(cifs_sb
, owner_sid_ptr
, fattr
, SIDOWNER
);
1049 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__
, rc
);
1053 rc
= parse_sid(group_sid_ptr
, end_of_acl
);
1055 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__
, rc
);
1058 rc
= sid_to_id(cifs_sb
, group_sid_ptr
, fattr
, SIDGROUP
);
1060 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__
, rc
);
1065 parse_dacl(dacl_ptr
, end_of_acl
, owner_sid_ptr
,
1066 group_sid_ptr
, fattr
);
1068 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1073 /* Convert permission bits from mode to equivalent CIFS ACL */
1074 static int build_sec_desc(struct cifs_ntsd
*pntsd
, struct cifs_ntsd
*pnntsd
,
1075 __u32 secdesclen
, __u64 nmode
, uid_t uid
, gid_t gid
, int *aclflag
)
1081 struct cifs_sid
*owner_sid_ptr
, *group_sid_ptr
;
1082 struct cifs_sid
*nowner_sid_ptr
, *ngroup_sid_ptr
;
1083 struct cifs_acl
*dacl_ptr
= NULL
; /* no need for SACL ptr */
1084 struct cifs_acl
*ndacl_ptr
= NULL
; /* no need for SACL ptr */
1086 if (nmode
!= NO_CHANGE_64
) { /* chmod */
1087 owner_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1088 le32_to_cpu(pntsd
->osidoffset
));
1089 group_sid_ptr
= (struct cifs_sid
*)((char *)pntsd
+
1090 le32_to_cpu(pntsd
->gsidoffset
));
1091 dacloffset
= le32_to_cpu(pntsd
->dacloffset
);
1092 dacl_ptr
= (struct cifs_acl
*)((char *)pntsd
+ dacloffset
);
1093 ndacloffset
= sizeof(struct cifs_ntsd
);
1094 ndacl_ptr
= (struct cifs_acl
*)((char *)pnntsd
+ ndacloffset
);
1095 ndacl_ptr
->revision
= dacl_ptr
->revision
;
1096 ndacl_ptr
->size
= 0;
1097 ndacl_ptr
->num_aces
= 0;
1099 rc
= set_chmod_dacl(ndacl_ptr
, owner_sid_ptr
, group_sid_ptr
,
1101 sidsoffset
= ndacloffset
+ le16_to_cpu(ndacl_ptr
->size
);
1102 /* copy sec desc control portion & owner and group sids */
1103 copy_sec_desc(pntsd
, pnntsd
, sidsoffset
);
1104 *aclflag
= CIFS_ACL_DACL
;
1106 memcpy(pnntsd
, pntsd
, secdesclen
);
1107 if (uid
!= NO_CHANGE_32
) { /* chown */
1108 owner_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1109 le32_to_cpu(pnntsd
->osidoffset
));
1110 nowner_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1112 if (!nowner_sid_ptr
)
1114 rc
= id_to_sid(uid
, SIDOWNER
, nowner_sid_ptr
);
1116 cFYI(1, "%s: Mapping error %d for owner id %d",
1118 kfree(nowner_sid_ptr
);
1121 cifs_copy_sid(owner_sid_ptr
, nowner_sid_ptr
);
1122 kfree(nowner_sid_ptr
);
1123 *aclflag
= CIFS_ACL_OWNER
;
1125 if (gid
!= NO_CHANGE_32
) { /* chgrp */
1126 group_sid_ptr
= (struct cifs_sid
*)((char *)pnntsd
+
1127 le32_to_cpu(pnntsd
->gsidoffset
));
1128 ngroup_sid_ptr
= kmalloc(sizeof(struct cifs_sid
),
1130 if (!ngroup_sid_ptr
)
1132 rc
= id_to_sid(gid
, SIDGROUP
, ngroup_sid_ptr
);
1134 cFYI(1, "%s: Mapping error %d for group id %d",
1136 kfree(ngroup_sid_ptr
);
1139 cifs_copy_sid(group_sid_ptr
, ngroup_sid_ptr
);
1140 kfree(ngroup_sid_ptr
);
1141 *aclflag
= CIFS_ACL_GROUP
;
1148 static struct cifs_ntsd
*get_cifs_acl_by_fid(struct cifs_sb_info
*cifs_sb
,
1149 __u16 fid
, u32
*pacllen
)
1151 struct cifs_ntsd
*pntsd
= NULL
;
1154 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1157 return ERR_CAST(tlink
);
1160 rc
= CIFSSMBGetCIFSACL(xid
, tlink_tcon(tlink
), fid
, &pntsd
, pacllen
);
1163 cifs_put_tlink(tlink
);
1165 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1171 static struct cifs_ntsd
*get_cifs_acl_by_path(struct cifs_sb_info
*cifs_sb
,
1172 const char *path
, u32
*pacllen
)
1174 struct cifs_ntsd
*pntsd
= NULL
;
1177 int rc
, create_options
= 0;
1179 struct cifs_tcon
*tcon
;
1180 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1183 return ERR_CAST(tlink
);
1185 tcon
= tlink_tcon(tlink
);
1188 if (backup_cred(cifs_sb
))
1189 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1191 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, READ_CONTROL
,
1192 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1193 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1195 rc
= CIFSSMBGetCIFSACL(xid
, tcon
, fid
, &pntsd
, pacllen
);
1196 CIFSSMBClose(xid
, tcon
, fid
);
1199 cifs_put_tlink(tlink
);
1202 cFYI(1, "%s: rc = %d ACL len %d", __func__
, rc
, *pacllen
);
1208 /* Retrieve an ACL from the server */
1209 struct cifs_ntsd
*get_cifs_acl(struct cifs_sb_info
*cifs_sb
,
1210 struct inode
*inode
, const char *path
,
1213 struct cifs_ntsd
*pntsd
= NULL
;
1214 struct cifsFileInfo
*open_file
= NULL
;
1217 open_file
= find_readable_file(CIFS_I(inode
), true);
1219 return get_cifs_acl_by_path(cifs_sb
, path
, pacllen
);
1221 pntsd
= get_cifs_acl_by_fid(cifs_sb
, open_file
->fid
.netfid
, pacllen
);
1222 cifsFileInfo_put(open_file
);
1226 /* Set an ACL on the server */
1227 int set_cifs_acl(struct cifs_ntsd
*pnntsd
, __u32 acllen
,
1228 struct inode
*inode
, const char *path
, int aclflag
)
1232 int rc
, access_flags
, create_options
= 0;
1234 struct cifs_tcon
*tcon
;
1235 struct cifs_sb_info
*cifs_sb
= CIFS_SB(inode
->i_sb
);
1236 struct tcon_link
*tlink
= cifs_sb_tlink(cifs_sb
);
1239 return PTR_ERR(tlink
);
1241 tcon
= tlink_tcon(tlink
);
1244 if (backup_cred(cifs_sb
))
1245 create_options
|= CREATE_OPEN_BACKUP_INTENT
;
1247 if (aclflag
== CIFS_ACL_OWNER
|| aclflag
== CIFS_ACL_GROUP
)
1248 access_flags
= WRITE_OWNER
;
1250 access_flags
= WRITE_DAC
;
1252 rc
= CIFSSMBOpen(xid
, tcon
, path
, FILE_OPEN
, access_flags
,
1253 create_options
, &fid
, &oplock
, NULL
, cifs_sb
->local_nls
,
1254 cifs_sb
->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR
);
1256 cERROR(1, "Unable to open file to set ACL");
1260 rc
= CIFSSMBSetCIFSACL(xid
, tcon
, fid
, pnntsd
, acllen
, aclflag
);
1261 cFYI(DBG2
, "SetCIFSACL rc = %d", rc
);
1263 CIFSSMBClose(xid
, tcon
, fid
);
1266 cifs_put_tlink(tlink
);
1270 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
1272 cifs_acl_to_fattr(struct cifs_sb_info
*cifs_sb
, struct cifs_fattr
*fattr
,
1273 struct inode
*inode
, const char *path
, const __u16
*pfid
)
1275 struct cifs_ntsd
*pntsd
= NULL
;
1279 cFYI(DBG2
, "converting ACL to mode for %s", path
);
1282 pntsd
= get_cifs_acl_by_fid(cifs_sb
, *pfid
, &acllen
);
1284 pntsd
= get_cifs_acl(cifs_sb
, inode
, path
, &acllen
);
1286 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1287 if (IS_ERR(pntsd
)) {
1288 rc
= PTR_ERR(pntsd
);
1289 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1291 rc
= parse_sec_desc(cifs_sb
, pntsd
, acllen
, fattr
);
1294 cERROR(1, "parse sec desc failed rc = %d", rc
);
1300 /* Convert mode bits to an ACL so we can update the ACL on the server */
1302 id_mode_to_cifs_acl(struct inode
*inode
, const char *path
, __u64 nmode
,
1303 uid_t uid
, gid_t gid
)
1306 int aclflag
= CIFS_ACL_DACL
; /* default flag to set */
1307 __u32 secdesclen
= 0;
1308 struct cifs_ntsd
*pntsd
= NULL
; /* acl obtained from server */
1309 struct cifs_ntsd
*pnntsd
= NULL
; /* modified acl to be sent to server */
1311 cFYI(DBG2
, "set ACL from mode for %s", path
);
1313 /* Get the security descriptor */
1314 pntsd
= get_cifs_acl(CIFS_SB(inode
->i_sb
), inode
, path
, &secdesclen
);
1315 if (IS_ERR(pntsd
)) {
1316 rc
= PTR_ERR(pntsd
);
1317 cERROR(1, "%s: error %d getting sec desc", __func__
, rc
);
1322 * Add three ACEs for owner, group, everyone getting rid of other ACEs
1323 * as chmod disables ACEs and set the security descriptor. Allocate
1324 * memory for the smb header, set security descriptor request security
1325 * descriptor parameters, and secuirty descriptor itself
1327 secdesclen
= max_t(u32
, secdesclen
, DEFSECDESCLEN
);
1328 pnntsd
= kmalloc(secdesclen
, GFP_KERNEL
);
1330 cERROR(1, "Unable to allocate security descriptor");
1335 rc
= build_sec_desc(pntsd
, pnntsd
, secdesclen
, nmode
, uid
, gid
,
1338 cFYI(DBG2
, "build_sec_desc rc: %d", rc
);
1341 /* Set the security descriptor */
1342 rc
= set_cifs_acl(pnntsd
, secdesclen
, inode
, path
, aclflag
);
1343 cFYI(DBG2
, "set_cifs_acl rc: %d", rc
);