4 * Copyright (C) International Business Machines Corp., 2007,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Contains the routines for mapping CIFS/NTFS ACLs
9 * This library is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU Lesser General Public License as published
11 * by the Free Software Foundation; either version 2.1 of the License, or
12 * (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
17 * the GNU Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public License
20 * along with this library; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/keyctl.h>
28 #include <linux/key-type.h>
29 #include <keys/user-type.h>
33 #include "cifsproto.h"
34 #include "cifs_debug.h"
/*
 * Well-known Windows SIDs used during SID <-> uid/gid mapping.
 * Field order per struct cifs_sid: revision, num_subauth,
 * authority[6], sub_auth[].
 */
36 /* security id for everyone/world system group */
/* S-1-1-0: revision 1, one sub-authority (0), authority 1 (World) */
37 static const struct cifs_sid sid_everyone = {
38 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
39 /* security id for Authenticated Users system group */
/* S-1-5-11: authority 5 (NT), sub-authority 11, stored little-endian */
40 static const struct cifs_sid sid_authusers = {
41 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* Base for user SIDs under the NT authority (S-1-5-...); sub_auths
 * are presumably filled in per user elsewhere -- TODO confirm */
43 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
/* Override credentials (special thread keyring) used for idmap
 * upcalls; set up in init_cifs_idmap(). */
45 const struct cred *root_cred;
/*
 * Walk one id-mapping rb-tree and evict cached entries that have
 * expired and are unreferenced.
 * NOTE(review): this span is a sampled fragment -- the loop body,
 * rb_erase/free calls and return live in elided lines.
 */
48 shrink_idmap_tree(struct rb_root *root, int nr_to_scan, int *nr_rem,
53 struct cifs_sid_id *psidid;
55 node = rb_first(root);
59 psidid = rb_entry(tmp, struct cifs_sid_id, rbnode);
/* nr_to_scan == 0 appears to mean "count only"; otherwise stop after
 * nr_to_scan deletions -- TODO confirm against full source */
60 if (nr_to_scan == 0 || *nr_del == nr_to_scan)
/* evict only entries older than SID_MAP_EXPIRE with refcount 0 */
63 if (time_after(jiffies, psidid->time + SID_MAP_EXPIRE)
64 && psidid->refcount == 0) {
74 * Run idmap cache shrinker.
/*
 * Memory-shrinker callback: scans all four id-mapping trees under
 * their respective spinlocks.
 * NOTE(review): the assignments of `root` to each tree between the
 * lock/shrink/unlock triples are in elided lines; as shown, `root`
 * would be stale -- confirm against the full source.
 */
77 cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
79 int nr_to_scan = sc->nr_to_scan;
/* sid -> uid tree */
85 spin_lock(&siduidlock);
86 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
87 spin_unlock(&siduidlock);
/* sid -> gid tree */
90 spin_lock(&sidgidlock);
91 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
92 spin_unlock(&sidgidlock);
/* uid -> sid tree */
95 spin_lock(&uidsidlock);
96 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
97 spin_unlock(&uidsidlock);
/* gid -> sid tree */
100 spin_lock(&gidsidlock);
101 shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
102 spin_unlock(&gidsidlock);
/*
 * Insert a new cache node keyed by numeric id (uid/gid) into an
 * rb-tree, initializing its "<typestr><cid>" key string for the
 * idmap upcall. Caller is expected to hold the tree's spinlock.
 * Note the ordering: larger ids go LEFT -- unusual, but it matches
 * sid_rb_search() below, so lookups stay consistent.
 */
108 sid_rb_insert(struct rb_root *root, unsigned long cid,
109 struct cifs_sid_id **psidid, char *typestr)
112 struct rb_node *node = root->rb_node;
113 struct rb_node *parent = NULL;
114 struct rb_node **linkto = &(root->rb_node);
115 struct cifs_sid_id *lsidid;
118 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
120 if (cid > lsidid->id) {
121 linkto = &(node->rb_left);
122 node = node->rb_left;
124 if (cid < lsidid->id) {
125 linkto = &(node->rb_right);
126 node = node->rb_right;
/* back-date the timestamp so the first lookup is not treated as a
 * recent failed retry (see SID_MAP_RETRY checks in callers) */
131 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
132 (*psidid)->refcount = 0;
/* build key string "oi:<id>" or "gi:<id>" for request_key() */
134 sprintf((*psidid)->sidstr, "%s", typestr);
135 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
136 sprintf(strptr, "%ld", cid);
/* new node starts unmapped and with no upcall pending */
138 clear_bit(SID_ID_PENDING, &(*psidid)->state);
139 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
141 rb_link_node(&(*psidid)->rbnode, parent, linkto);
142 rb_insert_color(&(*psidid)->rbnode, root);
/*
 * Look up a cache node by numeric id. Mirrors the (inverted-looking)
 * ordering used by sid_rb_insert(): larger ids descend left.
 * Returns the node, or (presumably) NULL when absent -- the return
 * statements are in elided lines.
 */
145 static struct cifs_sid_id *
146 sid_rb_search(struct rb_root *root, unsigned long cid)
148 struct rb_node *node = root->rb_node;
149 struct cifs_sid_id *lsidid;
152 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
153 if (cid > lsidid->id)
154 node = node->rb_left;
155 else if (cid < lsidid->id)
156 node = node->rb_right;
157 else /* node found */
/* Shrinker registration for the idmap caches (see cifs_idmap_shrinker). */
164 static struct shrinker cifs_shrinker = {
165 .shrink = cifs_idmap_shrinker,
166 .seeks = DEFAULT_SEEKS,
/*
 * Instantiate a cifs.idmap key: copy the upcall payload verbatim.
 * NOTE(review): the kmalloc-failure return and any datalen validation
 * are in elided lines -- confirm payload is checked before use.
 */
170 cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
174 payload = kmalloc(prep->datalen, GFP_KERNEL);
178 memcpy(payload, prep->data, prep->datalen);
179 key->payload.data = payload;
180 key->datalen = prep->datalen;
/* Free the payload copied at instantiate time. */
185 cifs_idmap_key_destroy(struct key *key)
187 kfree(key->payload.data);
/* Key type consumed by the userspace cifs.idmap upcall helper. */
190 struct key_type cifs_idmap_key_type = {
191 .name = "cifs.idmap",
192 .instantiate = cifs_idmap_key_instantiate,
193 .destroy = cifs_idmap_key_destroy,
194 .describe = user_describe,
/*
 * Render a binary SID as its textual "S-<rev>-<auth>-<sub>..." form
 * into sidstr (caller-supplied buffer, presumably SID_STRING_MAX --
 * TODO confirm). Only non-zero authority bytes are printed.
 */
199 sid_to_str(struct cifs_sid *sidptr, char *sidstr)
207 sprintf(strptr, "%s", "S");
208 strptr = sidstr + strlen(sidstr);
210 sprintf(strptr, "-%d", sidptr->revision);
211 strptr = sidstr + strlen(sidstr);
/* print each non-zero byte of the 6-byte identifier authority */
213 for (i = 0; i < NUM_AUTHS; ++i) {
214 if (sidptr->authority[i]) {
215 sprintf(strptr, "-%d", sidptr->authority[i]);
216 strptr = sidstr + strlen(sidstr);
/* sub-authorities are stored little-endian on the wire */
220 for (i = 0; i < sidptr->num_subauth; ++i) {
221 saval = le32_to_cpu(sidptr->sub_auth[i]);
222 sprintf(strptr, "-%ld", saval);
223 strptr = sidstr + strlen(sidstr);
228 * if the two SIDs (roughly equivalent to a UUID for a user or group) are
229 * the same returns zero, if they do not match returns non-zero.
/*
 * Three-way-ish comparison of two SIDs: 0 on match, non-zero (sign
 * encodes ordering, per the visible > branches) on mismatch.
 * NOTE(review): the actual return statements sit in elided lines.
 */
232 compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
235 int num_subauth, num_sat, num_saw;
/* NULL SIDs cannot match */
237 if ((!ctsid) || (!cwsid))
240 /* compare the revision */
241 if (ctsid->revision != cwsid->revision) {
242 if (ctsid->revision > cwsid->revision)
248 /* compare all of the six auth values */
249 for (i = 0; i < NUM_AUTHS; ++i) {
250 if (ctsid->authority[i] != cwsid->authority[i]) {
251 if (ctsid->authority[i] > cwsid->authority[i])
258 /* compare all of the subauth values if any */
/* compare only the common prefix of sub-authorities */
259 num_sat = ctsid->num_subauth;
260 num_saw = cwsid->num_subauth;
261 num_subauth = num_sat < num_saw ? num_sat : num_saw;
/* raw le32 compare is fine for equality; byte-swap only to order */
263 for (i = 0; i < num_subauth; ++i) {
264 if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
265 if (le32_to_cpu(ctsid->sub_auth[i]) >
266 le32_to_cpu(cwsid->sub_auth[i]))
274 return 0; /* sids compare/match */
/*
 * Copy a SID, clamping num_subauth to SID_MAX_SUB_AUTHORITIES so a
 * malformed source cannot overrun dst->sub_auth[].
 */
278 cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
282 dst->revision = src->revision;
283 dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
284 for (i = 0; i < NUM_AUTHS; ++i)
285 dst->authority[i] = src->authority[i];
/* sub_auth entries stay little-endian; no byte-swap on copy */
286 for (i = 0; i < dst->num_subauth; ++i)
287 dst->sub_auth[i] = src->sub_auth[i];
/*
 * Insert a new cache node keyed by SID (via compare_sids) into an
 * rb-tree and build its "os:S-..."/"gs:S-..." key string for the
 * idmap upcall. Caller presumably holds the tree's spinlock.
 */
291 id_rb_insert(struct rb_root *root, struct cifs_sid *sidptr,
292 struct cifs_sid_id **psidid, char *typestr)
296 struct rb_node *node = root->rb_node;
297 struct rb_node *parent = NULL;
298 struct rb_node **linkto = &(root->rb_node);
299 struct cifs_sid_id *lsidid;
302 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
304 rc = compare_sids(sidptr, &((lsidid)->sid));
306 linkto = &(node->rb_left);
307 node = node->rb_left;
309 linkto = &(node->rb_right);
310 node = node->rb_right;
314 cifs_copy_sid(&(*psidid)->sid, sidptr);
/* back-date so the first lookup is not treated as a recent retry */
315 (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
316 (*psidid)->refcount = 0;
/* key string is "<typestr>" + textual SID, e.g. "os:S-1-5-..." */
318 sprintf((*psidid)->sidstr, "%s", typestr);
319 strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
320 sid_to_str(&(*psidid)->sid, strptr);
/* new node starts unmapped with no upcall pending */
322 clear_bit(SID_ID_PENDING, &(*psidid)->state);
323 clear_bit(SID_ID_MAPPED, &(*psidid)->state);
325 rb_link_node(&(*psidid)->rbnode, parent, linkto);
326 rb_insert_color(&(*psidid)->rbnode, root);
/*
 * Look up a cache node by SID using the same compare_sids ordering as
 * id_rb_insert(). Returns the node or (presumably) NULL -- the return
 * statements are in elided lines.
 */
329 static struct cifs_sid_id *
330 id_rb_search(struct rb_root *root, struct cifs_sid *sidptr)
333 struct rb_node *node = root->rb_node;
334 struct cifs_sid_id *lsidid;
337 lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
338 rc = compare_sids(sidptr, &((lsidid)->sid));
340 node = node->rb_left;
342 node = node->rb_right;
343 } else /* node found */
/*
 * wait_on_bit() action: interruptible wait for SID_ID_PENDING to
 * clear; abort with -ERESTARTSYS if a signal is pending.
 */
351 sidid_pending_wait(void *unused)
354 return signal_pending(current) ? -ERESTARTSYS : 0;
/*
 * Map a numeric uid/gid to a SID (for chown/chgrp), consulting the
 * per-type rb-tree cache first and falling back to a request_key()
 * upcall ("oi:<id>" / "gi:<id>") run under root_cred. Exactly one
 * task performs the upcall (SID_ID_PENDING bit); others wait on it.
 * NOTE(review): sampled fragment -- allocation-failure paths, tree
 * selection for SIDGROUP, locking re-acquisition and returns are in
 * elided lines.
 */
358 id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
362 const struct cred *saved_cred;
363 struct cifs_sid *lsid;
364 struct cifs_sid_id *psidid, *npsidid;
365 struct rb_root *cidtree;
/* pick lock + tree by mapping direction (owner vs. group) */
368 if (sidtype == SIDOWNER) {
369 cidlock = &siduidlock;
371 } else if (sidtype == SIDGROUP) {
372 cidlock = &sidgidlock;
378 psidid = sid_rb_search(cidtree, cid);
380 if (!psidid) { /* node does not exist, allocate one & attempt adding */
/* drop the spinlock before sleeping allocations */
381 spin_unlock(cidlock);
382 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
386 npsidid->sidstr = kmalloc(SID_STRING_MAX, GFP_KERNEL);
387 if (!npsidid->sidstr) {
/* re-check after re-locking: another task may have raced us */
393 psidid = sid_rb_search(cidtree, cid);
394 if (psidid) { /* node happened to get inserted meanwhile */
396 spin_unlock(cidlock);
397 kfree(npsidid->sidstr);
401 sid_rb_insert(cidtree, cid, &psidid,
402 sidtype == SIDOWNER ? "oi:" : "gi:");
404 spin_unlock(cidlock);
408 spin_unlock(cidlock);
412 * If we are here, it is safe to access psidid and its fields
413 * since a reference was taken earlier while holding the spinlock.
414 * A reference on the node is put without holding the spinlock
415 * and it is OK to do so in this case, shrinker will not erase
416 * this node until all references are put and we do not access
417 * any fields of the node after a reference is put .
/* fast path: already mapped -- just refresh the access timestamp */
419 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
420 cifs_copy_sid(ssid, &psidid->sid);
421 psidid->time = jiffies; /* update ts for accessing */
/* a recent upcall failed; do not retry within SID_MAP_RETRY */
425 if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
/* winner of the PENDING bit performs the upcall; losers wait below */
430 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
431 saved_cred = override_creds(root_cred);
432 sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
433 if (IS_ERR(sidkey)) {
435 cFYI(1, "%s: Can't map and id to a SID", __func__);
/* sanity-check the payload size before trusting it as a SID */
436 } else if (sidkey->datalen < CIFS_SID_BASE_SIZE) {
438 cFYI(1, "%s: Downcall contained malformed key "
439 "(datalen=%hu)", __func__, sidkey->datalen);
441 lsid = (struct cifs_sid *)sidkey->payload.data;
442 cifs_copy_sid(&psidid->sid, lsid);
443 cifs_copy_sid(ssid, &psidid->sid);
444 set_bit(SID_ID_MAPPED, &psidid->state);
446 kfree(psidid->sidstr);
448 psidid->time = jiffies; /* update ts for accessing */
449 revert_creds(saved_cred);
/* wake any tasks that piled up on SID_ID_PENDING */
450 clear_bit(SID_ID_PENDING, &psidid->state);
451 wake_up_bit(&psidid->state, SID_ID_PENDING);
453 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
454 sidid_pending_wait, TASK_INTERRUPTIBLE);
456 cFYI(1, "%s: sidid_pending_wait interrupted %d",
/* after the wait, use the result only if the winner mapped it */
461 if (test_bit(SID_ID_MAPPED, &psidid->state))
462 cifs_copy_sid(ssid, &psidid->sid);
/*
 * Map a SID from a security descriptor to a uid or gid in fattr,
 * mirroring id_to_sid(): rb-tree cache first, then a request_key()
 * upcall ("os:"/"gs:" + textual SID) guarded by SID_ID_PENDING.
 * Falls back to the mount's default uid/gid when the upcall fails.
 * NOTE(review): sampled fragment -- error paths, fattr assignment at
 * the end and several returns are in elided lines.
 */
472 sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
473 struct cifs_fattr *fattr, uint sidtype)
478 const struct cred *saved_cred;
479 struct cifs_sid_id *psidid, *npsidid;
480 struct rb_root *cidtree;
/* pick default id, lock and tree by mapping direction */
483 if (sidtype == SIDOWNER) {
484 cid = cifs_sb->mnt_uid; /* default uid, in case upcall fails */
485 cidlock = &siduidlock;
487 } else if (sidtype == SIDGROUP) {
488 cid = cifs_sb->mnt_gid; /* default gid, in case upcall fails */
489 cidlock = &sidgidlock;
495 psidid = id_rb_search(cidtree, psid);
497 if (!psidid) { /* node does not exist, allocate one & attempt adding */
/* drop the spinlock before sleeping allocations */
498 spin_unlock(cidlock);
499 npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
503 npsidid->sidstr = kmalloc(SID_STRING_MAX, GFP_KERNEL);
504 if (!npsidid->sidstr) {
/* re-check after re-locking: another task may have raced us */
510 psidid = id_rb_search(cidtree, psid);
511 if (psidid) { /* node happened to get inserted meanwhile */
513 spin_unlock(cidlock);
514 kfree(npsidid->sidstr);
518 id_rb_insert(cidtree, psid, &psidid,
519 sidtype == SIDOWNER ? "os:" : "gs:");
521 spin_unlock(cidlock);
525 spin_unlock(cidlock);
529 * If we are here, it is safe to access psidid and its fields
530 * since a reference was taken earlier while holding the spinlock.
531 * A reference on the node is put without holding the spinlock
532 * and it is OK to do so in this case, shrinker will not erase
533 * this node until all references are put and we do not access
534 * any fields of the node after a reference is put .
/* fast path: mapping already cached */
536 if (test_bit(SID_ID_MAPPED, &psidid->state)) {
538 psidid->time = jiffies; /* update ts for accessing */
/* a recent upcall failed; do not retry within SID_MAP_RETRY */
542 if (time_after(psidid->time + SID_MAP_RETRY, jiffies))
/* winner of the PENDING bit performs the upcall; losers wait below */
545 if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
546 saved_cred = override_creds(root_cred);
547 idkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
549 cFYI(1, "%s: Can't map SID to an id", __func__);
/* downcall payload is the numeric id -- TODO confirm payload.value
 * width vs. unsigned long on all arches */
551 cid = *(unsigned long *)idkey->payload.value;
553 set_bit(SID_ID_MAPPED, &psidid->state);
555 kfree(psidid->sidstr);
557 revert_creds(saved_cred);
558 psidid->time = jiffies; /* update ts for accessing */
/* wake any tasks that piled up on SID_ID_PENDING */
559 clear_bit(SID_ID_PENDING, &psidid->state);
560 wake_up_bit(&psidid->state, SID_ID_PENDING);
562 rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
563 sidid_pending_wait, TASK_INTERRUPTIBLE);
565 cFYI(1, "%s: sidid_pending_wait interrupted %d",
567 --psidid->refcount; /* decremented without spinlock */
570 if (test_bit(SID_ID_MAPPED, &psidid->state))
575 --psidid->refcount; /* decremented without spinlock */
/* store the resolved (or default) id into the right fattr field */
576 if (sidtype == SIDOWNER)
/*
 * Module-init for the idmap subsystem: build an override credential
 * set with a private ".cifs_idmap" keyring (so request_key results
 * are cached there and cannot be hijacked by user keyrings), register
 * the cifs.idmap key type, init the four lock/tree pairs, and
 * register the cache shrinker.
 * NOTE(review): sampled fragment -- error-unwind labels and the tree
 * initializations between lines 627-635 are partially elided; the
 * visible pairing (uidsidlock with siduidtree, gidsidlock with
 * sidgidtree) looks odd -- confirm against the full source.
 */
585 init_cifs_idmap(void)
591 cFYI(1, "Registering the %s key type", cifs_idmap_key_type.name);
593 /* create an override credential set with a special thread keyring in
594 * which requests are cached
596 * this is used to prevent malicious redirections from being installed
599 cred = prepare_kernel_cred(NULL);
603 keyring = key_alloc(&key_type_keyring, ".cifs_idmap", 0, 0, cred,
604 (KEY_POS_ALL & ~KEY_POS_SETATTR) |
605 KEY_USR_VIEW | KEY_USR_READ,
606 KEY_ALLOC_NOT_IN_QUOTA);
607 if (IS_ERR(keyring)) {
608 ret = PTR_ERR(keyring);
609 goto failed_put_cred;
/* make the keyring usable (empty instantiation) */
612 ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL);
616 ret = register_key_type(&cifs_idmap_key_type);
620 /* instruct request_key() to use this special keyring as a cache for
621 * the results it looks up */
622 set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
623 cred->thread_keyring = keyring;
624 cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
627 spin_lock_init(&siduidlock);
629 spin_lock_init(&sidgidlock);
632 spin_lock_init(&uidsidlock);
633 siduidtree = RB_ROOT;
634 spin_lock_init(&gidsidlock);
635 sidgidtree = RB_ROOT;
636 register_shrinker(&cifs_shrinker);
638 cFYI(1, "cifs idmap keyring: %d", key_serial(keyring));
/*
 * Module-exit: revoke the upcall-cache keyring, unregister the key
 * type and the shrinker. (Dropping root_cred itself presumably
 * happens in an elided line -- TODO confirm.)
 */
649 exit_cifs_idmap(void)
651 key_revoke(root_cred->thread_keyring);
652 unregister_key_type(&cifs_idmap_key_type);
654 unregister_shrinker(&cifs_shrinker);
655 cFYI(1, "Unregistered %s key type", cifs_idmap_key_type.name);
/*
 * Empty all four id-mapping rb-trees on teardown.
 * NOTE(review): as shown, nodes are rb_erase()d but never kfree()d
 * (neither the cifs_sid_id nor its sidstr); unless the frees live in
 * elided lines, this leaks every cached entry -- verify.
 * The `root = &<tree>` assignments before each loop are also elided.
 */
659 cifs_destroy_idmaptrees(void)
661 struct rb_root *root;
662 struct rb_node *node;
665 spin_lock(&siduidlock);
666 while ((node = rb_first(root)))
667 rb_erase(node, root);
668 spin_unlock(&siduidlock);
671 spin_lock(&sidgidlock);
672 while ((node = rb_first(root)))
673 rb_erase(node, root);
674 spin_unlock(&sidgidlock);
677 spin_lock(&uidsidlock);
678 while ((node = rb_first(root)))
679 rb_erase(node, root);
680 spin_unlock(&uidsidlock);
683 spin_lock(&gidsidlock);
684 while ((node = rb_first(root)))
685 rb_erase(node, root);
686 spin_unlock(&gidsidlock);
689 /* copy ntsd, owner sid, and group sid from a security descriptor to another */
/*
 * Build the fixed header of a new security descriptor from an existing
 * one: copy the control fields, point the DACL immediately after the
 * header, drop the SACL, and place owner and group SIDs back-to-back
 * at sidsoffset.
 */
690 static void copy_sec_desc(const struct cifs_ntsd *pntsd,
691 struct cifs_ntsd *pnntsd, __u32 sidsoffset)
693 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
694 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
696 /* copy security descriptor control portion */
697 pnntsd->revision = pntsd->revision;
698 pnntsd->type = pntsd->type;
/* new DACL starts right after the fixed-size header */
699 pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
/* no SACL in the rebuilt descriptor */
700 pnntsd->sacloffset = 0;
701 pnntsd->osidoffset = cpu_to_le32(sidsoffset);
702 pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
/* copy owner SID from source at its advertised offset */
705 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
706 le32_to_cpu(pntsd->osidoffset));
707 nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
708 cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
/* copy group SID just past the owner SID */
711 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
712 le32_to_cpu(pntsd->gsidoffset));
713 ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
714 sizeof(struct cifs_sid));
715 cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
722 change posix mode to reflect permissions
723 pmode is the existing mode (we only want to overwrite part of this
724 bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007
/*
 * Fold one ACE's access mask into the POSIX mode being accumulated.
 * DENY ACEs clear bits in *pbits_to_set so later ALLOW ACEs cannot
 * re-grant them; ALLOW ACEs OR the permitted bits into *pmode.
 */
726 static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
727 umode_t *pbits_to_set)
729 __u32 flags = le32_to_cpu(ace_flags);
730 /* the order of ACEs is important. The canonical order is to begin with
731 DENY entries followed by ALLOW, otherwise an allow entry could be
732 encountered first, making the subsequent deny entry like "dead code"
733 which would be superflous since Windows stops when a match is made
734 for the operation you are trying to perform for your user */
736 /* For deny ACEs we change the mask so that subsequent allow access
737 control entries do not turn on the bits we are denying */
738 if (type == ACCESS_DENIED) {
739 if (flags & GENERIC_ALL)
740 *pbits_to_set &= ~S_IRWXUGO;
/* a right counts as denied if GENERIC_* is set or ALL of the
 * corresponding FILE_*_RIGHTS bits are present */
742 if ((flags & GENERIC_WRITE) ||
743 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
744 *pbits_to_set &= ~S_IWUGO;
745 if ((flags & GENERIC_READ) ||
746 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
747 *pbits_to_set &= ~S_IRUGO;
748 if ((flags & GENERIC_EXECUTE) ||
749 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
750 *pbits_to_set &= ~S_IXUGO;
/* unknown ACE types are logged and ignored (return is elided) */
752 } else if (type != ACCESS_ALLOWED) {
753 cERROR(1, "unknown access control type %d", type);
756 /* else ACCESS_ALLOWED type */
758 if (flags & GENERIC_ALL) {
759 *pmode |= (S_IRWXUGO & (*pbits_to_set));
760 cFYI(DBG2, "all perms");
/* grant only bits still allowed by the (possibly denied-down) mask */
763 if ((flags & GENERIC_WRITE) ||
764 ((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
765 *pmode |= (S_IWUGO & (*pbits_to_set));
766 if ((flags & GENERIC_READ) ||
767 ((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
768 *pmode |= (S_IRUGO & (*pbits_to_set));
769 if ((flags & GENERIC_EXECUTE) ||
770 ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
771 *pmode |= (S_IXUGO & (*pbits_to_set));
773 cFYI(DBG2, "access flags 0x%x mode now 0x%x", flags, *pmode);
778 Generate access flags to reflect permissions mode is the existing mode.
779 This function is called for every ACE in the DACL whose SID matches
780 with either owner or group or everyone.
/*
 * Convert one POSIX rwx triple (selected by bits_to_use) into a CIFS
 * ACE access mask stored in *pace_flags.
 * NOTE(review): the masking of mode with bits_to_use and the rwx
 * tests themselves are in elided lines.
 */
783 static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
786 /* reset access mask */
789 /* bits to use are either S_IRWXU or S_IRWXG or S_IRWXO */
792 /* check for R/W/X UGO since we do not know whose flags
793 is this but we have cleared all the bits sans RWX for
794 either user or group or other as per bits_to_use */
796 *pace_flags |= SET_FILE_READ_RIGHTS;
798 *pace_flags |= SET_FILE_WRITE_RIGHTS;
800 *pace_flags |= SET_FILE_EXEC_RIGHTS;
802 cFYI(DBG2, "mode: 0x%x, access flags now 0x%x", mode, *pace_flags);
/*
 * Write one ACCESS_ALLOWED ACE for psid with rights derived from the
 * nmode bits selected by `bits`; returns the ACE's on-wire size.
 */
806 static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
807 const struct cifs_sid *psid, __u64 nmode, umode_t bits)
811 __u32 access_req = 0;
813 pntace->type = ACCESS_ALLOWED;
815 mode_to_access_flags(nmode, bits, &access_req);
/* never emit a zero mask; grant at least the minimum rights
 * (the condition guarding this fallback is elided) */
817 access_req = SET_MINIMUM_RIGHTS;
818 pntace->access_req = cpu_to_le32(access_req);
820 pntace->sid.revision = psid->revision;
821 pntace->sid.num_subauth = psid->num_subauth;
822 for (i = 0; i < NUM_AUTHS; i++)
823 pntace->sid.authority[i] = psid->authority[i];
824 for (i = 0; i < psid->num_subauth; i++)
825 pntace->sid.sub_auth[i] = psid->sub_auth[i];
/* size = ACE header (1+1+2+4) + SID header (1+1+6) + sub_auths */
827 size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
828 pntace->size = cpu_to_le16(size);
834 #ifdef CONFIG_CIFS_DEBUG2
/*
 * Debug-only: sanity-check one ACE against the end of the ACL buffer
 * and log its header fields and sub-authorities.
 */
835 static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
839 /* validate that we do not go past end of acl */
/* 16 bytes = minimum ACE (header + SID with one sub-auth) */
841 if (le16_to_cpu(pace->size) < 16) {
842 cERROR(1, "ACE too small %d", le16_to_cpu(pace->size));
846 if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
847 cERROR(1, "ACL too small to parse ACE");
851 num_subauth = pace->sid.num_subauth;
854 cFYI(1, "ACE revision %d num_auth %d type %d flags %d size %d",
855 pace->sid.revision, pace->sid.num_subauth, pace->type,
856 pace->flags, le16_to_cpu(pace->size));
857 for (i = 0; i < num_subauth; ++i) {
858 cFYI(1, "ACE sub_auth[%d]: 0x%x", i,
859 le32_to_cpu(pace->sid.sub_auth[i]));
862 /* BB add length check to make sure that we do not have huge
863 num auths and therefore go off the end */
/*
 * Walk a DACL and translate its ACEs into POSIX mode bits on fattr.
 * Owner-matching ACEs set the user bits, group-matching the group
 * bits, Everyone/Authenticated-Users the remaining bits.
 * NOTE(review): sampled fragment -- the per-ACE access_flags_to_mode
 * argument lists, bounds checks inside the loop and the kfree of
 * ppace are in elided lines.
 */
871 static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
872 struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
873 struct cifs_fattr *fattr)
879 struct cifs_ace **ppace;
881 /* BB need to add parm so we can store the SID BB */
884 /* no DACL in the security descriptor, set
885 all the permissions for user/group/other */
886 fattr->cf_mode |= S_IRWXUGO;
890 /* validate that we do not go past end of acl */
891 if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
892 cERROR(1, "ACL too small to parse DACL");
896 cFYI(DBG2, "DACL revision %d size %d num aces %d",
897 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
898 le32_to_cpu(pdacl->num_aces));
900 /* reset rwx permissions for user/group/other.
901 Also, if num_aces is 0 i.e. DACL has no ACEs,
902 user/group/other have no permissions */
903 fattr->cf_mode &= ~(S_IRWXUGO);
905 acl_base = (char *)pdacl;
906 acl_size = sizeof(struct cifs_acl);
908 num_aces = le32_to_cpu(pdacl->num_aces);
/* masks start fully permissive and are narrowed by DENY ACEs */
910 umode_t user_mask = S_IRWXU;
911 umode_t group_mask = S_IRWXG;
912 umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
/* reject server-supplied num_aces that would overflow kmalloc size */
914 if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
916 ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
919 cERROR(1, "DACL memory allocation error");
923 for (i = 0; i < num_aces; ++i) {
/* ACEs are laid out back-to-back; each starts after the previous */
924 ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
925 #ifdef CONFIG_CIFS_DEBUG2
926 dump_ace(ppace[i], end_of_acl);
928 if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
929 access_flags_to_mode(ppace[i]->access_req,
933 if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
934 access_flags_to_mode(ppace[i]->access_req,
938 if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
939 access_flags_to_mode(ppace[i]->access_req,
943 if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
944 access_flags_to_mode(ppace[i]->access_req,
950 /* memcpy((void *)(&(cifscred->aces[i])),
952 sizeof(struct cifs_ace)); */
954 acl_base = (char *)ppace[i];
955 acl_size = le16_to_cpu(ppace[i]->size);
/*
 * Build a three-ACE DACL (owner, group, Everyone) expressing the
 * POSIX mode nmode; fills in the DACL's size and ACE count.
 */
965 static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
966 struct cifs_sid *pgrpsid, __u64 nmode)
969 struct cifs_acl *pnndacl;
/* ACEs begin right after the DACL header */
971 pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
973 size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
974 pownersid, nmode, S_IRWXU);
975 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
976 pgrpsid, nmode, S_IRWXG);
977 size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
978 &sid_everyone, nmode, S_IRWXO);
/* total DACL size = header + accumulated ACE sizes */
980 pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
981 pndacl->num_aces = cpu_to_le32(3);
/*
 * Minimal validation of a SID within an ACL buffer, plus debug dump.
 * Returns 0 on success / negative on error (returns are elided).
 */
987 static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
989 /* BB need to add parm so we can store the SID BB */
991 /* validate that we do not go past end of ACL - sid must be at least 8
992 bytes long (assuming no sub-auths - e.g. the null SID */
993 if (end_of_acl < (char *)psid + 8) {
994 cERROR(1, "ACL too small to parse SID %p", psid);
998 #ifdef CONFIG_CIFS_DEBUG2
999 if (psid->num_subauth) {
1001 cFYI(1, "SID revision %d num_auth %d",
1002 psid->revision, psid->num_subauth);
1004 for (i = 0; i < psid->num_subauth; i++) {
1005 cFYI(1, "SID sub_auth[%d]: 0x%x ", i,
1006 le32_to_cpu(psid->sub_auth[i]));
1009 /* BB add length check to make sure that we do not have huge
1010 num auths and therefore go off the end */
/* NOTE(review): num_subauth is server-controlled; indexing
 * sub_auth[num_subauth-1] without a bound check relies on the
 * length check above being added -- verify in full source */
1012 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
1020 /* Convert CIFS ACL to POSIX form */
/*
 * Top-level descriptor parser: locate owner SID, group SID and DACL
 * via their offsets, map the SIDs to uid/gid, then parse the DACL
 * into mode bits.
 */
1021 static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
1022 struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
1025 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1026 struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
1027 char *end_of_acl = ((char *)pntsd) + acl_len;
1033 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1034 le32_to_cpu(pntsd->osidoffset));
1035 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1036 le32_to_cpu(pntsd->gsidoffset));
1037 dacloffset = le32_to_cpu(pntsd->dacloffset);
1038 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
1039 cFYI(DBG2, "revision %d type 0x%x ooffset 0x%x goffset 0x%x "
1040 "sacloffset 0x%x dacloffset 0x%x",
1041 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
1042 le32_to_cpu(pntsd->gsidoffset),
1043 le32_to_cpu(pntsd->sacloffset), dacloffset);
1044 /* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
1045 rc = parse_sid(owner_sid_ptr, end_of_acl);
1047 cFYI(1, "%s: Error %d parsing Owner SID", __func__, rc);
1050 rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
1052 cFYI(1, "%s: Error %d mapping Owner SID to uid", __func__, rc);
1056 rc = parse_sid(group_sid_ptr, end_of_acl);
/* NOTE(review): wrong log text -- this is a Group-SID *parse*
 * failure, but the message says "mapping Owner SID to gid";
 * fix candidate: "Error %d parsing Group SID" */
1058 cFYI(1, "%s: Error %d mapping Owner SID to gid", __func__, rc);
1061 rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
1063 cFYI(1, "%s: Error %d mapping Group SID to gid", __func__, rc);
/* a dacloffset of 0 means no DACL (branch condition elided) */
1068 parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
1069 group_sid_ptr, fattr);
1071 cFYI(1, "no ACL"); /* BB grant all or default perms? */
1076 /* Convert permission bits from mode to equivalent CIFS ACL */
/*
 * Build the descriptor to send for a mode/owner change:
 *  - chmod (nmode != NO_CHANGE_64): new header + 3-ACE DACL + copied
 *    owner/group SIDs; sets *aclflag = CIFS_ACL_DACL.
 *  - chown/chgrp: copy the old descriptor wholesale, then overwrite
 *    the owner and/or group SID in place via id_to_sid() upcalls.
 * NOTE(review): sampled fragment -- error codes for kmalloc failure
 * and the goto targets after mapping errors are elided.
 */
1077 static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
1078 __u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
1084 struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
1085 struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
1086 struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
1087 struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
1089 if (nmode != NO_CHANGE_64) { /* chmod */
1090 owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1091 le32_to_cpu(pntsd->osidoffset));
1092 group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
1093 le32_to_cpu(pntsd->gsidoffset));
1094 dacloffset = le32_to_cpu(pntsd->dacloffset);
1095 dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
/* new DACL immediately follows the fixed header */
1096 ndacloffset = sizeof(struct cifs_ntsd);
1097 ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
1098 ndacl_ptr->revision = dacl_ptr->revision;
1099 ndacl_ptr->size = 0;
1100 ndacl_ptr->num_aces = 0;
1102 rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
/* SIDs are placed right after the freshly built DACL */
1104 sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
1105 /* copy sec desc control portion & owner and group sids */
1106 copy_sec_desc(pntsd, pnntsd, sidsoffset);
1107 *aclflag = CIFS_ACL_DACL;
/* chown/chgrp path: start from a verbatim copy of the old SD */
1109 memcpy(pnntsd, pntsd, secdesclen);
1110 if (uid != NO_CHANGE_32) { /* chown */
1111 owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1112 le32_to_cpu(pnntsd->osidoffset));
1113 nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1115 if (!nowner_sid_ptr)
1117 rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
1119 cFYI(1, "%s: Mapping error %d for owner id %d",
1121 kfree(nowner_sid_ptr);
1124 cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
1125 kfree(nowner_sid_ptr);
1126 *aclflag = CIFS_ACL_OWNER;
1128 if (gid != NO_CHANGE_32) { /* chgrp */
1129 group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
1130 le32_to_cpu(pnntsd->gsidoffset));
1131 ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
1133 if (!ngroup_sid_ptr)
1135 rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
1137 cFYI(1, "%s: Mapping error %d for group id %d",
1139 kfree(ngroup_sid_ptr);
1142 cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
1143 kfree(ngroup_sid_ptr);
1144 *aclflag = CIFS_ACL_GROUP;
/*
 * Fetch a file's security descriptor via an already-open fid.
 * Returns the allocated descriptor (caller frees) or ERR_PTR; length
 * is returned through *pacllen.
 */
1151 static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
1152 __u16 fid, u32 *pacllen)
1154 struct cifs_ntsd *pntsd = NULL;
1157 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1160 return ERR_CAST(tlink);
1163 rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
1166 cifs_put_tlink(tlink);
1168 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
/*
 * Fetch a security descriptor by path: open with READ_CONTROL (with
 * backup intent if the mount allows it), query the ACL, close.
 * Returns the allocated descriptor or ERR_PTR; length via *pacllen.
 */
1174 static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
1175 const char *path, u32 *pacllen)
1177 struct cifs_ntsd *pntsd = NULL;
1180 int rc, create_options = 0;
1182 struct cifs_tcon *tcon;
1183 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1186 return ERR_CAST(tlink);
1188 tcon = tlink_tcon(tlink);
/* honor the "backup" mount option for privileged opens */
1191 if (backup_cred(cifs_sb))
1192 create_options |= CREATE_OPEN_BACKUP_INTENT;
1194 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
1195 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1196 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1198 rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
1199 CIFSSMBClose(xid, tcon, fid);
1202 cifs_put_tlink(tlink);
1205 cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
1211 /* Retrieve an ACL from the server */
/*
 * Prefer an existing readable open handle (avoids an extra open/close
 * round trip); fall back to the by-path variant otherwise.
 */
1212 struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
1213 struct inode *inode, const char *path,
1216 struct cifs_ntsd *pntsd = NULL;
1217 struct cifsFileInfo *open_file = NULL;
1220 open_file = find_readable_file(CIFS_I(inode), true);
1222 return get_cifs_acl_by_path(cifs_sb, path, pacllen);
1224 pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
/* drop the reference taken by find_readable_file() */
1225 cifsFileInfo_put(open_file);
1229 /* Set an ACL on the server */
/*
 * Push a (possibly partial) security descriptor back to the server.
 * aclflag selects which parts we are writing, which in turn dictates
 * the open access: WRITE_OWNER for owner/group changes, WRITE_DAC for
 * DACL-only (chmod) changes.
 */
1230 int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
1231 struct inode *inode, const char *path, int aclflag)
1235 int rc, access_flags, create_options = 0;
1237 struct cifs_tcon *tcon;
1238 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
1239 struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
1242 return PTR_ERR(tlink);
1244 tcon = tlink_tcon(tlink);
/* honor the "backup" mount option for privileged opens */
1247 if (backup_cred(cifs_sb))
1248 create_options |= CREATE_OPEN_BACKUP_INTENT;
1250 if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
1251 access_flags = WRITE_OWNER;
1253 access_flags = WRITE_DAC;
1255 rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
1256 create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
1257 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
1259 cERROR(1, "Unable to open file to set ACL");
1263 rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
1264 cFYI(DBG2, "SetCIFSACL rc = %d", rc);
1266 CIFSSMBClose(xid, tcon, fid);
1269 cifs_put_tlink(tlink);
1273 /* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
/*
 * Entry point for stat-time ACL translation: fetch the descriptor
 * (by fid when the caller has one, else by path) and parse it into
 * fattr's uid/gid/mode.
 */
1275 cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
1276 struct inode *inode, const char *path, const __u16 *pfid)
1278 struct cifs_ntsd *pntsd = NULL;
1282 cFYI(DBG2, "converting ACL to mode for %s", path);
1285 pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
1287 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
1289 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
1290 if (IS_ERR(pntsd)) {
1291 rc = PTR_ERR(pntsd);
1292 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1294 rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
1297 cERROR(1, "parse sec desc failed rc = %d", rc);
1303 /* Convert mode bits to an ACL so we can update the ACL on the server */
1305 id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
1306 uid_t uid, gid_t gid)
1309 int aclflag = CIFS_ACL_DACL; /* default flag to set */
1310 __u32 secdesclen = 0;
1311 struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
1312 struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
1314 cFYI(DBG2, "set ACL from mode for %s", path);
1316 /* Get the security descriptor */
1317 pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
1318 if (IS_ERR(pntsd)) {
1319 rc = PTR_ERR(pntsd);
1320 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
1325 * Add three ACEs for owner, group, everyone getting rid of other ACEs
1326 * as chmod disables ACEs and set the security descriptor. Allocate
1327 * memory for the smb header, set security descriptor request security
1328 * descriptor parameters, and secuirty descriptor itself
1330 secdesclen = max_t(u32, secdesclen, DEFSECDESCLEN);
1331 pnntsd = kmalloc(secdesclen, GFP_KERNEL);
1333 cERROR(1, "Unable to allocate security descriptor");
1338 rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
1341 cFYI(DBG2, "build_sec_desc rc: %d", rc);
1344 /* Set the security descriptor */
1345 rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
1346 cFYI(DBG2, "set_cifs_acl rc: %d", rc);