aboutsummaryrefslogtreecommitdiffstats
path: root/security
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2008-07-25 04:47:07 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-07-25 13:53:37 -0400
commit4efd1a1b2f09a4b746dd9dc057986c6dadcb1317 (patch)
tree048b7c286be2f17efce9b3482d9618cd150ee3f7 /security
parente885dcde75685e09f23cffae1f6d5169c105b8a0 (diff)
devcgroup: relax white-list protection down to RCU
Currently this list is protected with a simple spinlock, even for reading from it. This is OK, but can be better. Actually I want it to be better very much, since after replacing the OpenVZ device permissions engine with the cgroup-based one I noticed, that we set 12 default device permissions for each newly created container (for /dev/null, full, terminals, etc. devices), and people sometimes have up to 20 perms more, so traversing the ~30-40-element list under a spinlock doesn't seem very good. Here's the RCU protection for the white-list - dev_whitelist_item-s are added and removed under the devcg->lock, but are looked up in permissions checking under the rcu_read_lock. Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Acked-by: Serge Hallyn <serue@us.ibm.com> Cc: Balbir Singh <balbir@in.ibm.com> Cc: Paul Menage <menage@google.com> Cc: "Paul E. McKenney" <paulmck@us.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'security')
-rw-r--r--security/device_cgroup.c35
1 file changed, 22 insertions, 13 deletions
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 236fffa9d05e..9da3532726ff 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -41,6 +41,7 @@ struct dev_whitelist_item {
41 short type; 41 short type;
42 short access; 42 short access;
43 struct list_head list; 43 struct list_head list;
44 struct rcu_head rcu;
44}; 45};
45 46
46struct dev_cgroup { 47struct dev_cgroup {
@@ -133,11 +134,19 @@ static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
133 } 134 }
134 135
135 if (whcopy != NULL) 136 if (whcopy != NULL)
136 list_add_tail(&whcopy->list, &dev_cgroup->whitelist); 137 list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
137 spin_unlock(&dev_cgroup->lock); 138 spin_unlock(&dev_cgroup->lock);
138 return 0; 139 return 0;
139} 140}
140 141
142static void whitelist_item_free(struct rcu_head *rcu)
143{
144 struct dev_whitelist_item *item;
145
146 item = container_of(rcu, struct dev_whitelist_item, rcu);
147 kfree(item);
148}
149
141/* 150/*
142 * called under cgroup_lock() 151 * called under cgroup_lock()
143 * since the list is visible to other tasks, we need the spinlock also 152 * since the list is visible to other tasks, we need the spinlock also
@@ -161,8 +170,8 @@ static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
161remove: 170remove:
162 walk->access &= ~wh->access; 171 walk->access &= ~wh->access;
163 if (!walk->access) { 172 if (!walk->access) {
164 list_del(&walk->list); 173 list_del_rcu(&walk->list);
165 kfree(walk); 174 call_rcu(&walk->rcu, whitelist_item_free);
166 } 175 }
167 } 176 }
168 spin_unlock(&dev_cgroup->lock); 177 spin_unlock(&dev_cgroup->lock);
@@ -269,15 +278,15 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
269 struct dev_whitelist_item *wh; 278 struct dev_whitelist_item *wh;
270 char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; 279 char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
271 280
272 spin_lock(&devcgroup->lock); 281 rcu_read_lock();
273 list_for_each_entry(wh, &devcgroup->whitelist, list) { 282 list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
274 set_access(acc, wh->access); 283 set_access(acc, wh->access);
275 set_majmin(maj, wh->major); 284 set_majmin(maj, wh->major);
276 set_majmin(min, wh->minor); 285 set_majmin(min, wh->minor);
277 seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type), 286 seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
278 maj, min, acc); 287 maj, min, acc);
279 } 288 }
280 spin_unlock(&devcgroup->lock); 289 rcu_read_unlock();
281 290
282 return 0; 291 return 0;
283} 292}
@@ -510,8 +519,8 @@ int devcgroup_inode_permission(struct inode *inode, int mask)
510 if (!dev_cgroup) 519 if (!dev_cgroup)
511 return 0; 520 return 0;
512 521
513 spin_lock(&dev_cgroup->lock); 522 rcu_read_lock();
514 list_for_each_entry(wh, &dev_cgroup->whitelist, list) { 523 list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
515 if (wh->type & DEV_ALL) 524 if (wh->type & DEV_ALL)
516 goto acc_check; 525 goto acc_check;
517 if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode)) 526 if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
@@ -527,10 +536,10 @@ acc_check:
527 continue; 536 continue;
528 if ((mask & MAY_READ) && !(wh->access & ACC_READ)) 537 if ((mask & MAY_READ) && !(wh->access & ACC_READ))
529 continue; 538 continue;
530 spin_unlock(&dev_cgroup->lock); 539 rcu_read_unlock();
531 return 0; 540 return 0;
532 } 541 }
533 spin_unlock(&dev_cgroup->lock); 542 rcu_read_unlock();
534 543
535 return -EPERM; 544 return -EPERM;
536} 545}
@@ -545,7 +554,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
545 if (!dev_cgroup) 554 if (!dev_cgroup)
546 return 0; 555 return 0;
547 556
548 spin_lock(&dev_cgroup->lock); 557 rcu_read_lock();
549 list_for_each_entry(wh, &dev_cgroup->whitelist, list) { 558 list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
550 if (wh->type & DEV_ALL) 559 if (wh->type & DEV_ALL)
551 goto acc_check; 560 goto acc_check;
@@ -560,9 +569,9 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
560acc_check: 569acc_check:
561 if (!(wh->access & ACC_MKNOD)) 570 if (!(wh->access & ACC_MKNOD))
562 continue; 571 continue;
563 spin_unlock(&dev_cgroup->lock); 572 rcu_read_unlock();
564 return 0; 573 return 0;
565 } 574 }
566 spin_unlock(&dev_cgroup->lock); 575 rcu_read_unlock();
567 return -EPERM; 576 return -EPERM;
568} 577}