aboutsummaryrefslogtreecommitdiffstats
path: root/ipc/sem.c
diff options
context:
space:
mode:
authorDavidlohr Bueso <davidlohr@hp.com>2013-09-23 20:04:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-24 12:36:53 -0400
commit53dad6d3a8e5ac1af8bacc6ac2134ae1a8b085f1 (patch)
treefc9349452c9bae7e86dbbbeed99c07bde4bad0c4 /ipc/sem.c
parent4a10c2ac2f368583138b774ca41fac4207911983 (diff)
ipc: fix race with LSMs
Currently, IPC mechanisms do security and auditing related checks under RCU. However, since security modules can free the security structure, for example, through selinux_[sem,msg_queue,shm]_free_security(), we can race if the structure is freed before other tasks are done with it, creating a use-after-free condition. Manfred illustrates this nicely, for instance with shared mem and selinux: -> do_shmat calls rcu_read_lock() -> do_shmat calls shm_object_check(). Checks that the object is still valid - but doesn't acquire any locks. Then it returns. -> do_shmat calls security_shm_shmat (e.g. selinux_shm_shmat) -> selinux_shm_shmat calls ipc_has_perm() -> ipc_has_perm accesses ipc_perms->security shm_close() -> shm_close acquires rw_mutex & shm_lock -> shm_close calls shm_destroy -> shm_destroy calls security_shm_free (e.g. selinux_shm_free_security) -> selinux_shm_free_security calls ipc_free_security(&shp->shm_perm) -> ipc_free_security calls kfree(ipc_perms->security) This patch delays the freeing of the security structures after all RCU readers are done. Furthermore it aligns the security life cycle with that of the rest of IPC - freeing them based on the reference counter. For situations where we need not free security, the current behavior is kept. Linus states: "... the old behavior was suspect for another reason too: having the security blob go away from under a user sounds like it could cause various other problems anyway, so I think the old code was at least _prone_ to bugs even if it didn't have catastrophic behavior." I have tested this patch with IPC testcases from LTP on both my quad-core laptop and on a 64 core NUMA server. In both cases selinux is enabled, and tests pass for both voluntary and forced preemption models. While the mentioned races are theoretical (at least no one has reported them), I wanted to make sure that this new logic doesn't break anything we weren't aware of. 
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by: Davidlohr Bueso <davidlohr@hp.com> Acked-by: Manfred Spraul <manfred@colorfullife.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'ipc/sem.c')
-rw-r--r--ipc/sem.c34
1 files changed, 18 insertions, 16 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index 69b6a21f3844..19c8b980d1fe 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -243,6 +243,15 @@ static void merge_queues(struct sem_array *sma)
243 } 243 }
244} 244}
245 245
246static void sem_rcu_free(struct rcu_head *head)
247{
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253}
254
246/* 255/*
247 * If the request contains only one semaphore operation, and there are 256 * If the request contains only one semaphore operation, and there are
248 * no complex transactions pending, lock only the semaphore involved. 257 * no complex transactions pending, lock only the semaphore involved.
@@ -374,12 +383,7 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
374static inline void sem_lock_and_putref(struct sem_array *sma) 383static inline void sem_lock_and_putref(struct sem_array *sma)
375{ 384{
376 sem_lock(sma, NULL, -1); 385 sem_lock(sma, NULL, -1);
377 ipc_rcu_putref(sma); 386 ipc_rcu_putref(sma, ipc_rcu_free);
378}
379
380static inline void sem_putref(struct sem_array *sma)
381{
382 ipc_rcu_putref(sma);
383} 387}
384 388
385static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) 389static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -458,14 +462,13 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
458 sma->sem_perm.security = NULL; 462 sma->sem_perm.security = NULL;
459 retval = security_sem_alloc(sma); 463 retval = security_sem_alloc(sma);
460 if (retval) { 464 if (retval) {
461 ipc_rcu_putref(sma); 465 ipc_rcu_putref(sma, ipc_rcu_free);
462 return retval; 466 return retval;
463 } 467 }
464 468
465 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); 469 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
466 if (id < 0) { 470 if (id < 0) {
467 security_sem_free(sma); 471 ipc_rcu_putref(sma, sem_rcu_free);
468 ipc_rcu_putref(sma);
469 return id; 472 return id;
470 } 473 }
471 ns->used_sems += nsems; 474 ns->used_sems += nsems;
@@ -1047,8 +1050,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1047 1050
1048 wake_up_sem_queue_do(&tasks); 1051 wake_up_sem_queue_do(&tasks);
1049 ns->used_sems -= sma->sem_nsems; 1052 ns->used_sems -= sma->sem_nsems;
1050 security_sem_free(sma); 1053 ipc_rcu_putref(sma, sem_rcu_free);
1051 ipc_rcu_putref(sma);
1052} 1054}
1053 1055
1054static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) 1056static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
@@ -1292,7 +1294,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1292 rcu_read_unlock(); 1294 rcu_read_unlock();
1293 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1295 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1294 if(sem_io == NULL) { 1296 if(sem_io == NULL) {
1295 sem_putref(sma); 1297 ipc_rcu_putref(sma, ipc_rcu_free);
1296 return -ENOMEM; 1298 return -ENOMEM;
1297 } 1299 }
1298 1300
@@ -1328,20 +1330,20 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1328 if(nsems > SEMMSL_FAST) { 1330 if(nsems > SEMMSL_FAST) {
1329 sem_io = ipc_alloc(sizeof(ushort)*nsems); 1331 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1330 if(sem_io == NULL) { 1332 if(sem_io == NULL) {
1331 sem_putref(sma); 1333 ipc_rcu_putref(sma, ipc_rcu_free);
1332 return -ENOMEM; 1334 return -ENOMEM;
1333 } 1335 }
1334 } 1336 }
1335 1337
1336 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) { 1338 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1337 sem_putref(sma); 1339 ipc_rcu_putref(sma, ipc_rcu_free);
1338 err = -EFAULT; 1340 err = -EFAULT;
1339 goto out_free; 1341 goto out_free;
1340 } 1342 }
1341 1343
1342 for (i = 0; i < nsems; i++) { 1344 for (i = 0; i < nsems; i++) {
1343 if (sem_io[i] > SEMVMX) { 1345 if (sem_io[i] > SEMVMX) {
1344 sem_putref(sma); 1346 ipc_rcu_putref(sma, ipc_rcu_free);
1345 err = -ERANGE; 1347 err = -ERANGE;
1346 goto out_free; 1348 goto out_free;
1347 } 1349 }
@@ -1629,7 +1631,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1629 /* step 2: allocate new undo structure */ 1631 /* step 2: allocate new undo structure */
1630 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); 1632 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1631 if (!new) { 1633 if (!new) {
1632 sem_putref(sma); 1634 ipc_rcu_putref(sma, ipc_rcu_free);
1633 return ERR_PTR(-ENOMEM); 1635 return ERR_PTR(-ENOMEM);
1634 } 1636 }
1635 1637