From 6ade43fbbcc3c12f0ddba112351d14d6c82ae476 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Mon, 1 Aug 2005 21:11:45 -0700
Subject: [PATCH] shm: CONFIG_SHMEM=n build fix

Fix bug found by Grant Coady's autobuild setup.

shmem_set_policy() and shmem_get_policy() are macros if !CONFIG_SHMEM,
so this doesn't work.

Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 ipc/shm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'ipc')

diff --git a/ipc/shm.c b/ipc/shm.c
index cce022435dbc..1d6cf08d950b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -170,7 +170,7 @@ static struct vm_operations_struct shm_vm_ops = {
 	.open	= shm_open,	/* callback for a new vm-area open */
 	.close	= shm_close,	/* callback for when the vm-area is released */
 	.nopage	= shmem_nopage,
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
--
cgit v1.2.2


From 00a5dfdb93f74e4d95fb0d83c890728e331f8810 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Fri, 5 Aug 2005 23:05:27 +0200
Subject: [PATCH] Fix semundo lock leakage

semundo->lock can leak if semundo->refcount goes from 2 to 1 while
another thread has it locked.  This causes major problems for PREEMPT
kernels.

The simplest fix for now is to undo the single-thread optimization.

This bug was found via relentless testing by Dominik Karall.

Signed-off-by: Ingo Molnar
Signed-off-by: Linus Torvalds
---
 ipc/sem.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

(limited to 'ipc')

diff --git a/ipc/sem.c b/ipc/sem.c
index 7e8a25c82ef3..70975ce0784a 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -895,7 +895,7 @@ static inline void lock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_lock(&undo_list->lock);
 }
 
@@ -915,7 +915,7 @@ static inline void unlock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_unlock(&undo_list->lock);
 }
 
@@ -943,9 +943,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 		if (undo_list == NULL)
 			return -ENOMEM;
 		memset(undo_list, 0, size);
-		/* don't initialize unodhd->lock here.  It's done
-		 * in copy_semundo() instead.
-		 */
+		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
 		current->sysvsem.undo_list = undo_list;
 	}
@@ -1231,8 +1229,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		if (atomic_read(&undo_list->refcnt) == 1)
-			spin_lock_init(&undo_list->lock);
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else
--
cgit v1.2.2
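

Why the first patch is needed: when CONFIG_SHMEM is disabled,
shmem_set_policy() and shmem_get_policy() exist only as function-like
macros, so there is no symbol whose address can be stored in shm_vm_ops
and the initializer fails to build.  The following is a minimal,
self-contained userspace sketch of that failure mode and of the fix;
the struct layout, the macro body (0), and the use of CONFIG_* as plain
preprocessor defines are illustrative assumptions, not the kernel's
actual headers.

#include <stdio.h>

/* Simplified stand-in for the kernel's vm_operations_struct. */
struct vm_operations {
	int (*set_policy)(void *vma, void *policy);
};

#ifdef CONFIG_SHMEM
/* With CONFIG_SHMEM there is a real function, so its address can be
 * stored in an ops table. */
int shmem_set_policy(void *vma, void *policy) { return 0; }
#else
/* Without CONFIG_SHMEM the name is only a function-like macro (the
 * exact body here is an assumption): there is no function to point at,
 * so ".set_policy = shmem_set_policy" would not compile. */
#define shmem_set_policy(vma, policy) (0)
#endif

static struct vm_operations shm_ops = {
/* The patch's fix: reference the name only when both options are set
 * and it therefore names a real function. */
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,
#else
	.set_policy = NULL,
#endif
};

int main(void)
{
	/* In call position the name works either way, macro or function... */
	(void)shmem_set_policy(NULL, NULL);
	/* ...but only a real function can sit behind a function pointer. */
	printf("set_policy is %s\n", shm_ops.set_policy ? "set" : "unset");
	return 0;
}

Build with "cc -DCONFIG_NUMA -DCONFIG_SHMEM sketch.c" or with either
define omitted; reintroducing an unguarded ".set_policy =
shmem_set_policy," under -DCONFIG_NUMA alone reproduces the build
failure the patch fixes.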
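
Why the second patch is needed: lock_semundo() and unlock_semundo()
both tested refcnt before touching the lock, so a task that took the
lock while refcnt was 2 would skip the unlock if another task dropped
its reference in the meantime, leaving the spinlock held forever (and,
on PREEMPT kernels, preemption disabled).  Making the locking
unconditional also means the lock must be initialized exactly once, at
allocation time, which is why spin_lock_init() moves from copy_semundo()
into get_undo_list().  Below is a minimal userspace sketch of the leak
using pthreads; the structure and function names are invented for
illustration and are not kernel code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct undo_list {
	pthread_mutex_t lock;
	atomic_int refcnt;
};

static void buggy_lock(struct undo_list *ul)
{
	/* "Single-thread optimization": skip the lock when we appear
	 * to be the only user. */
	if (atomic_load(&ul->refcnt) != 1)
		pthread_mutex_lock(&ul->lock);
}

static void buggy_unlock(struct undo_list *ul)
{
	/* If refcnt dropped from 2 to 1 while we held the lock, this
	 * test now sees 1, the unlock is skipped, and the lock leaks. */
	if (atomic_load(&ul->refcnt) != 1)
		pthread_mutex_unlock(&ul->lock);
}

/* The patched behaviour: lock and unlock unconditionally. */
static void fixed_lock(struct undo_list *ul)
{
	pthread_mutex_lock(&ul->lock);
}

static void fixed_unlock(struct undo_list *ul)
{
	pthread_mutex_unlock(&ul->lock);
}

int main(void)
{
	struct undo_list ul;

	/* Initialize the lock once, at allocation time, mirroring the
	 * spin_lock_init() move into get_undo_list(). */
	pthread_mutex_init(&ul.lock, NULL);
	atomic_init(&ul.refcnt, 2);

	/* With refcnt pinned at 2 the buggy pair happens to balance;
	 * the leak needs a second thread to drop refcnt to 1 between
	 * the two calls, which no ordering of these checks can rule
	 * out. */
	buggy_lock(&ul);
	buggy_unlock(&ul);

	/* The fixed pair balances regardless of refcnt. */
	fixed_lock(&ul);
	fixed_unlock(&ul);
	printf("lock/unlock pairs balanced\n");
	return 0;
}

Compile with "cc -pthread sketch.c".  The underlying point of the fix
is that the decision to lock or unlock must not depend on state that
can change while the lock is held.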