Diffstat (limited to 'ipc')
-rw-r--r--  ipc/sem.c  10
-rw-r--r--  ipc/shm.c   2
2 files changed, 4 insertions, 8 deletions
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -895,7 +895,7 @@ static inline void lock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_lock(&undo_list->lock);
 }
 
@@ -915,7 +915,7 @@ static inline void unlock_semundo(void)
 	struct sem_undo_list *undo_list;
 
 	undo_list = current->sysvsem.undo_list;
-	if ((undo_list != NULL) && (atomic_read(&undo_list->refcnt) != 1))
+	if (undo_list)
 		spin_unlock(&undo_list->lock);
 }
 
@@ -943,9 +943,7 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 		if (undo_list == NULL)
 			return -ENOMEM;
 		memset(undo_list, 0, size);
-		/* don't initialize unodhd->lock here. It's done
-		 * in copy_semundo() instead.
-		 */
+		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
 		current->sysvsem.undo_list = undo_list;
 	}
@@ -1231,8 +1229,6 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
 		error = get_undo_list(&undo_list);
 		if (error)
 			return error;
-		if (atomic_read(&undo_list->refcnt) == 1)
-			spin_lock_init(&undo_list->lock);
 		atomic_inc(&undo_list->refcnt);
 		tsk->sysvsem.undo_list = undo_list;
 	} else
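
For orientation, here is a sketch of how the two semundo lock helpers read once the hunks above are applied (pieced together from the context lines; the surrounding ipc/sem.c code is assumed unchanged). Because get_undo_list() now runs spin_lock_init() at allocation time, the helpers can take the lock unconditionally whenever an undo list exists, instead of peeking at the refcount to guess whether the lock has been initialized yet.

/* Sketch only - assembled from the hunks above, not a verbatim copy of
 * post-patch ipc/sem.c. */
static inline void lock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)		/* lock is valid as soon as the list exists */
		spin_lock(&undo_list->lock);
}

static inline void unlock_semundo(void)
{
	struct sem_undo_list *undo_list;

	undo_list = current->sysvsem.undo_list;
	if (undo_list)
		spin_unlock(&undo_list->lock);
}
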
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -170,7 +170,7 @@ static struct vm_operations_struct shm_vm_ops = {
 	.open	= shm_open,	/* callback for a new vm-area open */
 	.close	= shm_close,	/* callback for when the vm-area is released */
 	.nopage	= shmem_nopage,
-#ifdef CONFIG_NUMA
+#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
 	.set_policy = shmem_set_policy,
 	.get_policy = shmem_get_policy,
 #endif
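
And a sketch of shm_vm_ops with the shm.c hunk applied (struct layout taken from the context lines). The extra CONFIG_SHMEM test is presumably needed because shmem_set_policy() and shmem_get_policy() are only built as part of the full shmem implementation; with CONFIG_SHMEM disabled those symbols are not available, so guarding on CONFIG_NUMA alone could break the build - that rationale is an inference from the symbols involved, not something stated in the diff itself.

/* Sketch only - post-patch shm_vm_ops as implied by the hunk above. */
static struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.nopage	= shmem_nopage,
#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM)
	.set_policy = shmem_set_policy,	/* NUMA mempolicy hooks need full shmem */
	.get_policy = shmem_get_policy,
#endif
};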