-rw-r--r--  arch/arm/kernel/setup.c                  2
-rw-r--r--  arch/unicore32/kernel/early_printk.c     8
-rw-r--r--  arch/x86/include/asm/page_types.h        4
-rw-r--r--  arch/x86/kernel/setup.c                  2
-rw-r--r--  drivers/firmware/google/Kconfig          3
-rw-r--r--  fs/notify/fanotify/fanotify_user.c       4
-rw-r--r--  fs/ocfs2/namei.c                         2
-rw-r--r--  include/linux/bootmem.h                 37
-rw-r--r--  include/linux/interrupt.h                2
-rw-r--r--  include/linux/ipc.h                      2
-rw-r--r--  include/linux/ipc_namespace.h            1
-rw-r--r--  include/linux/msg.h                      2
-rw-r--r--  include/linux/shm.h                      2
-rw-r--r--  include/linux/splice.h                   3
-rw-r--r--  init/main.c                              2
-rw-r--r--  ipc/compat.c                            30
-rw-r--r--  ipc/compat_mq.c                          2
-rw-r--r--  ipc/ipc_sysctl.c                        14
-rw-r--r--  ipc/mqueue.c                            22
-rw-r--r--  ipc/msg.c                               44
-rw-r--r--  ipc/sem.c                              178
-rw-r--r--  ipc/shm.c                               49
-rw-r--r--  ipc/util.c                             290
-rw-r--r--  ipc/util.h                              28
-rw-r--r--  kernel/kexec.c                           2
-rw-r--r--  kernel/softirq.c                        72
-rw-r--r--  lib/dynamic_debug.c                     14
-rw-r--r--  lib/swiotlb.c                            4
-rw-r--r--  mm/memblock.c                            6
-rw-r--r--  mm/migrate.c                             2
-rw-r--r--  mm/mm_init.c                             2
-rw-r--r--  mm/vmalloc.c                            20
-rwxr-xr-x  scripts/checkpatch.pl                    9
33 files changed, 448 insertions, 416 deletions
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1e8b030dbefd..b0df9761de6d 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
 	kernel_data.end = virt_to_phys(_end - 1);
 
 	for_each_memblock(memory, region) {
-		res = memblock_virt_alloc(sizeof(*res), 0);
+		res = memblock_virt_alloc_low(sizeof(*res), 0);
 		res->name = "System RAM";
 		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/unicore32/kernel/early_printk.c b/arch/unicore32/kernel/early_printk.c
index 9be0d5d02a9a..f2f6323c8d64 100644
--- a/arch/unicore32/kernel/early_printk.c
+++ b/arch/unicore32/kernel/early_printk.c
@@ -35,17 +35,11 @@ static struct console early_ocd_console = {
 
 static int __init setup_early_printk(char *buf)
 {
-	int keep_early;
-
 	if (!buf || early_console)
 		return 0;
 
-	if (strstr(buf, "keep"))
-		keep_early = 1;
-
 	early_console = &early_ocd_console;
-
-	if (keep_early)
+	if (strstr(buf, "keep"))
 		early_console->flags &= ~CON_BOOT;
 	else
 		early_console->flags |= CON_BOOT;
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 2f59cce3b38a..f97fbe3abb67 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -51,9 +51,9 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
-static inline phys_addr_t get_max_low_mapped(void)
+static inline phys_addr_t get_max_mapped(void)
 {
-	return (phys_addr_t)max_low_pfn_mapped << PAGE_SHIFT;
+	return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
 }
 
 bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index c9675594d7ca..06853e670354 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)
 
 	setup_real_mode();
 
-	memblock_set_current_limit(get_max_low_mapped());
+	memblock_set_current_limit(get_max_mapped());
 	dma_contiguous_reserve(0);
 
 	/*
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 2f21b0bfe653..29c8cdda82a1 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -12,8 +12,7 @@ menu "Google Firmware Drivers"
 
 config GOOGLE_SMI
 	tristate "SMI interface for Google platforms"
-	depends on ACPI && DMI
-	select EFI
+	depends on ACPI && DMI && EFI
 	select EFI_VARS
 	help
 	  Say Y here if you want to enable SMI callbacks for Google
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 57d7c083cb4b..1fd66abe5740 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -886,9 +886,9 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark,
 {
 	return sys_fanotify_mark(fanotify_fd, flags,
 #ifdef __BIG_ENDIAN
-				((__u64)mask1 << 32) | mask0,
-#else
 				((__u64)mask0 << 32) | mask1,
+#else
+				((__u64)mask1 << 32) | mask0,
 #endif
 				 dfd, pathname);
 }
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 4f791f6d27d0..41513a4e98e4 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -948,7 +948,7 @@ leave:
 	ocfs2_free_dir_lookup_result(&orphan_insert);
 	ocfs2_free_dir_lookup_result(&lookup);
 
-	if (status && (status != -ENOTEMPTY))
+	if (status && (status != -ENOTEMPTY) && (status != -ENOENT))
 		mlog_errno(status);
 
 	return status;
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 2fae55def608..b388223bd4a9 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -175,6 +175,27 @@ static inline void * __init memblock_virt_alloc_nopanic(
 					    NUMA_NO_NODE);
 }
 
+#ifndef ARCH_LOW_ADDRESS_LIMIT
+#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
+#endif
+
+static inline void * __init memblock_virt_alloc_low(
+			phys_addr_t size, phys_addr_t align)
+{
+	return memblock_virt_alloc_try_nid(size, align,
+					   BOOTMEM_LOW_LIMIT,
+					   ARCH_LOW_ADDRESS_LIMIT,
+					   NUMA_NO_NODE);
+}
+static inline void * __init memblock_virt_alloc_low_nopanic(
+			phys_addr_t size, phys_addr_t align)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, align,
+					   BOOTMEM_LOW_LIMIT,
+					   ARCH_LOW_ADDRESS_LIMIT,
+					   NUMA_NO_NODE);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
 			phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {
@@ -238,6 +259,22 @@ static inline void * __init memblock_virt_alloc_nopanic(
 	return __alloc_bootmem_nopanic(size, align, BOOTMEM_LOW_LIMIT);
 }
 
+static inline void * __init memblock_virt_alloc_low(
+			phys_addr_t size, phys_addr_t align)
+{
+	if (!align)
+		align = SMP_CACHE_BYTES;
+	return __alloc_bootmem_low(size, align, BOOTMEM_LOW_LIMIT);
+}
+
+static inline void * __init memblock_virt_alloc_low_nopanic(
+			phys_addr_t size, phys_addr_t align)
+{
+	if (!align)
+		align = SMP_CACHE_BYTES;
+	return __alloc_bootmem_low_nopanic(size, align, BOOTMEM_LOW_LIMIT);
+}
+
 static inline void * __init memblock_virt_alloc_from_nopanic(
 			phys_addr_t size, phys_addr_t align, phys_addr_t min_addr)
 {
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index db43b58a3355..0053adde0ed9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -360,7 +360,7 @@ enum
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
  */
-extern char *softirq_to_name[NR_SOFTIRQS];
+extern const char * const softirq_to_name[NR_SOFTIRQS];
 
 /* softirq mask and active fields moved to irq_cpustat_t in
  * asm/hardirq.h to get better cache usage. KAO
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 8d861b2651f7..9d84942ae2e5 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -11,7 +11,7 @@
 struct kern_ipc_perm
 {
 	spinlock_t	lock;
-	int		deleted;
+	bool		deleted;
 	int		id;
 	key_t		key;
 	kuid_t		uid;
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index f6c82de12541..e7831d203737 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -21,7 +21,6 @@ struct user_namespace;
 struct ipc_ids {
 	int in_use;
 	unsigned short seq;
-	unsigned short seq_max;
 	struct rw_semaphore rwsem;
 	struct idr ipcs_idr;
 	int next_id;
diff --git a/include/linux/msg.h b/include/linux/msg.h
index e21f9d44307f..f3f302f9c197 100644
--- a/include/linux/msg.h
+++ b/include/linux/msg.h
@@ -9,7 +9,7 @@ struct msg_msg {
 	struct list_head m_list;
 	long m_type;
 	size_t m_ts;		/* message text size */
-	struct msg_msgseg* next;
+	struct msg_msgseg *next;
 	void *security;
 	/* the actual message follows immediately */
 };
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 429c1995d756..1e2cd2e6b540 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -9,7 +9,7 @@
 struct shmid_kernel /* private to the kernel */
 {
 	struct kern_ipc_perm	shm_perm;
-	struct file *		shm_file;
+	struct file		*shm_file;
 	unsigned long		shm_nattch;
 	unsigned long		shm_segsz;
 	time_t			shm_atim;
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 74575cbf2d6f..0e43906d2fda 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -24,7 +24,8 @@
  * Passed to the actors
  */
 struct splice_desc {
-	unsigned int len, total_len;	/* current and remaining length */
+	size_t total_len;		/* remaining length */
+	unsigned int len;		/* current length */
 	unsigned int flags;		/* splice flags */
 	/*
 	 * actor() private data
diff --git a/init/main.c b/init/main.c
index 3f1f4e4b61f7..2fd9cef70ee8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -92,8 +92,6 @@ static int kernel_init(void *);
 
 extern void init_IRQ(void);
 extern void fork_init(unsigned long);
-extern void mca_init(void);
-extern void sbus_init(void);
 extern void radix_tree_init(void);
 #ifndef CONFIG_DEBUG_RODATA
 static inline void mark_rodata_ro(void) { }
diff --git a/ipc/compat.c b/ipc/compat.c
index 892f6585dd60..f486b0096a67 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -197,7 +197,7 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p,
 static inline int get_compat_semid64_ds(struct semid64_ds *s64,
 		struct compat_semid64_ds __user *up64)
 {
-	if (!access_ok (VERIFY_READ, up64, sizeof(*up64)))
+	if (!access_ok(VERIFY_READ, up64, sizeof(*up64)))
 		return -EFAULT;
 	return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
 }
@@ -205,7 +205,7 @@ static inline int get_compat_semid64_ds(struct semid64_ds *s64,
 static inline int get_compat_semid_ds(struct semid64_ds *s,
 		struct compat_semid_ds __user *up)
 {
-	if (!access_ok (VERIFY_READ, up, sizeof(*up)))
+	if (!access_ok(VERIFY_READ, up, sizeof(*up)))
 		return -EFAULT;
 	return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
 }
@@ -215,7 +215,7 @@ static inline int put_compat_semid64_ds(struct semid64_ds *s64,
 {
 	int err;
 
-	if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64)))
+	if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
 		return -EFAULT;
 	err = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
 	err |= __put_user(s64->sem_otime, &up64->sem_otime);
@@ -229,7 +229,7 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
 {
 	int err;
 
-	if (!access_ok (VERIFY_WRITE, up, sizeof(*up)))
+	if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
 		return -EFAULT;
 	err = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
 	err |= __put_user(s->sem_otime, &up->sem_otime);
@@ -288,11 +288,11 @@ static long do_compat_semctl(int first, int second, int third, u32 pad)
 		break;
 
 	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
 			err = get_compat_semid64_ds(&s64, compat_ptr(pad));
-		} else {
+		else
 			err = get_compat_semid_ds(&s64, compat_ptr(pad));
-		}
+
 		up64 = compat_alloc_user_space(sizeof(s64));
 		if (copy_to_user(up64, &s64, sizeof(s64)))
 			err = -EFAULT;
@@ -376,12 +376,12 @@ COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
 			struct compat_ipc_kludge ipck;
 			if (!uptr)
 				return -EINVAL;
-			if (copy_from_user (&ipck, uptr, sizeof(ipck)))
+			if (copy_from_user(&ipck, uptr, sizeof(ipck)))
 				return -EFAULT;
 			uptr = compat_ptr(ipck.msgp);
 			fifth = ipck.msgtyp;
 		}
-		return do_msgrcv(first, uptr, second, fifth, third,
+		return do_msgrcv(first, uptr, second, (s32)fifth, third,
 				 compat_do_msg_fill);
 	}
 	case MSGGET:
@@ -515,11 +515,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
 		break;
 
 	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
 			err = get_compat_msqid64(&m64, uptr);
-		} else {
+		else
 			err = get_compat_msqid(&m64, uptr);
-		}
+
 		if (err)
 			break;
 		p = compat_alloc_user_space(sizeof(m64));
@@ -702,11 +702,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
 
 
 	case IPC_SET:
-		if (version == IPC_64) {
+		if (version == IPC_64)
 			err = get_compat_shmid64_ds(&s64, uptr);
-		} else {
+		else
 			err = get_compat_shmid_ds(&s64, uptr);
-		}
+
 		if (err)
 			break;
 		p = compat_alloc_user_space(sizeof(s64));
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index 380ea4fe08e7..63d7c6de335b 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -64,7 +64,7 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
 	return sys_mq_open(u_name, oflag, mode, p);
 }
 
-static int compat_prepare_timeout(struct timespec __user * *p,
+static int compat_prepare_timeout(struct timespec __user **p,
 		   const struct compat_timespec __user *u)
 {
 	struct timespec ts;
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
index b0e99deb6d05..17028648cfeb 100644
--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c
@@ -164,21 +164,21 @@ static struct ctl_table ipc_kern_table[] = {
 	{
 		.procname	= "shmmax",
 		.data		= &init_ipc_ns.shm_ctlmax,
-		.maxlen		= sizeof (init_ipc_ns.shm_ctlmax),
+		.maxlen		= sizeof(init_ipc_ns.shm_ctlmax),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_doulongvec_minmax,
 	},
 	{
 		.procname	= "shmall",
 		.data		= &init_ipc_ns.shm_ctlall,
-		.maxlen		= sizeof (init_ipc_ns.shm_ctlall),
+		.maxlen		= sizeof(init_ipc_ns.shm_ctlall),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_doulongvec_minmax,
 	},
 	{
 		.procname	= "shmmni",
 		.data		= &init_ipc_ns.shm_ctlmni,
-		.maxlen		= sizeof (init_ipc_ns.shm_ctlmni),
+		.maxlen		= sizeof(init_ipc_ns.shm_ctlmni),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_dointvec,
 	},
@@ -194,7 +194,7 @@ static struct ctl_table ipc_kern_table[] = {
 	{
 		.procname	= "msgmax",
 		.data		= &init_ipc_ns.msg_ctlmax,
-		.maxlen		= sizeof (init_ipc_ns.msg_ctlmax),
+		.maxlen		= sizeof(init_ipc_ns.msg_ctlmax),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_dointvec_minmax,
 		.extra1		= &zero,
@@ -203,7 +203,7 @@ static struct ctl_table ipc_kern_table[] = {
 	{
 		.procname	= "msgmni",
 		.data		= &init_ipc_ns.msg_ctlmni,
-		.maxlen		= sizeof (init_ipc_ns.msg_ctlmni),
+		.maxlen		= sizeof(init_ipc_ns.msg_ctlmni),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_callback_dointvec_minmax,
 		.extra1		= &zero,
@@ -212,7 +212,7 @@ static struct ctl_table ipc_kern_table[] = {
 	{
 		.procname	= "msgmnb",
 		.data		= &init_ipc_ns.msg_ctlmnb,
-		.maxlen		= sizeof (init_ipc_ns.msg_ctlmnb),
+		.maxlen		= sizeof(init_ipc_ns.msg_ctlmnb),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_dointvec_minmax,
 		.extra1		= &zero,
@@ -221,7 +221,7 @@ static struct ctl_table ipc_kern_table[] = {
 	{
 		.procname	= "sem",
 		.data		= &init_ipc_ns.sem_ctls,
-		.maxlen		= 4*sizeof (int),
+		.maxlen		= 4*sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= proc_ipc_dointvec,
 	},
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 95827ce2f3c7..ccf1f9fd263a 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -6,7 +6,7 @@
  *
  * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com)
  * Lockless receive & send, fd based notify:
- * 			    Manfred Spraul (manfred@colorfullife.com)
+ *			    Manfred Spraul (manfred@colorfullife.com)
  *
  * Audit: George Wilson (ltcgcw@us.ibm.com)
  *
@@ -73,7 +73,7 @@ struct mqueue_inode_info {
 	struct mq_attr attr;
 
 	struct sigevent notify;
-	struct pid* notify_owner;
+	struct pid *notify_owner;
 	struct user_namespace *notify_user_ns;
 	struct user_struct *user;	/* user who created, for accounting */
 	struct sock *notify_sock;
@@ -92,7 +92,7 @@ static void remove_notification(struct mqueue_inode_info *info);
 
 static struct kmem_cache *mqueue_inode_cachep;
 
-static struct ctl_table_header * mq_sysctl_table;
+static struct ctl_table_header *mq_sysctl_table;
 
 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 {
@@ -466,13 +466,13 @@ out_unlock:
 
 static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 {
-  	struct inode *inode = dentry->d_inode;
+	struct inode *inode = dentry->d_inode;
 
 	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
 	dir->i_size -= DIRENT_SIZE;
-  	drop_nlink(inode);
-  	dput(dentry);
-  	return 0;
+	drop_nlink(inode);
+	dput(dentry);
+	return 0;
 }
 
 /*
@@ -622,7 +622,7 @@ static struct ext_wait_queue *wq_get_first_waiter(
 
 static inline void set_cookie(struct sk_buff *skb, char code)
 {
-	((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
+	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 }
 
 /*
@@ -1303,11 +1303,11 @@ retry:
 out_fput:
 	fdput(f);
 out:
-	if (sock) {
+	if (sock)
 		netlink_detachskb(sock, nc);
-	} else if (nc) {
+	else if (nc)
 		dev_kfree_skb(nc);
-	}
+
 	return ret;
 }
 
diff --git a/ipc/msg.c b/ipc/msg.c
index 558aa91186b6..245db1140ad6 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -253,8 +253,14 @@ static void expunge_all(struct msg_queue *msq, int res)
 	struct msg_receiver *msr, *t;
 
 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-		msr->r_msg = NULL;
+		msr->r_msg = NULL; /* initialize expunge ordering */
 		wake_up_process(msr->r_tsk);
+		/*
+		 * Ensure that the wakeup is visible before setting r_msg as
+		 * the receiving end depends on it: either spinning on a nil,
+		 * or dealing with -EAGAIN cases. See lockless receive part 1
+		 * and 2 in do_msgrcv().
+		 */
 		smp_mb();
 		msr->r_msg = ERR_PTR(res);
 	}
@@ -318,7 +324,7 @@ SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
 static inline unsigned long
 copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		return copy_to_user(buf, in, sizeof(*in));
 	case IPC_OLD:
@@ -363,7 +369,7 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
 static inline unsigned long
 copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
@@ -375,9 +381,9 @@ copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
 		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
-		out->msg_perm.uid      	= tbuf_old.msg_perm.uid;
-		out->msg_perm.gid      	= tbuf_old.msg_perm.gid;
-		out->msg_perm.mode     	= tbuf_old.msg_perm.mode;
+		out->msg_perm.uid	= tbuf_old.msg_perm.uid;
+		out->msg_perm.gid	= tbuf_old.msg_perm.gid;
+		out->msg_perm.mode	= tbuf_old.msg_perm.mode;
 
 		if (tbuf_old.msg_qbytes == 0)
 			out->msg_qbytes = tbuf_old.msg_lqbytes;
@@ -606,13 +612,13 @@ SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
 
 static int testmsg(struct msg_msg *msg, long type, int mode)
 {
-	switch(mode)
+	switch (mode)
 	{
 		case SEARCH_ANY:
 		case SEARCH_NUMBER:
 			return 1;
 		case SEARCH_LESSEQUAL:
-			if (msg->m_type <=type)
+			if (msg->m_type <= type)
 				return 1;
 			break;
 		case SEARCH_EQUAL:
@@ -638,15 +644,22 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
+				/* initialize pipelined send ordering */
 				msr->r_msg = NULL;
 				wake_up_process(msr->r_tsk);
-				smp_mb();
+				smp_mb(); /* see barrier comment below */
 				msr->r_msg = ERR_PTR(-E2BIG);
 			} else {
 				msr->r_msg = NULL;
 				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
 				msq->q_rtime = get_seconds();
 				wake_up_process(msr->r_tsk);
+				/*
+				 * Ensure that the wakeup is visible before
+				 * setting r_msg, as the receiving end depends
+				 * on it. See lockless receive part 1 and 2 in
+				 * do_msgrcv().
+				 */
 				smp_mb();
 				msr->r_msg = msg;
 
@@ -654,6 +667,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 			}
 		}
 	}
+
 	return 0;
 }
 
@@ -696,7 +710,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 			goto out_unlock0;
 
 		/* raced with RMID? */
-		if (msq->q_perm.deleted) {
+		if (!ipc_valid_object(&msq->q_perm)) {
 			err = -EIDRM;
 			goto out_unlock0;
 		}
@@ -716,6 +730,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 			goto out_unlock0;
 		}
 
+		/* enqueue the sender and prepare to block */
 		ss_add(msq, &s);
 
 		if (!ipc_rcu_getref(msq)) {
@@ -731,7 +746,8 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		ipc_lock_object(&msq->q_perm);
 
 		ipc_rcu_putref(msq, ipc_rcu_free);
-		if (msq->q_perm.deleted) {
+		/* raced with RMID? */
+		if (!ipc_valid_object(&msq->q_perm)) {
 			err = -EIDRM;
 			goto out_unlock0;
 		}
@@ -909,7 +925,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		ipc_lock_object(&msq->q_perm);
 
 		/* raced with RMID? */
-		if (msq->q_perm.deleted) {
+		if (!ipc_valid_object(&msq->q_perm)) {
 			msg = ERR_PTR(-EIDRM);
 			goto out_unlock0;
 		}
@@ -983,7 +999,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		 * wake_up_process(). There is a race with exit(), see
 		 * ipc/mqueue.c for the details.
 		 */
-		msg = (struct msg_msg*)msr_d.r_msg;
+		msg = (struct msg_msg *)msr_d.r_msg;
 		while (msg == NULL) {
 			cpu_relax();
 			msg = (struct msg_msg *)msr_d.r_msg;
@@ -1004,7 +1020,7 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		/* Lockless receive, part 4:
 		 * Repeat test after acquiring the spinlock.
 		 */
-		msg = (struct msg_msg*)msr_d.r_msg;
+		msg = (struct msg_msg *)msr_d.r_msg;
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock0;
 
diff --git a/ipc/sem.c b/ipc/sem.c
index db9d241af133..bee555417312 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -188,7 +188,7 @@ void sem_exit_ns(struct ipc_namespace *ns)
 }
 #endif
 
-void __init sem_init (void)
+void __init sem_init(void)
 {
 	sem_init_ns(&init_ipc_ns);
 	ipc_init_proc_interface("sysvipc/sem",
@@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma)
 }
 
 /**
- * merge_queues - Merge single semop queues into global queue
+ * merge_queues - merge single semop queues into global queue
  * @sma: semaphore array
  *
  * This function merges all per-semaphore queues into the global queue.
@@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 	/* ipc_rmid() may have already freed the ID while sem_lock
 	 * was spinning: verify that the structure is still valid
 	 */
-	if (!ipcp->deleted)
+	if (ipc_valid_object(ipcp))
 		return container_of(ipcp, struct sem_array, sem_perm);
 
 	sem_unlock(sma, *locknum);
@@ -445,11 +445,11 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  *	* call wake_up_process
  *	* set queue.status to the final value.
  * - the previously blocked thread checks queue.status:
- *   	* if it's IN_WAKEUP, then it must wait until the value changes
- *   	* if it's not -EINTR, then the operation was completed by
- *   	  update_queue. semtimedop can return queue.status without
- *   	  performing any operation on the sem array.
- *   	* otherwise it must acquire the spinlock and check what's up.
+ *	* if it's IN_WAKEUP, then it must wait until the value changes
+ *	* if it's not -EINTR, then the operation was completed by
+ *	  update_queue. semtimedop can return queue.status without
+ *	  performing any operation on the sem array.
+ *	* otherwise it must acquire the spinlock and check what's up.
  *
  * The two-stage algorithm is necessary to protect against the following
  * races:
@@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  *
  * Called with sem_ids.rwsem held (as a writer)
  */
-
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
 	int id;
@@ -491,12 +490,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	if (ns->used_sems + nsems > ns->sc_semmns)
 		return -ENOSPC;
 
-	size = sizeof (*sma) + nsems * sizeof (struct sem);
+	size = sizeof(*sma) + nsems * sizeof(struct sem);
 	sma = ipc_rcu_alloc(size);
-	if (!sma) {
+	if (!sma)
 		return -ENOMEM;
-	}
-	memset (sma, 0, size);
+
+	memset(sma, 0, size);
 
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
@@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/** perform_atomic_semop - Perform (if possible) a semaphore operation
+/**
+ * perform_atomic_semop - Perform (if possible) a semaphore operation
  * @sma: semaphore array
  * @sops: array with operations that should be checked
- * @nsems: number of sops
+ * @nsops: number of operations
  * @un: undo array
 * @pid: pid that did the change
 *
@@ -595,19 +595,18 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 * Returns 1 if the operation is impossible, the caller must sleep.
 * Negative values are error codes.
 */
-
 static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			     int nsops, struct sem_undo *un, int pid)
 {
 	int result, sem_op;
 	struct sembuf *sop;
-	struct sem * curr;
+	struct sem *curr;
 
 	for (sop = sops; sop < sops + nsops; sop++) {
 		curr = sma->sem_base + sop->sem_num;
 		sem_op = sop->sem_op;
 		result = curr->semval;
-  
+
 		if (!sem_op && result)
 			goto would_block;
 
@@ -616,25 +615,24 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			goto would_block;
 		if (result > SEMVMX)
 			goto out_of_range;
+
 		if (sop->sem_flg & SEM_UNDO) {
 			int undo = un->semadj[sop->sem_num] - sem_op;
-			/*
-			 * Exceeding the undo range is an error.
-			 */
+			/* Exceeding the undo range is an error. */
 			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 				goto out_of_range;
+			un->semadj[sop->sem_num] = undo;
 		}
+
 		curr->semval = result;
 	}
 
 	sop--;
 	while (sop >= sops) {
 		sma->sem_base[sop->sem_num].sempid = pid;
-		if (sop->sem_flg & SEM_UNDO)
-			un->semadj[sop->sem_num] -= sop->sem_op;
 		sop--;
 	}
 
 	return 0;
 
 out_of_range:
@@ -650,7 +648,10 @@ would_block:
 undo:
 	sop--;
 	while (sop >= sops) {
-		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
+		sem_op = sop->sem_op;
+		sma->sem_base[sop->sem_num].semval -= sem_op;
+		if (sop->sem_flg & SEM_UNDO)
+			un->semadj[sop->sem_num] += sem_op;
 		sop--;
 	}
 
@@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 }
 
 /**
- * wake_up_sem_queue_do(pt) - do the actual wake-up
+ * wake_up_sem_queue_do - do the actual wake-up
  * @pt: list of tasks to be woken up
  *
 * Do the actual wake-up.
@@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
 }
 
 /**
- * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
+ * wake_const_ops - wake up non-alter tasks
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
 * @pt: list head for the tasks that must be woken up.
@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 }
 
 /**
- * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
+ * do_smart_wakeup_zero - wakeup all wait for zero tasks
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
 * @pt: list head of the tasks that must be woken up.
 *
- * do_smart_wakeup_zero() checks all required queue for wait-for-zero
- * operations, based on the actual changes that were performed on the
- * semaphore array.
+ * Checks all required queue for wait-for-zero operations, based
+ * on the actual changes that were performed on the semaphore array.
 * The function returns 1 if at least one operation was completed successfully.
 */
 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
@@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 
 
 /**
- * update_queue(sma, semnum): Look for tasks that can be completed.
+ * update_queue - look for tasks that can be completed.
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
  * @pt: list head for the tasks that must be woken up.
@@ -918,7 +918,7 @@ again:
 }
 
 /**
- * set_semotime(sma, sops) - set sem_otime
+ * set_semotime - set sem_otime
  * @sma: semaphore array
  * @sops: operations that modified the array, may be NULL
  *
@@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 }
 
 /**
- * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * do_smart_update - optimized update_queue
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
@@ -998,21 +998,21 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
  * The counts we return here are a rough approximation, but still
  * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
  */
-static int count_semncnt (struct sem_array * sma, ushort semnum)
+static int count_semncnt(struct sem_array *sma, ushort semnum)
 {
 	int semncnt;
-	struct sem_queue * q;
+	struct sem_queue *q;
 
 	semncnt = 0;
 	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semncnt++;
 	}
 
 	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		int nsops = q->nsops;
 		int i;
 		for (i = 0; i < nsops; i++)
@@ -1024,21 +1024,21 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
 	return semncnt;
 }
 
-static int count_semzcnt (struct sem_array * sma, ushort semnum)
+static int count_semzcnt(struct sem_array *sma, ushort semnum)
 {
 	int semzcnt;
-	struct sem_queue * q;
+	struct sem_queue *q;
 
 	semzcnt = 0;
 	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semzcnt++;
 	}
 
 	list_for_each_entry(q, &sma->pending_const, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		int nsops = q->nsops;
 		int i;
 		for (i = 0; i < nsops; i++)
@@ -1108,7 +1108,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		return copy_to_user(buf, in, sizeof(*in));
 	case IPC_OLD:
@@ -1151,7 +1151,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 	int err;
 	struct sem_array *sma;
 
-	switch(cmd) {
+	switch (cmd) {
 	case IPC_INFO:
 	case SEM_INFO:
 	{
@@ -1162,7 +1162,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		if (err)
 			return err;
 
-		memset(&seminfo,0,sizeof(seminfo));
+		memset(&seminfo, 0, sizeof(seminfo));
 		seminfo.semmni = ns->sc_semmni;
 		seminfo.semmns = ns->sc_semmns;
 		seminfo.semmsl = ns->sc_semmsl;
@@ -1183,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		up_read(&sem_ids(ns).rwsem);
 		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
-		return (max_id < 0) ? 0: max_id;
+		return (max_id < 0) ? 0 : max_id;
 	}
 	case IPC_STAT:
 	case SEM_STAT:
@@ -1239,7 +1239,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 {
 	struct sem_undo *un;
 	struct sem_array *sma;
-	struct sem* curr;
+	struct sem *curr;
 	int err;
 	struct list_head tasks;
 	int val;
@@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 
 	sem_lock(sma, NULL, -1);
 
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		return -EIDRM;
@@ -1309,10 +1309,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	int cmd, void __user *p)
 {
 	struct sem_array *sma;
-	struct sem* curr;
+	struct sem *curr;
 	int err, nsems;
 	ushort fast_sem_io[SEMMSL_FAST];
-	ushort* sem_io = fast_sem_io;
+	ushort *sem_io = fast_sem_io;
 	struct list_head tasks;
 
 	INIT_LIST_HEAD(&tasks);
@@ -1342,11 +1342,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int i;
 
 		sem_lock(sma, NULL, -1);
-		if (sma->sem_perm.deleted) {
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			err = -EIDRM;
 			goto out_unlock;
 		}
-		if(nsems > SEMMSL_FAST) {
+		if (nsems > SEMMSL_FAST) {
 			if (!ipc_rcu_getref(sma)) {
 				err = -EIDRM;
 				goto out_unlock;
@@ -1354,14 +1354,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			sem_unlock(sma, -1);
 			rcu_read_unlock();
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
+			if (sem_io == NULL) {
 				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
 
 			rcu_read_lock();
 			sem_lock_and_putref(sma);
-			if (sma->sem_perm.deleted) {
+			if (!ipc_valid_object(&sma->sem_perm)) {
 				err = -EIDRM;
 				goto out_unlock;
 			}
@@ -1371,7 +1371,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		err = 0;
-		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
+		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 			err = -EFAULT;
 		goto out_free;
 	}
@@ -1386,15 +1386,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		rcu_read_unlock();
 
-		if(nsems > SEMMSL_FAST) {
+		if (nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
+			if (sem_io == NULL) {
 				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
-		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
+		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
 			ipc_rcu_putref(sma, ipc_rcu_free);
 			err = -EFAULT;
 			goto out_free;
@@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		rcu_read_lock();
 		sem_lock_and_putref(sma);
-		if (sma->sem_perm.deleted) {
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			err = -EIDRM;
 			goto out_unlock;
 		}
@@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		goto out_rcu_wakeup;
 
 	sem_lock(sma, NULL, -1);
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		err = -EIDRM;
 		goto out_unlock;
 	}
@@ -1449,10 +1449,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->sempid;
 		goto out_unlock;
 	case GETNCNT:
-		err = count_semncnt(sma,semnum);
+		err = count_semncnt(sma, semnum);
 		goto out_unlock;
 	case GETZCNT:
-		err = count_semzcnt(sma,semnum);
+		err = count_semzcnt(sma, semnum);
 		goto out_unlock;
 	}
 
@@ -1462,7 +1462,7 @@ out_rcu_wakeup:
 	rcu_read_unlock();
 	wake_up_sem_queue_do(&tasks);
 out_free:
-	if(sem_io != fast_sem_io)
+	if (sem_io != fast_sem_io)
 		ipc_free(sem_io, sizeof(ushort)*nsems);
 	return err;
 }
@@ -1470,7 +1470,7 @@ out_free:
 static inline unsigned long
 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
@@ -1479,7 +1479,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 	{
 		struct semid_ds tbuf_old;
 
-		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
+		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
 		out->sem_perm.uid = tbuf_old.sem_perm.uid;
@@ -1506,7 +1506,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
 	struct semid64_ds semid64;
 	struct kern_ipc_perm *ipcp;
 
-	if(cmd == IPC_SET) {
+	if (cmd == IPC_SET) {
 		if (copy_semid_from_user(&semid64, p, version))
 			return -EFAULT;
 	}
@@ -1566,7 +1566,7 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
 	version = ipc_parse_version(&cmd);
 	ns = current->nsproxy->ipc_ns;
 
-	switch(cmd) {
+	switch (cmd) {
 	case IPC_INFO:
 	case SEM_INFO:
 	case IPC_STAT:
@@ -1634,7 +1634,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
 	struct sem_undo *un;
 
-  	assert_spin_locked(&ulp->lock);
+	assert_spin_locked(&ulp->lock);
 
 	un = __lookup_undo(ulp, semid);
 	if (un) {
@@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 }
 
 /**
- * find_alloc_undo - Lookup (and if not present create) undo array
+ * find_alloc_undo - lookup (and if not present create) undo array
  * @ns: namespace
  * @semid: semaphore array id
  *
@@ -1670,7 +1670,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	spin_unlock(&ulp->lock);
-	if (likely(un!=NULL))
+	if (likely(un != NULL))
 		goto out;
 
 	/* no undo structure around - allocate one. */
@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	/* step 3: Acquire the lock on semaphore array */
 	rcu_read_lock();
 	sem_lock_and_putref(sma);
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		kfree(new);
@@ -1735,7 +1735,7 @@ out:
 
 
 /**
- * get_queue_result - Retrieve the result code from sem_queue
+ * get_queue_result - retrieve the result code from sem_queue
  * @q: Pointer to queue structure
  *
 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
@@ -1765,7 +1765,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	int error = -EINVAL;
 	struct sem_array *sma;
 	struct sembuf fast_sops[SEMOPM_FAST];
-	struct sembuf* sops = fast_sops, *sop;
+	struct sembuf *sops = fast_sops, *sop;
 	struct sem_undo *un;
 	int undos = 0, alter = 0, max, locknum;
 	struct sem_queue queue;
@@ -1779,13 +1779,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1779 return -EINVAL; 1779 return -EINVAL;
1780 if (nsops > ns->sc_semopm) 1780 if (nsops > ns->sc_semopm)
1781 return -E2BIG; 1781 return -E2BIG;
1782 if(nsops > SEMOPM_FAST) { 1782 if (nsops > SEMOPM_FAST) {
1783 sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); 1783 sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
1784 if(sops==NULL) 1784 if (sops == NULL)
1785 return -ENOMEM; 1785 return -ENOMEM;
1786 } 1786 }
1787 if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { 1787 if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
1788 error=-EFAULT; 1788 error = -EFAULT;
1789 goto out_free; 1789 goto out_free;
1790 } 1790 }
1791 if (timeout) { 1791 if (timeout) {
@@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1846 1846
1847 error = -EIDRM; 1847 error = -EIDRM;
1848 locknum = sem_lock(sma, sops, nsops); 1848 locknum = sem_lock(sma, sops, nsops);
1849 if (sma->sem_perm.deleted) 1849 /*
1850 * We eventually might perform the following check in a lockless
1851 * fashion, considering ipc_valid_object() locking constraints.
1852 * If nsops == 1 and there is no contention for sem_perm.lock, then
1853 * only a per-semaphore lock is held and it's OK to proceed with the
1854 * check below. More details on the fine grained locking scheme
1855 * entangled here and why it's RMID race safe on comments at sem_lock()
1856 */
1857 if (!ipc_valid_object(&sma->sem_perm))
1850 goto out_unlock_free; 1858 goto out_unlock_free;
1851 /* 1859 /*
1852 * semid identifiers are not unique - find_alloc_undo may have 1860 * semid identifiers are not unique - find_alloc_undo may have
@@ -1959,10 +1967,8 @@ sleep_again:
1959 * If queue.status != -EINTR we are woken up by another process. 1967 * If queue.status != -EINTR we are woken up by another process.
1960 * Leave without unlink_queue(), but with sem_unlock(). 1968 * Leave without unlink_queue(), but with sem_unlock().
1961 */ 1969 */
1962 1970 if (error != -EINTR)
1963 if (error != -EINTR) {
1964 goto out_unlock_free; 1971 goto out_unlock_free;
1965 }
1966 1972
1967 /* 1973 /*
1968 * If an interrupt occurred we have to clean up the queue 1974 * If an interrupt occurred we have to clean up the queue
@@ -1984,7 +1990,7 @@ out_rcu_wakeup:
1984 rcu_read_unlock(); 1990 rcu_read_unlock();
1985 wake_up_sem_queue_do(&tasks); 1991 wake_up_sem_queue_do(&tasks);
1986out_free: 1992out_free:
1987 if(sops != fast_sops) 1993 if (sops != fast_sops)
1988 kfree(sops); 1994 kfree(sops);
1989 return error; 1995 return error;
1990} 1996}
@@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk)
2068 2074
2069 sem_lock(sma, NULL, -1); 2075 sem_lock(sma, NULL, -1);
2070 /* exit_sem raced with IPC_RMID, nothing to do */ 2076 /* exit_sem raced with IPC_RMID, nothing to do */
2071 if (sma->sem_perm.deleted) { 2077 if (!ipc_valid_object(&sma->sem_perm)) {
2072 sem_unlock(sma, -1); 2078 sem_unlock(sma, -1);
2073 rcu_read_unlock(); 2079 rcu_read_unlock();
2074 continue; 2080 continue;
@@ -2093,7 +2099,7 @@ void exit_sem(struct task_struct *tsk)
2093 2099
2094 /* perform adjustments registered in un */ 2100 /* perform adjustments registered in un */
2095 for (i = 0; i < sma->sem_nsems; i++) { 2101 for (i = 0; i < sma->sem_nsems; i++) {
2096 struct sem * semaphore = &sma->sem_base[i]; 2102 struct sem *semaphore = &sma->sem_base[i];
2097 if (un->semadj[i]) { 2103 if (un->semadj[i]) {
2098 semaphore->semval += un->semadj[i]; 2104 semaphore->semval += un->semadj[i];
2099 /* 2105 /*
@@ -2107,7 +2113,7 @@ void exit_sem(struct task_struct *tsk)
2107 * Linux caps the semaphore value, both at 0 2113 * Linux caps the semaphore value, both at 0
2108 * and at SEMVMX. 2114 * and at SEMVMX.
2109 * 2115 *
2110 * Manfred <manfred@colorfullife.com> 2116 * Manfred <manfred@colorfullife.com>
2111 */ 2117 */
2112 if (semaphore->semval < 0) 2118 if (semaphore->semval < 0)
2113 semaphore->semval = 0; 2119 semaphore->semval = 0;
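
The recurring change in the ipc/sem.c hunks above (and in ipc/shm.c and ipc/util.c below) is that open-coded checks of sem_perm.deleted, or of shm_file == NULL, are replaced by the ipc_valid_object() helper introduced in ipc/util.h later in this series. A minimal userspace sketch of what that helper tests; the struct layout and main() below are illustrative assumptions, only the helper body mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the helper added to ipc/util.h further down: an ipc
 * object stays valid until ipc_rmid() flips its deleted flag. */
struct kern_ipc_perm {
	bool deleted;
};

static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
{
	return !perm->deleted;
}

int main(void)
{
	struct kern_ipc_perm perm = { .deleted = false };

	printf("before rmid: valid=%d\n", ipc_valid_object(&perm));	/* 1 */
	perm.deleted = true;	/* what ipc_rmid() now does (deleted = true) */
	printf("after rmid:  valid=%d\n", ipc_valid_object(&perm));	/* 0 */
	return 0;
}
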
diff --git a/ipc/shm.c b/ipc/shm.c
index 7a51443a51d6..76459616a7fa 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -67,7 +67,7 @@ static const struct vm_operations_struct shm_vm_ops;
67static int newseg(struct ipc_namespace *, struct ipc_params *); 67static int newseg(struct ipc_namespace *, struct ipc_params *);
68static void shm_open(struct vm_area_struct *vma); 68static void shm_open(struct vm_area_struct *vma);
69static void shm_close(struct vm_area_struct *vma); 69static void shm_close(struct vm_area_struct *vma);
70static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); 70static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
71#ifdef CONFIG_PROC_FS 71#ifdef CONFIG_PROC_FS
72static int sysvipc_shm_proc_show(struct seq_file *s, void *it); 72static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
73#endif 73#endif
@@ -91,7 +91,7 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
91 struct shmid_kernel *shp; 91 struct shmid_kernel *shp;
92 shp = container_of(ipcp, struct shmid_kernel, shm_perm); 92 shp = container_of(ipcp, struct shmid_kernel, shm_perm);
93 93
94 if (shp->shm_nattch){ 94 if (shp->shm_nattch) {
95 shp->shm_perm.mode |= SHM_DEST; 95 shp->shm_perm.mode |= SHM_DEST;
96 /* Do not find it any more */ 96 /* Do not find it any more */
97 shp->shm_perm.key = IPC_PRIVATE; 97 shp->shm_perm.key = IPC_PRIVATE;
@@ -116,7 +116,7 @@ static int __init ipc_ns_init(void)
116 116
117pure_initcall(ipc_ns_init); 117pure_initcall(ipc_ns_init);
118 118
119void __init shm_init (void) 119void __init shm_init(void)
120{ 120{
121 ipc_init_proc_interface("sysvipc/shm", 121 ipc_init_proc_interface("sysvipc/shm",
122#if BITS_PER_LONG <= 32 122#if BITS_PER_LONG <= 32
@@ -248,7 +248,7 @@ static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
248 */ 248 */
249static void shm_close(struct vm_area_struct *vma) 249static void shm_close(struct vm_area_struct *vma)
250{ 250{
251 struct file * file = vma->vm_file; 251 struct file *file = vma->vm_file;
252 struct shm_file_data *sfd = shm_file_data(file); 252 struct shm_file_data *sfd = shm_file_data(file);
253 struct shmid_kernel *shp; 253 struct shmid_kernel *shp;
254 struct ipc_namespace *ns = sfd->ns; 254 struct ipc_namespace *ns = sfd->ns;
@@ -379,7 +379,7 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
379} 379}
380#endif 380#endif
381 381
382static int shm_mmap(struct file * file, struct vm_area_struct * vma) 382static int shm_mmap(struct file *file, struct vm_area_struct *vma)
383{ 383{
384 struct shm_file_data *sfd = shm_file_data(file); 384 struct shm_file_data *sfd = shm_file_data(file);
385 int ret; 385 int ret;
@@ -477,7 +477,6 @@ static const struct vm_operations_struct shm_vm_ops = {
477 * 477 *
478 * Called with shm_ids.rwsem held as a writer. 478 * Called with shm_ids.rwsem held as a writer.
479 */ 479 */
480
481static int newseg(struct ipc_namespace *ns, struct ipc_params *params) 480static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
482{ 481{
483 key_t key = params->key; 482 key_t key = params->key;
@@ -486,7 +485,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
486 int error; 485 int error;
487 struct shmid_kernel *shp; 486 struct shmid_kernel *shp;
488 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; 487 size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
489 struct file * file; 488 struct file *file;
490 char name[13]; 489 char name[13];
491 int id; 490 int id;
492 vm_flags_t acctflag = 0; 491 vm_flags_t acctflag = 0;
@@ -512,7 +511,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
512 return error; 511 return error;
513 } 512 }
514 513
515 sprintf (name, "SYSV%08x", key); 514 sprintf(name, "SYSV%08x", key);
516 if (shmflg & SHM_HUGETLB) { 515 if (shmflg & SHM_HUGETLB) {
517 struct hstate *hs; 516 struct hstate *hs;
518 size_t hugesize; 517 size_t hugesize;
@@ -533,7 +532,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
533 } else { 532 } else {
534 /* 533 /*
535 * Do not allow no accounting for OVERCOMMIT_NEVER, even 534 * Do not allow no accounting for OVERCOMMIT_NEVER, even
536 * if it's asked for. 535 * if it's asked for.
537 */ 536 */
538 if ((shmflg & SHM_NORESERVE) && 537 if ((shmflg & SHM_NORESERVE) &&
539 sysctl_overcommit_memory != OVERCOMMIT_NEVER) 538 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
@@ -628,7 +627,7 @@ SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
628 627
629static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) 628static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
630{ 629{
631 switch(version) { 630 switch (version) {
632 case IPC_64: 631 case IPC_64:
633 return copy_to_user(buf, in, sizeof(*in)); 632 return copy_to_user(buf, in, sizeof(*in));
634 case IPC_OLD: 633 case IPC_OLD:
@@ -655,7 +654,7 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
655static inline unsigned long 654static inline unsigned long
656copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) 655copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
657{ 656{
658 switch(version) { 657 switch (version) {
659 case IPC_64: 658 case IPC_64:
660 if (copy_from_user(out, buf, sizeof(*out))) 659 if (copy_from_user(out, buf, sizeof(*out)))
661 return -EFAULT; 660 return -EFAULT;
@@ -680,14 +679,14 @@ copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
680 679
681static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) 680static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
682{ 681{
683 switch(version) { 682 switch (version) {
684 case IPC_64: 683 case IPC_64:
685 return copy_to_user(buf, in, sizeof(*in)); 684 return copy_to_user(buf, in, sizeof(*in));
686 case IPC_OLD: 685 case IPC_OLD:
687 { 686 {
688 struct shminfo out; 687 struct shminfo out;
689 688
690 if(in->shmmax > INT_MAX) 689 if (in->shmmax > INT_MAX)
691 out.shmmax = INT_MAX; 690 out.shmmax = INT_MAX;
692 else 691 else
693 out.shmmax = (int)in->shmmax; 692 out.shmmax = (int)in->shmmax;
@@ -846,14 +845,14 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
846 shminfo.shmall = ns->shm_ctlall; 845 shminfo.shmall = ns->shm_ctlall;
847 846
848 shminfo.shmmin = SHMMIN; 847 shminfo.shmmin = SHMMIN;
849 if(copy_shminfo_to_user (buf, &shminfo, version)) 848 if (copy_shminfo_to_user(buf, &shminfo, version))
850 return -EFAULT; 849 return -EFAULT;
851 850
852 down_read(&shm_ids(ns).rwsem); 851 down_read(&shm_ids(ns).rwsem);
853 err = ipc_get_maxid(&shm_ids(ns)); 852 err = ipc_get_maxid(&shm_ids(ns));
854 up_read(&shm_ids(ns).rwsem); 853 up_read(&shm_ids(ns).rwsem);
855 854
856 if(err<0) 855 if (err < 0)
857 err = 0; 856 err = 0;
858 goto out; 857 goto out;
859 } 858 }
@@ -864,7 +863,7 @@ static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
864 memset(&shm_info, 0, sizeof(shm_info)); 863 memset(&shm_info, 0, sizeof(shm_info));
865 down_read(&shm_ids(ns).rwsem); 864 down_read(&shm_ids(ns).rwsem);
866 shm_info.used_ids = shm_ids(ns).in_use; 865 shm_info.used_ids = shm_ids(ns).in_use;
867 shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); 866 shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
868 shm_info.shm_tot = ns->shm_tot; 867 shm_info.shm_tot = ns->shm_tot;
869 shm_info.swap_attempts = 0; 868 shm_info.swap_attempts = 0;
870 shm_info.swap_successes = 0; 869 shm_info.swap_successes = 0;
@@ -975,6 +974,13 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
975 goto out_unlock1; 974 goto out_unlock1;
976 975
977 ipc_lock_object(&shp->shm_perm); 976 ipc_lock_object(&shp->shm_perm);
977
978 /* check if shm_destroy() is tearing down shp */
979 if (!ipc_valid_object(&shp->shm_perm)) {
980 err = -EIDRM;
981 goto out_unlock0;
982 }
983
978 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { 984 if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
979 kuid_t euid = current_euid(); 985 kuid_t euid = current_euid();
980 if (!uid_eq(euid, shp->shm_perm.uid) && 986 if (!uid_eq(euid, shp->shm_perm.uid) &&
@@ -989,13 +995,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
989 } 995 }
990 996
991 shm_file = shp->shm_file; 997 shm_file = shp->shm_file;
992
993 /* check if shm_destroy() is tearing down shp */
994 if (shm_file == NULL) {
995 err = -EIDRM;
996 goto out_unlock0;
997 }
998
999 if (is_file_hugepages(shm_file)) 998 if (is_file_hugepages(shm_file))
1000 goto out_unlock0; 999 goto out_unlock0;
1001 1000
@@ -1047,7 +1046,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1047 struct shmid_kernel *shp; 1046 struct shmid_kernel *shp;
1048 unsigned long addr; 1047 unsigned long addr;
1049 unsigned long size; 1048 unsigned long size;
1050 struct file * file; 1049 struct file *file;
1051 int err; 1050 int err;
1052 unsigned long flags; 1051 unsigned long flags;
1053 unsigned long prot; 1052 unsigned long prot;
@@ -1116,7 +1115,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
1116 ipc_lock_object(&shp->shm_perm); 1115 ipc_lock_object(&shp->shm_perm);
1117 1116
1118 /* check if shm_destroy() is tearing down shp */ 1117 /* check if shm_destroy() is tearing down shp */
1119 if (shp->shm_file == NULL) { 1118 if (!ipc_valid_object(&shp->shm_perm)) {
1120 ipc_unlock_object(&shp->shm_perm); 1119 ipc_unlock_object(&shp->shm_perm);
1121 err = -EIDRM; 1120 err = -EIDRM;
1122 goto out_unlock; 1121 goto out_unlock;
diff --git a/ipc/util.c b/ipc/util.c
index 3ae17a4ace5b..e1b4c6db8aa0 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -110,15 +110,15 @@ static struct notifier_block ipc_memory_nb = {
110}; 110};
111 111
112/** 112/**
113 * ipc_init - initialise IPC subsystem 113 * ipc_init - initialise ipc subsystem
114 * 114 *
115 * The various system5 IPC resources (semaphores, messages and shared 115 * The various sysv ipc resources (semaphores, messages and shared
116 * memory) are initialised 116 * memory) are initialised.
117 * A callback routine is registered into the memory hotplug notifier 117 *
118 * chain: since msgmni scales to lowmem this callback routine will be 118 * A callback routine is registered into the memory hotplug notifier
119 * called upon successful memory add / remove to recompute msmgni. 119 * chain: since msgmni scales to lowmem this callback routine will be
119 * called upon successful memory add / remove to recompute msgmni. 120 * called upon successful memory add / remove to recompute msgmni.
120 */ 121 */
121
122static int __init ipc_init(void) 122static int __init ipc_init(void)
123{ 123{
124 sem_init(); 124 sem_init();
@@ -131,39 +131,29 @@ static int __init ipc_init(void)
131__initcall(ipc_init); 131__initcall(ipc_init);
132 132
133/** 133/**
134 * ipc_init_ids - initialise IPC identifiers 134 * ipc_init_ids - initialise ipc identifiers
135 * @ids: Identifier set 135 * @ids: ipc identifier set
136 * 136 *
137 * Set up the sequence range to use for the ipc identifier range (limited 137 * Set up the sequence range to use for the ipc identifier range (limited
138 * below IPCMNI) then initialise the ids idr. 138 * below IPCMNI) then initialise the ids idr.
139 */ 139 */
140
141void ipc_init_ids(struct ipc_ids *ids) 140void ipc_init_ids(struct ipc_ids *ids)
142{ 141{
143 init_rwsem(&ids->rwsem);
144
145 ids->in_use = 0; 142 ids->in_use = 0;
146 ids->seq = 0; 143 ids->seq = 0;
147 ids->next_id = -1; 144 ids->next_id = -1;
148 { 145 init_rwsem(&ids->rwsem);
149 int seq_limit = INT_MAX/SEQ_MULTIPLIER;
150 if (seq_limit > USHRT_MAX)
151 ids->seq_max = USHRT_MAX;
152 else
153 ids->seq_max = seq_limit;
154 }
155
156 idr_init(&ids->ipcs_idr); 146 idr_init(&ids->ipcs_idr);
157} 147}
158 148
159#ifdef CONFIG_PROC_FS 149#ifdef CONFIG_PROC_FS
160static const struct file_operations sysvipc_proc_fops; 150static const struct file_operations sysvipc_proc_fops;
161/** 151/**
162 * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface. 152 * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface.
163 * @path: Path in procfs 153 * @path: Path in procfs
164 * @header: Banner to be printed at the beginning of the file. 154 * @header: Banner to be printed at the beginning of the file.
165 * @ids: ipc id table to iterate. 155 * @ids: ipc id table to iterate.
166 * @show: show routine. 156 * @show: show routine.
167 */ 157 */
168void __init ipc_init_proc_interface(const char *path, const char *header, 158void __init ipc_init_proc_interface(const char *path, const char *header,
169 int ids, int (*show)(struct seq_file *, void *)) 159 int ids, int (*show)(struct seq_file *, void *))
@@ -184,23 +174,21 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
184 NULL, /* parent dir */ 174 NULL, /* parent dir */
185 &sysvipc_proc_fops, 175 &sysvipc_proc_fops,
186 iface); 176 iface);
187 if (!pde) { 177 if (!pde)
188 kfree(iface); 178 kfree(iface);
189 }
190} 179}
191#endif 180#endif
192 181
193/** 182/**
194 * ipc_findkey - find a key in an ipc identifier set 183 * ipc_findkey - find a key in an ipc identifier set
195 * @ids: Identifier set 184 * @ids: ipc identifier set
196 * @key: The key to find 185 * @key: key to find
197 * 186 *
198 * Requires ipc_ids.rwsem locked. 187 * Returns the locked pointer to the ipc structure if found or NULL
199 * Returns the LOCKED pointer to the ipc structure if found or NULL 188 * otherwise. If key is found ipc points to the owning ipc structure
200 * if not. 189 *
201 * If key is found ipc points to the owning ipc structure 190 * Called with ipc_ids.rwsem held.
202 */ 191 */
203
204static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) 192static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
205{ 193{
206 struct kern_ipc_perm *ipc; 194 struct kern_ipc_perm *ipc;
@@ -227,12 +215,11 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
227} 215}
228 216
229/** 217/**
230 * ipc_get_maxid - get the last assigned id 218 * ipc_get_maxid - get the last assigned id
231 * @ids: IPC identifier set 219 * @ids: ipc identifier set
232 * 220 *
233 * Called with ipc_ids.rwsem held. 221 * Called with ipc_ids.rwsem held.
234 */ 222 */
235
236int ipc_get_maxid(struct ipc_ids *ids) 223int ipc_get_maxid(struct ipc_ids *ids)
237{ 224{
238 struct kern_ipc_perm *ipc; 225 struct kern_ipc_perm *ipc;
@@ -258,19 +245,19 @@ int ipc_get_maxid(struct ipc_ids *ids)
258} 245}
259 246
260/** 247/**
261 * ipc_addid - add an IPC identifier 248 * ipc_addid - add an ipc identifier
262 * @ids: IPC identifier set 249 * @ids: ipc identifier set
263 * @new: new IPC permission set 250 * @new: new ipc permission set
264 * @size: limit for the number of used ids 251 * @size: limit for the number of used ids
265 * 252 *
266 * Add an entry 'new' to the IPC ids idr. The permissions object is 253 * Add an entry 'new' to the ipc ids idr. The permissions object is
267 * initialised and the first free entry is set up and the id assigned 254 * initialised and the first free entry is set up and the id assigned
268 * is returned. The 'new' entry is returned in a locked state on success. 255 * is returned. The 'new' entry is returned in a locked state on success.
269 * On failure the entry is not locked and a negative err-code is returned. 256 * On failure the entry is not locked and a negative err-code is returned.
270 * 257 *
271 * Called with writer ipc_ids.rwsem held. 258 * Called with writer ipc_ids.rwsem held.
272 */ 259 */
273int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) 260int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
274{ 261{
275 kuid_t euid; 262 kuid_t euid;
276 kgid_t egid; 263 kgid_t egid;
@@ -286,7 +273,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
286 idr_preload(GFP_KERNEL); 273 idr_preload(GFP_KERNEL);
287 274
288 spin_lock_init(&new->lock); 275 spin_lock_init(&new->lock);
289 new->deleted = 0; 276 new->deleted = false;
290 rcu_read_lock(); 277 rcu_read_lock();
291 spin_lock(&new->lock); 278 spin_lock(&new->lock);
292 279
@@ -308,7 +295,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
308 295
309 if (next_id < 0) { 296 if (next_id < 0) {
310 new->seq = ids->seq++; 297 new->seq = ids->seq++;
311 if (ids->seq > ids->seq_max) 298 if (ids->seq > IPCID_SEQ_MAX)
312 ids->seq = 0; 299 ids->seq = 0;
313 } else { 300 } else {
314 new->seq = ipcid_to_seqx(next_id); 301 new->seq = ipcid_to_seqx(next_id);
@@ -320,14 +307,14 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
320} 307}
321 308
322/** 309/**
323 * ipcget_new - create a new ipc object 310 * ipcget_new - create a new ipc object
324 * @ns: namespace 311 * @ns: ipc namespace
325 @ids: IPC identifier set 312 @ids: ipc identifier set
326 * @ops: the actual creation routine to call 313 * @ops: the actual creation routine to call
327 * @params: its parameters 314 * @params: its parameters
328 * 315 *
329 * This routine is called by sys_msgget, sys_semget() and sys_shmget() 316 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
330 * when the key is IPC_PRIVATE. 317 * when the key is IPC_PRIVATE.
331 */ 318 */
332static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, 319static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
333 struct ipc_ops *ops, struct ipc_params *params) 320 struct ipc_ops *ops, struct ipc_params *params)
@@ -341,19 +328,19 @@ static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
341} 328}
342 329
343/** 330/**
344 * ipc_check_perms - check security and permissions for an IPC 331 * ipc_check_perms - check security and permissions for an ipc object
345 * @ns: IPC namespace 332 * @ns: ipc namespace
346 * @ipcp: ipc permission set 333 * @ipcp: ipc permission set
347 * @ops: the actual security routine to call 334 * @ops: the actual security routine to call
348 * @params: its parameters 335 * @params: its parameters
349 * 336 *
350 * This routine is called by sys_msgget(), sys_semget() and sys_shmget() 337 * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
351 * when the key is not IPC_PRIVATE and that key already exists in the 338 * when the key is not IPC_PRIVATE and that key already exists in the
352 * ids IDR. 339 * ids IDR.
353 * 340 *
354 * On success, the IPC id is returned. 341 * On success, the ipc id is returned.
355 * 342 *
356 * It is called with ipc_ids.rwsem and ipcp->lock held. 343 * It is called with ipc_ids.rwsem and ipcp->lock held.
357 */ 344 */
358static int ipc_check_perms(struct ipc_namespace *ns, 345static int ipc_check_perms(struct ipc_namespace *ns,
359 struct kern_ipc_perm *ipcp, 346 struct kern_ipc_perm *ipcp,
@@ -374,18 +361,18 @@ static int ipc_check_perms(struct ipc_namespace *ns,
374} 361}
375 362
376/** 363/**
377 * ipcget_public - get an ipc object or create a new one 364 * ipcget_public - get an ipc object or create a new one
378 * @ns: namespace 365 * @ns: ipc namespace
380 @ids: IPC identifier set 367 @ids: ipc identifier set
380 * @ops: the actual creation routine to call 367 * @ops: the actual creation routine to call
381 * @params: its parameters 368 * @params: its parameters
382 * 369 *
383 * This routine is called by sys_msgget, sys_semget() and sys_shmget() 370 * This routine is called by sys_msgget, sys_semget() and sys_shmget()
384 * when the key is not IPC_PRIVATE. 371 * when the key is not IPC_PRIVATE.
385 * It adds a new entry if the key is not found and does some permission 372 * It adds a new entry if the key is not found and does some permission
386 * / security checkings if the key is found. 373 * / security checkings if the key is found.
387 * 374 *
388 * On success, the ipc id is returned. 375 * On success, the ipc id is returned.
389 */ 376 */
390static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, 377static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
391 struct ipc_ops *ops, struct ipc_params *params) 378 struct ipc_ops *ops, struct ipc_params *params)
@@ -431,39 +418,33 @@ static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
431 418
432 419
433/** 420/**
434 * ipc_rmid - remove an IPC identifier 421 * ipc_rmid - remove an ipc identifier
435 * @ids: IPC identifier set 422 * @ids: ipc identifier set
436 * @ipcp: ipc perm structure containing the identifier to remove 423 * @ipcp: ipc perm structure containing the identifier to remove
437 * 424 *
438 * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held 425 * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
439 * before this function is called, and remain locked on the exit. 426 * before this function is called, and remain locked on the exit.
440 */ 427 */
441
442void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) 428void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
443{ 429{
444 int lid = ipcid_to_idx(ipcp->id); 430 int lid = ipcid_to_idx(ipcp->id);
445 431
446 idr_remove(&ids->ipcs_idr, lid); 432 idr_remove(&ids->ipcs_idr, lid);
447
448 ids->in_use--; 433 ids->in_use--;
449 434 ipcp->deleted = true;
450 ipcp->deleted = 1;
451
452 return;
453} 435}
454 436
455/** 437/**
456 * ipc_alloc - allocate ipc space 438 * ipc_alloc - allocate ipc space
457 * @size: size desired 439 * @size: size desired
458 * 440 *
459 * Allocate memory from the appropriate pools and return a pointer to it. 441 * Allocate memory from the appropriate pools and return a pointer to it.
460 * NULL is returned if the allocation fails 442 * NULL is returned if the allocation fails
461 */ 443 */
462
463void *ipc_alloc(int size) 444void *ipc_alloc(int size)
464{ 445{
465 void *out; 446 void *out;
466 if(size > PAGE_SIZE) 447 if (size > PAGE_SIZE)
467 out = vmalloc(size); 448 out = vmalloc(size);
468 else 449 else
469 out = kmalloc(size, GFP_KERNEL); 450 out = kmalloc(size, GFP_KERNEL);
@@ -471,28 +452,27 @@ void *ipc_alloc(int size)
471} 452}
472 453
473/** 454/**
474 * ipc_free - free ipc space 455 * ipc_free - free ipc space
475 * @ptr: pointer returned by ipc_alloc 456 * @ptr: pointer returned by ipc_alloc
476 * @size: size of block 457 * @size: size of block
477 * 458 *
478 * Free a block created with ipc_alloc(). The caller must know the size 459 * Free a block created with ipc_alloc(). The caller must know the size
479 * used in the allocation call. 460 * used in the allocation call.
480 */ 461 */
481 462void ipc_free(void *ptr, int size)
482void ipc_free(void* ptr, int size)
483{ 463{
484 if(size > PAGE_SIZE) 464 if (size > PAGE_SIZE)
485 vfree(ptr); 465 vfree(ptr);
486 else 466 else
487 kfree(ptr); 467 kfree(ptr);
488} 468}
489 469
490/** 470/**
491 * ipc_rcu_alloc - allocate ipc and rcu space 471 * ipc_rcu_alloc - allocate ipc and rcu space
492 * @size: size desired 472 * @size: size desired
493 * 473 *
494 * Allocate memory for the rcu header structure + the object. 474 * Allocate memory for the rcu header structure + the object.
495 * Returns the pointer to the object or NULL upon failure. 475 * Returns the pointer to the object or NULL upon failure.
496 */ 476 */
497void *ipc_rcu_alloc(int size) 477void *ipc_rcu_alloc(int size)
498{ 478{
@@ -534,17 +514,16 @@ void ipc_rcu_free(struct rcu_head *head)
534} 514}
535 515
536/** 516/**
537 * ipcperms - check IPC permissions 517 * ipcperms - check ipc permissions
538 * @ns: IPC namespace 518 * @ns: ipc namespace
539 * @ipcp: IPC permission set 519 * @ipcp: ipc permission set
540 * @flag: desired permission set. 520 * @flag: desired permission set
541 * 521 *
542 * Check user, group, other permissions for access 522 * Check user, group, other permissions for access
543 * to ipc resources. return 0 if allowed 523 * to ipc resources. return 0 if allowed
544 * 524 *
545 * @flag will most probably be 0 or S_...UGO from <linux/stat.h> 525 * @flag will most probably be 0 or S_...UGO from <linux/stat.h>
546 */ 526 */
547
548int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) 527int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
549{ 528{
550 kuid_t euid = current_euid(); 529 kuid_t euid = current_euid();
@@ -572,16 +551,14 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
572 */ 551 */
573 552
574/** 553/**
575 * kernel_to_ipc64_perm - convert kernel ipc permissions to user 554 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
576 * @in: kernel permissions 555 * @in: kernel permissions
577 * @out: new style IPC permissions 556 * @out: new style ipc permissions
578 * 557 *
579 * Turn the kernel object @in into a set of permissions descriptions 558 * Turn the kernel object @in into a set of permissions descriptions
580 * for returning to userspace (@out). 559 * for returning to userspace (@out).
581 */ 560 */
582 561void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out)
583
584void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
585{ 562{
586 out->key = in->key; 563 out->key = in->key;
587 out->uid = from_kuid_munged(current_user_ns(), in->uid); 564 out->uid = from_kuid_munged(current_user_ns(), in->uid);
@@ -593,15 +570,14 @@ void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
593} 570}
594 571
595/** 572/**
596 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old 573 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
597 * @in: new style IPC permissions 574 * @in: new style ipc permissions
598 * @out: old style IPC permissions 575 * @out: old style ipc permissions
599 * 576 *
600 * Turn the new style permissions object @in into a compatibility 577 * Turn the new style permissions object @in into a compatibility
601 * object and store it into the @out pointer. 578 * object and store it into the @out pointer.
602 */ 579 */
603 580void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
604void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
605{ 581{
606 out->key = in->key; 582 out->key = in->key;
607 SET_UID(out->uid, in->uid); 583 SET_UID(out->uid, in->uid);
@@ -635,8 +611,8 @@ struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
635} 611}
636 612
637/** 613/**
638 * ipc_lock - Lock an ipc structure without rwsem held 614 * ipc_lock - lock an ipc structure without rwsem held
639 * @ids: IPC identifier set 615 * @ids: ipc identifier set
640 * @id: ipc id to look for 616 * @id: ipc id to look for
641 * 617 *
642 * Look for an id in the ipc ids idr and lock the associated ipc object. 618 * Look for an id in the ipc ids idr and lock the associated ipc object.
@@ -657,7 +633,7 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
657 /* ipc_rmid() may have already freed the ID while ipc_lock 633 /* ipc_rmid() may have already freed the ID while ipc_lock
658 * was spinning: here verify that the structure is still valid 634 * was spinning: here verify that the structure is still valid
659 */ 635 */
660 if (!out->deleted) 636 if (ipc_valid_object(out))
661 return out; 637 return out;
662 638
663 spin_unlock(&out->lock); 639 spin_unlock(&out->lock);
@@ -693,11 +669,11 @@ out:
693 669
694/** 670/**
695 * ipcget - Common sys_*get() code 671 * ipcget - Common sys_*get() code
696 @ns : namespace 672 @ns: namespace
697 * @ids : IPC identifier set 673 * @ids: ipc identifier set
698 * @ops : operations to be called on ipc object creation, permission checks 674 * @ops: operations to be called on ipc object creation, permission checks
699 * and further checks 675 * and further checks
700 * @params : the parameters needed by the previous operations. 676 * @params: the parameters needed by the previous operations.
701 * 677 *
702 * Common routine called by sys_msgget(), sys_semget() and sys_shmget(). 678 * Common routine called by sys_msgget(), sys_semget() and sys_shmget().
703 */ 679 */
@@ -711,7 +687,7 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
711} 687}
712 688
713/** 689/**
714 * ipc_update_perm - update the permissions of an IPC. 690 * ipc_update_perm - update the permissions of an ipc object
715 * @in: the permission given as input. 691 * @in: the permission given as input.
716 * @out: the permission of the ipc to set. 692 * @out: the permission of the ipc to set.
717 */ 693 */
@@ -732,7 +708,7 @@ int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
732 708
733/** 709/**
734 * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd 710 * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
735 * @ns: the ipc namespace 711 * @ns: ipc namespace
736 * @ids: the table of ids where to look for the ipc 712 * @ids: the table of ids where to look for the ipc
737 * @id: the id of the ipc to retrieve 713 * @id: the id of the ipc to retrieve
738 * @cmd: the cmd to check 714 * @cmd: the cmd to check
@@ -779,15 +755,14 @@ err:
779 755
780 756
781/** 757/**
782 * ipc_parse_version - IPC call version 758 * ipc_parse_version - ipc call version
783 * @cmd: pointer to command 759 * @cmd: pointer to command
784 * 760 *
785 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC. 761 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
786 * The @cmd value is turned from an encoding command and version into 762 * The @cmd value is turned from an encoding command and version into
787 * just the command code. 763 * just the command code.
788 */ 764 */
789 765int ipc_parse_version(int *cmd)
790int ipc_parse_version (int *cmd)
791{ 766{
792 if (*cmd & IPC_64) { 767 if (*cmd & IPC_64) {
793 *cmd ^= IPC_64; 768 *cmd ^= IPC_64;
@@ -824,7 +799,7 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
824 if (total >= ids->in_use) 799 if (total >= ids->in_use)
825 return NULL; 800 return NULL;
826 801
827 for ( ; pos < IPCMNI; pos++) { 802 for (; pos < IPCMNI; pos++) {
828 ipc = idr_find(&ids->ipcs_idr, pos); 803 ipc = idr_find(&ids->ipcs_idr, pos);
829 if (ipc != NULL) { 804 if (ipc != NULL) {
830 *new_pos = pos + 1; 805 *new_pos = pos + 1;
@@ -927,8 +902,10 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
927 goto out; 902 goto out;
928 903
929 ret = seq_open(file, &sysvipc_proc_seqops); 904 ret = seq_open(file, &sysvipc_proc_seqops);
930 if (ret) 905 if (ret) {
931 goto out_kfree; 906 kfree(iter);
907 goto out;
908 }
932 909
933 seq = file->private_data; 910 seq = file->private_data;
934 seq->private = iter; 911 seq->private = iter;
@@ -937,9 +914,6 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
937 iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); 914 iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
938out: 915out:
939 return ret; 916 return ret;
940out_kfree:
941 kfree(iter);
942 goto out;
943} 917}
944 918
945static int sysvipc_proc_release(struct inode *inode, struct file *file) 919static int sysvipc_proc_release(struct inode *inode, struct file *file)
diff --git a/ipc/util.h b/ipc/util.h
index 59d78aa94987..9c47d6f6c7b4 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -15,9 +15,9 @@
15 15
16#define SEQ_MULTIPLIER (IPCMNI) 16#define SEQ_MULTIPLIER (IPCMNI)
17 17
18void sem_init (void); 18void sem_init(void);
19void msg_init (void); 19void msg_init(void);
20void shm_init (void); 20void shm_init(void);
21 21
22struct ipc_namespace; 22struct ipc_namespace;
23 23
@@ -100,6 +100,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
100 100
101#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) 101#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER)
102#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) 102#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER)
103#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX)
103 104
104/* must be called with ids->rwsem acquired for writing */ 105/* must be called with ids->rwsem acquired for writing */
105int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); 106int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int);
@@ -116,8 +117,8 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg);
116/* for rare, potentially huge allocations. 117/* for rare, potentially huge allocations.
117 * both function can sleep 118 * both function can sleep
118 */ 119 */
119void* ipc_alloc(int size); 120void *ipc_alloc(int size);
120void ipc_free(void* ptr, int size); 121void ipc_free(void *ptr, int size);
121 122
122/* 123/*
123 * For allocation that need to be freed by RCU. 124 * For allocation that need to be freed by RCU.
@@ -125,7 +126,7 @@ void ipc_free(void* ptr, int size);
125 * getref increases the refcount, the putref call that reduces the refcount 126 * getref increases the refcount, the putref call that reduces the refcount
126 * to 0 schedules the rcu destruction. Caller must guarantee locking. 127 * to 0 schedules the rcu destruction. Caller must guarantee locking.
127 */ 128 */
128void* ipc_rcu_alloc(int size); 129void *ipc_rcu_alloc(int size);
129int ipc_rcu_getref(void *ptr); 130int ipc_rcu_getref(void *ptr);
130void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); 131void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
131void ipc_rcu_free(struct rcu_head *head); 132void ipc_rcu_free(struct rcu_head *head);
@@ -144,7 +145,7 @@ struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
144 /* On IA-64, we always use the "64-bit version" of the IPC structures. */ 145 /* On IA-64, we always use the "64-bit version" of the IPC structures. */
145# define ipc_parse_version(cmd) IPC_64 146# define ipc_parse_version(cmd) IPC_64
146#else 147#else
147int ipc_parse_version (int *cmd); 148int ipc_parse_version(int *cmd);
148#endif 149#endif
149 150
150extern void free_msg(struct msg_msg *msg); 151extern void free_msg(struct msg_msg *msg);
@@ -185,6 +186,19 @@ static inline void ipc_unlock(struct kern_ipc_perm *perm)
185 rcu_read_unlock(); 186 rcu_read_unlock();
186} 187}
187 188
189/*
190 * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths
191 * where the respective ipc_ids.rwsem is not being held down.
192 * Checks whether the ipc object is still around or if it's gone already, as
193 * ipc_rmid() may have already freed the ID while the ipc lock was spinning.
194 * Needs to be called with kern_ipc_perm.lock held -- exception made for one
195 * checkpoint case at sys_semtimedop() as noted in code commentary.
196 */
197static inline bool ipc_valid_object(struct kern_ipc_perm *perm)
198{
199 return !perm->deleted;
200}
201
188struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); 202struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id);
189int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, 203int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
190 struct ipc_ops *ops, struct ipc_params *params); 204 struct ipc_ops *ops, struct ipc_params *params);
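
The IPCID_SEQ_MAX macro added to ipc/util.h replaces the per-ids seq_max field that ipc_init_ids() used to compute at runtime. A small sketch of what the min_t() expression evaluates to, assuming the customary IPCMNI value of 32768 (the constant itself is defined elsewhere in the kernel headers):

#include <limits.h>
#include <stdio.h>

#define IPCMNI		32768		/* assumed; matches the usual kernel value */
#define SEQ_MULTIPLIER	(IPCMNI)
/* Userspace expansion of min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX) */
#define IPCID_SEQ_MAX \
	(INT_MAX / SEQ_MULTIPLIER < USHRT_MAX ? INT_MAX / SEQ_MULTIPLIER : USHRT_MAX)

int main(void)
{
	/* Same bound the old ids->seq_max held: 65535 with these values */
	printf("IPCID_SEQ_MAX = %d\n", IPCID_SEQ_MAX);
	return 0;
}
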
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ac738781d356..60bafbed06ab 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1537,7 +1537,7 @@ void vmcoreinfo_append_str(const char *fmt, ...)
1537 size_t r; 1537 size_t r;
1538 1538
1539 va_start(args, fmt); 1539 va_start(args, fmt);
1540 r = vsnprintf(buf, sizeof(buf), fmt, args); 1540 r = vscnprintf(buf, sizeof(buf), fmt, args);
1541 va_end(args); 1541 va_end(args);
1542 1542
1543 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size); 1543 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
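
The vmcoreinfo_append_str() change from vsnprintf() to vscnprintf() matters because the return value is later used as a copy length: vsnprintf() reports how much it would have written had the buffer been big enough, while vscnprintf() reports how many bytes actually landed in the buffer. A hedged userspace sketch of the difference; my_scnprintf() is an illustrative stand-in, not the kernel function:

#include <stdarg.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's vscnprintf(): return how many
 * bytes were actually stored in buf (excluding the NUL), never more
 * than size - 1, instead of vsnprintf()'s "would have written" count. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i < 0)
		return 0;
	return (size_t)i < size ? i : (int)(size ? size - 1 : 0);
}

int main(void)
{
	char buf[8];
	int would = snprintf(buf, sizeof(buf), "PAGESIZE=%d\n", 4096);
	int stored = my_scnprintf(buf, sizeof(buf), "PAGESIZE=%d\n", 4096);

	/* would = 14 (too large to use as a copy length), stored = 7 (safe) */
	printf("vsnprintf-style return: %d, vscnprintf-style return: %d\n",
	       would, stored);
	return 0;
}
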
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 8a1e6e104892..850967068aaf 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -8,6 +8,8 @@
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) 8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 */ 9 */
10 10
11#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
11#include <linux/export.h> 13#include <linux/export.h>
12#include <linux/kernel_stat.h> 14#include <linux/kernel_stat.h>
13#include <linux/interrupt.h> 15#include <linux/interrupt.h>
@@ -54,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
54 56
55DEFINE_PER_CPU(struct task_struct *, ksoftirqd); 57DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
56 58
57char *softirq_to_name[NR_SOFTIRQS] = { 59const char * const softirq_to_name[NR_SOFTIRQS] = {
58 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59 "TASKLET", "SCHED", "HRTIMER", "RCU" 61 "TASKLET", "SCHED", "HRTIMER", "RCU"
60}; 62};
@@ -136,7 +138,6 @@ void _local_bh_enable(void)
136 WARN_ON_ONCE(in_irq()); 138 WARN_ON_ONCE(in_irq());
137 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET); 139 __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
138} 140}
139
140EXPORT_SYMBOL(_local_bh_enable); 141EXPORT_SYMBOL(_local_bh_enable);
141 142
142void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) 143void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
@@ -153,7 +154,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
153 /* 154 /*
154 * Keep preemption disabled until we are done with 155 * Keep preemption disabled until we are done with
155 * softirq processing: 156 * softirq processing:
156 */ 157 */
157 preempt_count_sub(cnt - 1); 158 preempt_count_sub(cnt - 1);
158 159
159 if (unlikely(!in_interrupt() && local_softirq_pending())) { 160 if (unlikely(!in_interrupt() && local_softirq_pending())) {
@@ -229,6 +230,7 @@ asmlinkage void __do_softirq(void)
229 struct softirq_action *h; 230 struct softirq_action *h;
230 bool in_hardirq; 231 bool in_hardirq;
231 __u32 pending; 232 __u32 pending;
233 int softirq_bit;
232 int cpu; 234 int cpu;
233 235
234 /* 236 /*
@@ -253,30 +255,30 @@ restart:
253 255
254 h = softirq_vec; 256 h = softirq_vec;
255 257
256 do { 258 while ((softirq_bit = ffs(pending))) {
257 if (pending & 1) { 259 unsigned int vec_nr;
258 unsigned int vec_nr = h - softirq_vec; 260 int prev_count;
259 int prev_count = preempt_count();
260
261 kstat_incr_softirqs_this_cpu(vec_nr);
262
263 trace_softirq_entry(vec_nr);
264 h->action(h);
265 trace_softirq_exit(vec_nr);
266 if (unlikely(prev_count != preempt_count())) {
267 printk(KERN_ERR "huh, entered softirq %u %s %p"
268 "with preempt_count %08x,"
269 " exited with %08x?\n", vec_nr,
270 softirq_to_name[vec_nr], h->action,
271 prev_count, preempt_count());
272 preempt_count_set(prev_count);
273 }
274 261
275 rcu_bh_qs(cpu); 262 h += softirq_bit - 1;
263
264 vec_nr = h - softirq_vec;
265 prev_count = preempt_count();
266
267 kstat_incr_softirqs_this_cpu(vec_nr);
268
269 trace_softirq_entry(vec_nr);
270 h->action(h);
271 trace_softirq_exit(vec_nr);
272 if (unlikely(prev_count != preempt_count())) {
273 pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
274 vec_nr, softirq_to_name[vec_nr], h->action,
275 prev_count, preempt_count());
276 preempt_count_set(prev_count);
276 } 277 }
278 rcu_bh_qs(cpu);
277 h++; 279 h++;
278 pending >>= 1; 280 pending >>= softirq_bit;
279 } while (pending); 281 }
280 282
281 local_irq_disable(); 283 local_irq_disable();
282 284
@@ -433,8 +435,7 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
433/* 435/*
434 * Tasklets 436 * Tasklets
435 */ 437 */
436struct tasklet_head 438struct tasklet_head {
437{
438 struct tasklet_struct *head; 439 struct tasklet_struct *head;
439 struct tasklet_struct **tail; 440 struct tasklet_struct **tail;
440}; 441};
@@ -453,7 +454,6 @@ void __tasklet_schedule(struct tasklet_struct *t)
453 raise_softirq_irqoff(TASKLET_SOFTIRQ); 454 raise_softirq_irqoff(TASKLET_SOFTIRQ);
454 local_irq_restore(flags); 455 local_irq_restore(flags);
455} 456}
456
457EXPORT_SYMBOL(__tasklet_schedule); 457EXPORT_SYMBOL(__tasklet_schedule);
458 458
459void __tasklet_hi_schedule(struct tasklet_struct *t) 459void __tasklet_hi_schedule(struct tasklet_struct *t)
@@ -467,7 +467,6 @@ void __tasklet_hi_schedule(struct tasklet_struct *t)
467 raise_softirq_irqoff(HI_SOFTIRQ); 467 raise_softirq_irqoff(HI_SOFTIRQ);
468 local_irq_restore(flags); 468 local_irq_restore(flags);
469} 469}
470
471EXPORT_SYMBOL(__tasklet_hi_schedule); 470EXPORT_SYMBOL(__tasklet_hi_schedule);
472 471
473void __tasklet_hi_schedule_first(struct tasklet_struct *t) 472void __tasklet_hi_schedule_first(struct tasklet_struct *t)
@@ -478,7 +477,6 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
478 __this_cpu_write(tasklet_hi_vec.head, t); 477 __this_cpu_write(tasklet_hi_vec.head, t);
479 __raise_softirq_irqoff(HI_SOFTIRQ); 478 __raise_softirq_irqoff(HI_SOFTIRQ);
480} 479}
481
482EXPORT_SYMBOL(__tasklet_hi_schedule_first); 480EXPORT_SYMBOL(__tasklet_hi_schedule_first);
483 481
484static void tasklet_action(struct softirq_action *a) 482static void tasklet_action(struct softirq_action *a)
@@ -498,7 +496,8 @@ static void tasklet_action(struct softirq_action *a)
498 496
499 if (tasklet_trylock(t)) { 497 if (tasklet_trylock(t)) {
500 if (!atomic_read(&t->count)) { 498 if (!atomic_read(&t->count)) {
501 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 499 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
500 &t->state))
502 BUG(); 501 BUG();
503 t->func(t->data); 502 t->func(t->data);
504 tasklet_unlock(t); 503 tasklet_unlock(t);
@@ -533,7 +532,8 @@ static void tasklet_hi_action(struct softirq_action *a)
533 532
534 if (tasklet_trylock(t)) { 533 if (tasklet_trylock(t)) {
535 if (!atomic_read(&t->count)) { 534 if (!atomic_read(&t->count)) {
536 if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) 535 if (!test_and_clear_bit(TASKLET_STATE_SCHED,
536 &t->state))
537 BUG(); 537 BUG();
538 t->func(t->data); 538 t->func(t->data);
539 tasklet_unlock(t); 539 tasklet_unlock(t);
@@ -551,7 +551,6 @@ static void tasklet_hi_action(struct softirq_action *a)
551 } 551 }
552} 552}
553 553
554
555void tasklet_init(struct tasklet_struct *t, 554void tasklet_init(struct tasklet_struct *t,
556 void (*func)(unsigned long), unsigned long data) 555 void (*func)(unsigned long), unsigned long data)
557{ 556{
@@ -561,13 +560,12 @@ void tasklet_init(struct tasklet_struct *t,
561 t->func = func; 560 t->func = func;
562 t->data = data; 561 t->data = data;
563} 562}
564
565EXPORT_SYMBOL(tasklet_init); 563EXPORT_SYMBOL(tasklet_init);
566 564
567void tasklet_kill(struct tasklet_struct *t) 565void tasklet_kill(struct tasklet_struct *t)
568{ 566{
569 if (in_interrupt()) 567 if (in_interrupt())
570 printk("Attempt to kill tasklet from interrupt\n"); 568 pr_notice("Attempt to kill tasklet from interrupt\n");
571 569
572 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { 570 while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
573 do { 571 do {
@@ -577,7 +575,6 @@ void tasklet_kill(struct tasklet_struct *t)
577 tasklet_unlock_wait(t); 575 tasklet_unlock_wait(t);
578 clear_bit(TASKLET_STATE_SCHED, &t->state); 576 clear_bit(TASKLET_STATE_SCHED, &t->state);
579} 577}
580
581EXPORT_SYMBOL(tasklet_kill); 578EXPORT_SYMBOL(tasklet_kill);
582 579
583/* 580/*
@@ -727,9 +724,8 @@ static void takeover_tasklets(unsigned int cpu)
727} 724}
728#endif /* CONFIG_HOTPLUG_CPU */ 725#endif /* CONFIG_HOTPLUG_CPU */
729 726
730static int cpu_callback(struct notifier_block *nfb, 727static int cpu_callback(struct notifier_block *nfb, unsigned long action,
731 unsigned long action, 728 void *hcpu)
732 void *hcpu)
733{ 729{
734 switch (action) { 730 switch (action) {
735#ifdef CONFIG_HOTPLUG_CPU 731#ifdef CONFIG_HOTPLUG_CPU
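
The reworked __do_softirq() loop above walks pending softirqs with ffs() instead of testing one bit per iteration, so each pass jumps straight to the next set bit. A small userspace sketch of the same iteration shape; the bitmask value and the printf() are illustrative only:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int pending = 0x205;	/* bits 0, 2 and 9 set */
	int base = 0;
	int bit;

	/* ffs() returns the 1-based index of the lowest set bit, so each
	 * iteration skips straight to the next pending vector. */
	while ((bit = ffs(pending))) {
		int vec_nr = base + bit - 1;

		printf("servicing softirq vector %d\n", vec_nr);

		base += bit;		/* mirrors "h += softirq_bit - 1; ... h++" */
		pending >>= bit;	/* mirrors "pending >>= softirq_bit" */
	}
	return 0;
}
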
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 600ac57e2777..7288e38e1757 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -268,14 +268,12 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
268 */ 268 */
269static inline int parse_lineno(const char *str, unsigned int *val) 269static inline int parse_lineno(const char *str, unsigned int *val)
270{ 270{
271 char *end = NULL;
272 BUG_ON(str == NULL); 271 BUG_ON(str == NULL);
273 if (*str == '\0') { 272 if (*str == '\0') {
274 *val = 0; 273 *val = 0;
275 return 0; 274 return 0;
276 } 275 }
277 *val = simple_strtoul(str, &end, 10); 276 if (kstrtouint(str, 10, val) < 0) {
278 if (end == NULL || end == str || *end != '\0') {
279 pr_err("bad line-number: %s\n", str); 277 pr_err("bad line-number: %s\n", str);
280 return -EINVAL; 278 return -EINVAL;
281 } 279 }
@@ -348,14 +346,14 @@ static int ddebug_parse_query(char *words[], int nwords,
348 } 346 }
349 if (last) 347 if (last)
350 *last++ = '\0'; 348 *last++ = '\0';
351 if (parse_lineno(first, &query->first_lineno) < 0) { 349 if (parse_lineno(first, &query->first_lineno) < 0)
352 pr_err("line-number is <0\n");
353 return -EINVAL; 350 return -EINVAL;
354 }
355 if (last) { 351 if (last) {
356 /* range <first>-<last> */ 352 /* range <first>-<last> */
357 if (parse_lineno(last, &query->last_lineno) 353 if (parse_lineno(last, &query->last_lineno) < 0)
358 < query->first_lineno) { 354 return -EINVAL;
355
356 if (query->last_lineno < query->first_lineno) {
359 pr_err("last-line:%d < 1st-line:%d\n", 357 pr_err("last-line:%d < 1st-line:%d\n",
360 query->last_lineno, 358 query->last_lineno,
361 query->first_lineno); 359 query->first_lineno);
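
parse_lineno() now relies on kstrtouint(), which already rejects empty strings, trailing characters and out-of-range values, making the old simple_strtoul() end-pointer check redundant. A userspace approximation of that strict parse, assuming strtoul() plus explicit checks is close enough to kstrtouint() for illustration:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Approximate stand-in for kstrtouint(): accept only a complete,
 * in-range unsigned decimal number. */
static int parse_uint_strict(const char *str, unsigned int *val)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(str, &end, 10);
	if (end == str || *end != '\0' || errno == ERANGE || v > UINT_MAX)
		return -EINVAL;
	*val = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int line;

	printf("%d\n", parse_uint_strict("1234", &line));	/* 0, line == 1234 */
	printf("%d\n", parse_uint_strict("12x4", &line));	/* -EINVAL */
	printf("%d\n", parse_uint_strict("", &line));		/* -EINVAL */
	return 0;
}
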
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 2e1c102759ce..b604b831f4d1 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -172,7 +172,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
172 /* 172 /*
173 * Get the overflow emergency buffer 173 * Get the overflow emergency buffer
174 */ 174 */
175 v_overflow_buffer = memblock_virt_alloc_nopanic( 175 v_overflow_buffer = memblock_virt_alloc_low_nopanic(
176 PAGE_ALIGN(io_tlb_overflow), 176 PAGE_ALIGN(io_tlb_overflow),
177 PAGE_SIZE); 177 PAGE_SIZE);
178 if (!v_overflow_buffer) 178 if (!v_overflow_buffer)
@@ -220,7 +220,7 @@ swiotlb_init(int verbose)
220 bytes = io_tlb_nslabs << IO_TLB_SHIFT; 220 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
221 221
222 /* Get IO TLB memory from the low pages */ 222 /* Get IO TLB memory from the low pages */
223 vstart = memblock_virt_alloc_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE); 223 vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
224 if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose)) 224 if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
225 return; 225 return;
226 226
diff --git a/mm/memblock.c b/mm/memblock.c
index 9c0aeef19440..87d21a6ff63c 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -984,9 +984,6 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
984 if (!align) 984 if (!align)
985 align = SMP_CACHE_BYTES; 985 align = SMP_CACHE_BYTES;
986 986
987 /* align @size to avoid excessive fragmentation on reserved array */
988 size = round_up(size, align);
989
990 found = memblock_find_in_range_node(size, align, 0, max_addr, nid); 987 found = memblock_find_in_range_node(size, align, 0, max_addr, nid);
991 if (found && !memblock_reserve(found, size)) 988 if (found && !memblock_reserve(found, size))
992 return found; 989 return found;
@@ -1080,9 +1077,6 @@ static void * __init memblock_virt_alloc_internal(
1080 if (!align) 1077 if (!align)
1081 align = SMP_CACHE_BYTES; 1078 align = SMP_CACHE_BYTES;
1082 1079
1083 /* align @size to avoid excessive fragmentation on reserved array */
1084 size = round_up(size, align);
1085
1086again: 1080again:
1087 alloc = memblock_find_in_range_node(size, align, min_addr, max_addr, 1081 alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
1088 nid); 1082 nid);
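
Both memblock hunks drop the size = round_up(size, align) padding, so memblock now reserves exactly the size the caller requested rather than a size padded up to the alignment. For reference, a tiny sketch of what the removed rounding did (align is a power of two at these call sites):

#include <stdio.h>

/* Power-of-two rounding, as the removed line applied to @size */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	printf("%lu\n", (unsigned long)round_up(100UL, 64UL));	/* 128 */
	printf("%lu\n", (unsigned long)round_up(128UL, 64UL));	/* 128 */
	return 0;
}
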
diff --git a/mm/migrate.c b/mm/migrate.c
index 734704f6f29b..482a33d89134 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1548,8 +1548,6 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
1548 __GFP_NOMEMALLOC | __GFP_NORETRY | 1548 __GFP_NOMEMALLOC | __GFP_NORETRY |
1549 __GFP_NOWARN) & 1549 __GFP_NOWARN) &
1550 ~GFP_IOFS, 0); 1550 ~GFP_IOFS, 0);
1551 if (newpage)
1552 page_cpupid_xchg_last(newpage, page_cpupid_last(page));
1553 1551
1554 return newpage; 1552 return newpage;
1555} 1553}
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 857a6434e3a5..4074caf9936b 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -202,4 +202,4 @@ static int __init mm_sysfs_init(void)
202 202
203 return 0; 203 return 0;
204} 204}
205pure_initcall(mm_sysfs_init); 205postcore_initcall(mm_sysfs_init);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e4f0db2a3eae..0fdf96803c5b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -220,12 +220,12 @@ int is_vmalloc_or_module_addr(const void *x)
220} 220}
221 221
222/* 222/*
223 * Walk a vmap address to the physical pfn it maps to. 223 * Walk a vmap address to the struct page it maps.
224 */ 224 */
225unsigned long vmalloc_to_pfn(const void *vmalloc_addr) 225struct page *vmalloc_to_page(const void *vmalloc_addr)
226{ 226{
227 unsigned long addr = (unsigned long) vmalloc_addr; 227 unsigned long addr = (unsigned long) vmalloc_addr;
228 unsigned long pfn = 0; 228 struct page *page = NULL;
229 pgd_t *pgd = pgd_offset_k(addr); 229 pgd_t *pgd = pgd_offset_k(addr);
230 230
231 /* 231 /*
@@ -244,23 +244,23 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
244 ptep = pte_offset_map(pmd, addr); 244 ptep = pte_offset_map(pmd, addr);
245 pte = *ptep; 245 pte = *ptep;
246 if (pte_present(pte)) 246 if (pte_present(pte))
247 pfn = pte_pfn(pte); 247 page = pte_page(pte);
248 pte_unmap(ptep); 248 pte_unmap(ptep);
249 } 249 }
250 } 250 }
251 } 251 }
252 return pfn; 252 return page;
253} 253}
254EXPORT_SYMBOL(vmalloc_to_pfn); 254EXPORT_SYMBOL(vmalloc_to_page);
255 255
256/* 256/*
257 * Map a vmalloc()-space virtual address to the struct page. 257 * Map a vmalloc()-space virtual address to the physical page frame number.
258 */ 258 */
259struct page *vmalloc_to_page(const void *vmalloc_addr) 259unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
260{ 260{
261 return pfn_to_page(vmalloc_to_pfn(vmalloc_addr)); 261 return page_to_pfn(vmalloc_to_page(vmalloc_addr));
262} 262}
263EXPORT_SYMBOL(vmalloc_to_page); 263EXPORT_SYMBOL(vmalloc_to_pfn);
264 264
265 265
266/*** Global kva allocator ***/ 266/*** Global kva allocator ***/
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 1dbd6d1cd1b5..0ea2a1e24ade 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2665,6 +2665,15 @@ sub process {
2665 $herecurr); 2665 $herecurr);
2666 } 2666 }
2667 2667
2668# check for function declarations without arguments like "int foo()"
2669 if ($line =~ /(\b$Type\s+$Ident)\s*\(\s*\)/) {
2670 if (ERROR("FUNCTION_WITHOUT_ARGS",
2671 "Bad function definition - $1() should probably be $1(void)\n" . $herecurr) &&
2672 $fix) {
2673 $fixed[$linenr - 1] =~ s/(\b($Type)\s+($Ident))\s*\(\s*\)/$2 $3(void)/;
2674 }
2675 }
2676
2668# check for uses of DEFINE_PCI_DEVICE_TABLE 2677# check for uses of DEFINE_PCI_DEVICE_TABLE
2669 if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) { 2678 if ($line =~ /\bDEFINE_PCI_DEVICE_TABLE\s*\(\s*(\w+)\s*\)\s*=/) {
2670 if (WARN("DEFINE_PCI_DEVICE_TABLE", 2679 if (WARN("DEFINE_PCI_DEVICE_TABLE",