diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-31 21:10:18 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-31 21:10:18 -0400 |
commit | 08615d7d85e5aa02c05bf6c4dde87d940e7f85f6 (patch) | |
tree | 18906149d313d25914160aca21cedf54b3a7e818 /arch | |
parent | 9fdadb2cbaf4b482dfd6086e8bd3d2db071a1702 (diff) | |
parent | 0a4dd35c67b144d8ef9432120105f1aab9293ee9 (diff) |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge misc patches from Andrew Morton:
- the "misc" tree - stuff from all over the map
- checkpatch updates
- fatfs
- kmod changes
- procfs
- cpumask
- UML
- kexec
- mqueue
- rapidio
- pidns
- some checkpoint-restore feature work. Reluctantly. Most of it
delayed a release. I'm still rather worried that we don't have a
clear roadmap to completion for this work.
* emailed from Andrew Morton <akpm@linux-foundation.org>: (78 patches)
kconfig: update compression algorithm info
c/r: prctl: add ability to set new mm_struct::exe_file
c/r: prctl: extend PR_SET_MM to set up more mm_struct entries
c/r: procfs: add arg_start/end, env_start/end and exit_code members to /proc/$pid/stat
syscalls, x86: add __NR_kcmp syscall
fs, proc: introduce /proc/<pid>/task/<tid>/children entry
sysctl: make kernel.ns_last_pid control dependent on CHECKPOINT_RESTORE
aio/vfs: cleanup of rw_copy_check_uvector() and compat_rw_copy_check_uvector()
eventfd: change int to __u64 in eventfd_signal()
fs/nls: add Apple NLS
pidns: make killed children autoreap
pidns: use task_active_pid_ns in do_notify_parent
rapidio/tsi721: add DMA engine support
rapidio: add DMA engine support for RIO data transfers
ipc/mqueue: add rbtree node caching support
tools/selftests: add mq_perf_tests
ipc/mqueue: strengthen checks on mqueue creation
ipc/mqueue: correct mq_attr_ok test
ipc/mqueue: improve performance of send/recv
selftests: add mq_open_tests
...
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/kernel/smp.c | 8 | ||||
-rw-r--r-- | arch/blackfin/kernel/trace.c | 32 | ||||
-rw-r--r-- | arch/powerpc/mm/mmu_context_nohash.c | 11 | ||||
-rw-r--r-- | arch/sh/kernel/smp.c | 7 | ||||
-rw-r--r-- | arch/um/kernel/reboot.c | 13 | ||||
-rw-r--r-- | arch/um/kernel/trap.c | 24 | ||||
-rw-r--r-- | arch/x86/syscalls/syscall_32.tbl | 1 | ||||
-rw-r--r-- | arch/x86/syscalls/syscall_64.tbl | 2 |
8 files changed, 52 insertions(+), 46 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index b735521a4a54..2c7217d971db 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void); | |||
109 | int __cpu_disable(void) | 109 | int __cpu_disable(void) |
110 | { | 110 | { |
111 | unsigned int cpu = smp_processor_id(); | 111 | unsigned int cpu = smp_processor_id(); |
112 | struct task_struct *p; | ||
113 | int ret; | 112 | int ret; |
114 | 113 | ||
115 | ret = platform_cpu_disable(cpu); | 114 | ret = platform_cpu_disable(cpu); |
@@ -139,12 +138,7 @@ int __cpu_disable(void) | |||
139 | flush_cache_all(); | 138 | flush_cache_all(); |
140 | local_flush_tlb_all(); | 139 | local_flush_tlb_all(); |
141 | 140 | ||
142 | read_lock(&tasklist_lock); | 141 | clear_tasks_mm_cpumask(cpu); |
143 | for_each_process(p) { | ||
144 | if (p->mm) | ||
145 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | ||
146 | } | ||
147 | read_unlock(&tasklist_lock); | ||
148 | 142 | ||
149 | return 0; | 143 | return 0; |
150 | } | 144 | } |
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c index 44bbf2f564cb..f7f7a18abca9 100644 --- a/arch/blackfin/kernel/trace.c +++ b/arch/blackfin/kernel/trace.c | |||
@@ -10,6 +10,8 @@ | |||
10 | #include <linux/hardirq.h> | 10 | #include <linux/hardirq.h> |
11 | #include <linux/thread_info.h> | 11 | #include <linux/thread_info.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/oom.h> | ||
14 | #include <linux/sched.h> | ||
13 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
14 | #include <linux/module.h> | 16 | #include <linux/module.h> |
15 | #include <linux/kallsyms.h> | 17 | #include <linux/kallsyms.h> |
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address) | |||
27 | { | 29 | { |
28 | struct task_struct *p; | 30 | struct task_struct *p; |
29 | struct mm_struct *mm; | 31 | struct mm_struct *mm; |
30 | unsigned long flags, offset; | 32 | unsigned long offset; |
31 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
32 | struct rb_node *n; | 33 | struct rb_node *n; |
33 | 34 | ||
34 | #ifdef CONFIG_KALLSYMS | 35 | #ifdef CONFIG_KALLSYMS |
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address) | |||
112 | * mappings of all our processes and see if we can't be a whee | 113 | * mappings of all our processes and see if we can't be a whee |
113 | * bit more specific | 114 | * bit more specific |
114 | */ | 115 | */ |
115 | write_lock_irqsave(&tasklist_lock, flags); | 116 | read_lock(&tasklist_lock); |
116 | for_each_process(p) { | 117 | for_each_process(p) { |
117 | mm = (in_atomic ? p->mm : get_task_mm(p)); | 118 | struct task_struct *t; |
118 | if (!mm) | ||
119 | continue; | ||
120 | 119 | ||
121 | if (!down_read_trylock(&mm->mmap_sem)) { | 120 | t = find_lock_task_mm(p); |
122 | if (!in_atomic) | 121 | if (!t) |
123 | mmput(mm); | ||
124 | continue; | 122 | continue; |
125 | } | 123 | |
124 | mm = t->mm; | ||
125 | if (!down_read_trylock(&mm->mmap_sem)) | ||
126 | goto __continue; | ||
126 | 127 | ||
127 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | 128 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { |
128 | struct vm_area_struct *vma; | 129 | struct vm_area_struct *vma; |
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address) | |||
131 | 132 | ||
132 | if (address >= vma->vm_start && address < vma->vm_end) { | 133 | if (address >= vma->vm_start && address < vma->vm_end) { |
133 | char _tmpbuf[256]; | 134 | char _tmpbuf[256]; |
134 | char *name = p->comm; | 135 | char *name = t->comm; |
135 | struct file *file = vma->vm_file; | 136 | struct file *file = vma->vm_file; |
136 | 137 | ||
137 | if (file) { | 138 | if (file) { |
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address) | |||
164 | name, vma->vm_start, vma->vm_end); | 165 | name, vma->vm_start, vma->vm_end); |
165 | 166 | ||
166 | up_read(&mm->mmap_sem); | 167 | up_read(&mm->mmap_sem); |
167 | if (!in_atomic) | 168 | task_unlock(t); |
168 | mmput(mm); | ||
169 | 169 | ||
170 | if (buf[0] == '\0') | 170 | if (buf[0] == '\0') |
171 | sprintf(buf, "[ %s ] dynamic memory", name); | 171 | sprintf(buf, "[ %s ] dynamic memory", name); |
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | up_read(&mm->mmap_sem); | 177 | up_read(&mm->mmap_sem); |
178 | if (!in_atomic) | 178 | __continue: |
179 | mmput(mm); | 179 | task_unlock(t); |
180 | } | 180 | } |
181 | 181 | ||
182 | /* | 182 | /* |
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address) | |||
186 | sprintf(buf, "/* kernel dynamic memory */"); | 186 | sprintf(buf, "/* kernel dynamic memory */"); |
187 | 187 | ||
188 | done: | 188 | done: |
189 | write_unlock_irqrestore(&tasklist_lock, flags); | 189 | read_unlock(&tasklist_lock); |
190 | } | 190 | } |
191 | 191 | ||
192 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) | 192 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) |
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index 5b63bd3da4a9..e779642c25e5 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c | |||
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, | |||
333 | unsigned long action, void *hcpu) | 333 | unsigned long action, void *hcpu) |
334 | { | 334 | { |
335 | unsigned int cpu = (unsigned int)(long)hcpu; | 335 | unsigned int cpu = (unsigned int)(long)hcpu; |
336 | #ifdef CONFIG_HOTPLUG_CPU | 336 | |
337 | struct task_struct *p; | ||
338 | #endif | ||
339 | /* We don't touch CPU 0 map, it's allocated at aboot and kept | 337 | /* We don't touch CPU 0 map, it's allocated at aboot and kept |
340 | * around forever | 338 | * around forever |
341 | */ | 339 | */ |
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, | |||
358 | stale_map[cpu] = NULL; | 356 | stale_map[cpu] = NULL; |
359 | 357 | ||
360 | /* We also clear the cpu_vm_mask bits of CPUs going away */ | 358 | /* We also clear the cpu_vm_mask bits of CPUs going away */ |
361 | read_lock(&tasklist_lock); | 359 | clear_tasks_mm_cpumask(cpu); |
362 | for_each_process(p) { | ||
363 | if (p->mm) | ||
364 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | ||
365 | } | ||
366 | read_unlock(&tasklist_lock); | ||
367 | break; | 360 | break; |
368 | #endif /* CONFIG_HOTPLUG_CPU */ | 361 | #endif /* CONFIG_HOTPLUG_CPU */ |
369 | } | 362 | } |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index b86e9ca79455..2062aa88af41 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -123,7 +123,6 @@ void native_play_dead(void) | |||
123 | int __cpu_disable(void) | 123 | int __cpu_disable(void) |
124 | { | 124 | { |
125 | unsigned int cpu = smp_processor_id(); | 125 | unsigned int cpu = smp_processor_id(); |
126 | struct task_struct *p; | ||
127 | int ret; | 126 | int ret; |
128 | 127 | ||
129 | ret = mp_ops->cpu_disable(cpu); | 128 | ret = mp_ops->cpu_disable(cpu); |
@@ -153,11 +152,7 @@ int __cpu_disable(void) | |||
153 | flush_cache_all(); | 152 | flush_cache_all(); |
154 | local_flush_tlb_all(); | 153 | local_flush_tlb_all(); |
155 | 154 | ||
156 | read_lock(&tasklist_lock); | 155 | clear_tasks_mm_cpumask(cpu); |
157 | for_each_process(p) | ||
158 | if (p->mm) | ||
159 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); | ||
160 | read_unlock(&tasklist_lock); | ||
161 | 156 | ||
162 | return 0; | 157 | return 0; |
163 | } | 158 | } |
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c index 4d93dff6b371..3d15243ce692 100644 --- a/arch/um/kernel/reboot.c +++ b/arch/um/kernel/reboot.c | |||
@@ -4,7 +4,9 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include "linux/sched.h" | 6 | #include "linux/sched.h" |
7 | #include "linux/spinlock.h" | ||
7 | #include "linux/slab.h" | 8 | #include "linux/slab.h" |
9 | #include "linux/oom.h" | ||
8 | #include "kern_util.h" | 10 | #include "kern_util.h" |
9 | #include "os.h" | 11 | #include "os.h" |
10 | #include "skas.h" | 12 | #include "skas.h" |
@@ -22,13 +24,18 @@ static void kill_off_processes(void) | |||
22 | struct task_struct *p; | 24 | struct task_struct *p; |
23 | int pid; | 25 | int pid; |
24 | 26 | ||
27 | read_lock(&tasklist_lock); | ||
25 | for_each_process(p) { | 28 | for_each_process(p) { |
26 | if (p->mm == NULL) | 29 | struct task_struct *t; |
27 | continue; | ||
28 | 30 | ||
29 | pid = p->mm->context.id.u.pid; | 31 | t = find_lock_task_mm(p); |
32 | if (!t) | ||
33 | continue; | ||
34 | pid = t->mm->context.id.u.pid; | ||
35 | task_unlock(t); | ||
30 | os_kill_ptraced_process(pid, 1); | 36 | os_kill_ptraced_process(pid, 1); |
31 | } | 37 | } |
38 | read_unlock(&tasklist_lock); | ||
32 | } | 39 | } |
33 | } | 40 | } |
34 | 41 | ||
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index dafc94715950..3be60765c0e2 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c | |||
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip, | |||
30 | pmd_t *pmd; | 30 | pmd_t *pmd; |
31 | pte_t *pte; | 31 | pte_t *pte; |
32 | int err = -EFAULT; | 32 | int err = -EFAULT; |
33 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
34 | (is_write ? FAULT_FLAG_WRITE : 0); | ||
33 | 35 | ||
34 | *code_out = SEGV_MAPERR; | 36 | *code_out = SEGV_MAPERR; |
35 | 37 | ||
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip, | |||
40 | if (in_atomic()) | 42 | if (in_atomic()) |
41 | goto out_nosemaphore; | 43 | goto out_nosemaphore; |
42 | 44 | ||
45 | retry: | ||
43 | down_read(&mm->mmap_sem); | 46 | down_read(&mm->mmap_sem); |
44 | vma = find_vma(mm, address); | 47 | vma = find_vma(mm, address); |
45 | if (!vma) | 48 | if (!vma) |
@@ -65,7 +68,11 @@ good_area: | |||
65 | do { | 68 | do { |
66 | int fault; | 69 | int fault; |
67 | 70 | ||
68 | fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); | 71 | fault = handle_mm_fault(mm, vma, address, flags); |
72 | |||
73 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
74 | goto out_nosemaphore; | ||
75 | |||
69 | if (unlikely(fault & VM_FAULT_ERROR)) { | 76 | if (unlikely(fault & VM_FAULT_ERROR)) { |
70 | if (fault & VM_FAULT_OOM) { | 77 | if (fault & VM_FAULT_OOM) { |
71 | goto out_of_memory; | 78 | goto out_of_memory; |
@@ -75,10 +82,17 @@ good_area: | |||
75 | } | 82 | } |
76 | BUG(); | 83 | BUG(); |
77 | } | 84 | } |
78 | if (fault & VM_FAULT_MAJOR) | 85 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
79 | current->maj_flt++; | 86 | if (fault & VM_FAULT_MAJOR) |
80 | else | 87 | current->maj_flt++; |
81 | current->min_flt++; | 88 | else |
89 | current->min_flt++; | ||
90 | if (fault & VM_FAULT_RETRY) { | ||
91 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
92 | |||
93 | goto retry; | ||
94 | } | ||
95 | } | ||
82 | 96 | ||
83 | pgd = pgd_offset(mm, address); | 97 | pgd = pgd_offset(mm, address); |
84 | pud = pud_offset(pgd, address); | 98 | pud = pud_offset(pgd, address); |
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl index 29f9f0554f7d..7a35a6e71d44 100644 --- a/arch/x86/syscalls/syscall_32.tbl +++ b/arch/x86/syscalls/syscall_32.tbl | |||
@@ -355,3 +355,4 @@ | |||
355 | 346 i386 setns sys_setns | 355 | 346 i386 setns sys_setns |
356 | 347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv | 356 | 347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv |
357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev | 357 | 348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev |
358 | 349 i386 kcmp sys_kcmp | ||
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl index dd29a9ea27c5..51171aeff0dc 100644 --- a/arch/x86/syscalls/syscall_64.tbl +++ b/arch/x86/syscalls/syscall_64.tbl | |||
@@ -318,6 +318,8 @@ | |||
318 | 309 common getcpu sys_getcpu | 318 | 309 common getcpu sys_getcpu |
319 | 310 64 process_vm_readv sys_process_vm_readv | 319 | 310 64 process_vm_readv sys_process_vm_readv |
320 | 311 64 process_vm_writev sys_process_vm_writev | 320 | 311 64 process_vm_writev sys_process_vm_writev |
321 | 312 64 kcmp sys_kcmp | ||
322 | |||
321 | # | 323 | # |
322 | # x32-specific system call numbers start at 512 to avoid cache impact | 324 | # x32-specific system call numbers start at 512 to avoid cache impact |
323 | # for native 64-bit operation. | 325 | # for native 64-bit operation. |