author	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 16:00:36 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 16:00:36 -0500
commit	78a45c6f067824cf5d0a9fedea7339ac2e28603c (patch)
tree	b4f78c8b6b9059ddace0a18c11629b8d2045f793 /kernel
parent	f96fe225677b3efb74346ebd56fafe3997b02afa (diff)
parent	29d293b6007b91a4463f05bc8d0b26e0e65c5816 (diff)
Merge branch 'akpm' (second patch-bomb from Andrew)
Merge second patchbomb from Andrew Morton:
 - the rest of MM
 - misc fs fixes
 - add execveat() syscall
 - new ratelimit feature for fault-injection
 - decompressor updates
 - ipc/ updates
 - fallocate feature creep
 - fsnotify cleanups
 - a few other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (99 commits)
  cgroups: Documentation: fix trivial typos and wrong paragraph numberings
  parisc: percpu: update comments referring to __get_cpu_var
  percpu: update local_ops.txt to reflect this_cpu operations
  percpu: remove __get_cpu_var and __raw_get_cpu_var macros
  fsnotify: remove destroy_list from fsnotify_mark
  fsnotify: unify inode and mount marks handling
  fallocate: create FAN_MODIFY and IN_MODIFY events
  mm/cma: make kmemleak ignore CMA regions
  slub: fix cpuset check in get_any_partial
  slab: fix cpuset check in fallback_alloc
  shmdt: use i_size_read() instead of ->i_size
  ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments
  ipc/msg: increase MSGMNI, remove scaling
  ipc/sem.c: increase SEMMSL, SEMMNI, SEMOPM
  ipc/sem.c: change memory barrier in sem_lock() to smp_rmb()
  lib/decompress.c: consistency of compress formats for kernel image
  decompress_bunzip2: off by one in get_next_block()
  usr/Kconfig: make initrd compression algorithm selection not expert
  fault-inject: add ratelimit option
  ratelimit: add initialization macro
  ...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c	16
-rw-r--r--	kernel/events/uprobes.c	6
-rw-r--r--	kernel/fork.c	4
-rw-r--r--	kernel/gcov/Kconfig	5
-rw-r--r--	kernel/kexec.c	2
-rw-r--r--	kernel/stacktrace.c	32
-rw-r--r--	kernel/sys_ni.c	3
7 files changed, 53 insertions(+), 15 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 80f29e015570..2e0c97427b33 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -174,9 +174,9 @@ static void insert_hash(struct audit_chunk *chunk)
 	struct fsnotify_mark *entry = &chunk->mark;
 	struct list_head *list;
 
-	if (!entry->i.inode)
+	if (!entry->inode)
 		return;
-	list = chunk_hash(entry->i.inode);
+	list = chunk_hash(entry->inode);
 	list_add_rcu(&chunk->hash, list);
 }
 
@@ -188,7 +188,7 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
 
 	list_for_each_entry_rcu(p, list, hash) {
 		/* mark.inode may have gone NULL, but who cares? */
-		if (p->mark.i.inode == inode) {
+		if (p->mark.inode == inode) {
 			atomic_long_inc(&p->refs);
 			return p;
 		}
@@ -231,7 +231,7 @@ static void untag_chunk(struct node *p)
 	new = alloc_chunk(size);
 
 	spin_lock(&entry->lock);
-	if (chunk->dead || !entry->i.inode) {
+	if (chunk->dead || !entry->inode) {
 		spin_unlock(&entry->lock);
 		if (new)
 			free_chunk(new);
@@ -258,7 +258,7 @@ static void untag_chunk(struct node *p)
 		goto Fallback;
 
 	fsnotify_duplicate_mark(&new->mark, entry);
-	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
+	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
 		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
@@ -386,7 +386,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	chunk_entry = &chunk->mark;
 
 	spin_lock(&old_entry->lock);
-	if (!old_entry->i.inode) {
+	if (!old_entry->inode) {
 		/* old_entry is being shot, lets just lie */
 		spin_unlock(&old_entry->lock);
 		fsnotify_put_mark(old_entry);
@@ -395,7 +395,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	}
 
 	fsnotify_duplicate_mark(chunk_entry, old_entry);
-	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
@@ -611,7 +611,7 @@ void audit_trim_trees(void)
 		list_for_each_entry(node, &tree->chunks, list) {
 			struct audit_chunk *chunk = find_chunk(node);
 			/* this could be NULL if the watch is dying else where... */
-			struct inode *inode = chunk->mark.i.inode;
+			struct inode *inode = chunk->mark.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
 				node->index &= ~(1U<<31);
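
The i.inode -> inode renames above fall out of "fsnotify: unify inode and mount marks handling" from this same series: the watched-object pointer moves from an embedded per-type sub-struct onto struct fsnotify_mark itself. A simplified sketch of the layout change (the real definition lives in include/linux/fsnotify_backend.h and carries more fields):

/* Before (simplified): the object hid inside a per-type sub-struct. */
struct fsnotify_mark {
	union {
		struct fsnotify_inode_mark i;		/* ->i.inode */
		struct fsnotify_vfsmount_mark m;	/* ->m.mnt */
	};
};

/* After (simplified): the object pointer sits directly in the mark. */
struct fsnotify_mark {
	union {
		struct inode *inode;			/* ->inode */
		struct vfsmount *mnt;			/* ->mnt */
	};
};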
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ed8f2cde34c5..995a95f61a19 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -724,14 +724,14 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 	int more = 0;
 
  again:
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_read(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (!valid_vma(vma, is_register))
 			continue;
 
 		if (!prev && !more) {
 			/*
-			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
+			 * Needs GFP_NOWAIT to avoid i_mmap_rwsem recursion through
 			 * reclaim. This is optimistic, no harm done if it fails.
 			 */
 			prev = kmalloc(sizeof(struct map_info),
@@ -755,7 +755,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 		info->mm = vma->vm_mm;
 		info->vaddr = offset_to_vaddr(vma, offset);
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_read(mapping);
 
 	if (!more)
 		goto out;
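
The lock conversion above is part of this series' switch of address_space->i_mmap_mutex to an rwsem (i_mmap_rwsem): build_map_info() only walks the interval tree, so it can now take the lock shared. The new helpers are thin wrappers; their definitions in include/linux/fs.h are essentially:

static inline void i_mmap_lock_read(struct address_space *mapping)
{
	down_read(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_read(struct address_space *mapping)
{
	up_read(&mapping->i_mmap_rwsem);
}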
diff --git a/kernel/fork.c b/kernel/fork.c
index 9ca84189cfc2..4dc2ddade9f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -433,7 +433,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-			mutex_lock(&mapping->i_mmap_mutex);
+			i_mmap_lock_write(mapping);
 			if (tmp->vm_flags & VM_SHARED)
 				atomic_inc(&mapping->i_mmap_writable);
 			flush_dcache_mmap_lock(mapping);
@@ -445,7 +445,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			vma_interval_tree_insert_after(tmp, mpnt,
 					&mapping->i_mmap);
 			flush_dcache_mmap_unlock(mapping);
-			mutex_unlock(&mapping->i_mmap_mutex);
+			i_mmap_unlock_write(mapping);
 		}
 
 		/*
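
dup_mmap(), by contrast, modifies the interval tree (vma_interval_tree_insert_after()), so it takes the same rwsem exclusively. The write-side wrappers mirror the read-side ones:

static inline void i_mmap_lock_write(struct address_space *mapping)
{
	down_write(&mapping->i_mmap_rwsem);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	up_write(&mapping->i_mmap_rwsem);
}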
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 3b7408759bdf..c92e44855ddd 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -32,10 +32,13 @@ config GCOV_KERNEL
 	  Note that the debugfs filesystem has to be mounted to access
 	  profiling data.
 
+config ARCH_HAS_GCOV_PROFILE_ALL
+	def_bool n
+
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
 	depends on GCOV_KERNEL
-	depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
+	depends on ARCH_HAS_GCOV_PROFILE_ALL
 	default n
 	---help---
 	This options activates profiling for the entire kernel.
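
With this change an architecture opts in from its own Kconfig instead of being whitelisted here. An illustrative snippet (the series adds a select like this for each architecture that was previously listed):

config X86
	def_bool y
	select ARCH_HAS_GCOV_PROFILE_ALL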
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 2abf9f6e9a61..9a8a01abbaed 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -600,7 +600,7 @@ kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
 	if (!kexec_on_panic) {
 		image->swap_page = kimage_alloc_control_pages(image, 0);
 		if (!image->swap_page) {
-			pr_err(KERN_ERR "Could not allocate swap buffer\n");
+			pr_err("Could not allocate swap buffer\n");
 			goto out_free_control_pages;
 		}
 	}
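
The bug fixed here: pr_err() already supplies the KERN_ERR log level, so passing KERN_ERR in the format string printed the level prefix as literal text. From include/linux/printk.h:

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)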
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index 00fe55cc5a82..b6e4c16377c7 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -25,6 +25,38 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
 }
 EXPORT_SYMBOL_GPL(print_stack_trace);
 
+int snprint_stack_trace(char *buf, size_t size,
+			struct stack_trace *trace, int spaces)
+{
+	int i;
+	unsigned long ip;
+	int generated;
+	int total = 0;
+
+	if (WARN_ON(!trace->entries))
+		return 0;
+
+	for (i = 0; i < trace->nr_entries; i++) {
+		ip = trace->entries[i];
+		generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
+				1 + spaces, ' ', (void *) ip, (void *) ip);
+
+		total += generated;
+
+		/* Assume that generated isn't a negative number */
+		if (generated >= size) {
+			buf += size;
+			size = 0;
+		} else {
+			buf += generated;
+			size -= generated;
+		}
+	}
+
+	return total;
+}
+EXPORT_SYMBOL_GPL(snprint_stack_trace);
+
 /*
  * Architectures that do not implement save_stack_trace_tsk or
  * save_stack_trace_regs get this weak alias and a once-per-bootup warning
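
snprint_stack_trace() is the snprintf()-style counterpart of print_stack_trace(): like snprintf(), it returns the length the full output would need even when the buffer runs out. A hypothetical in-kernel caller (a sketch only; the buffer size and trace depth are arbitrary):

static void report_current_stack(void)
{
	unsigned long entries[16];
	char buf[256];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};
	int need;

	save_stack_trace(&trace);
	need = snprint_stack_trace(buf, sizeof(buf), &trace, 1);
	if (need >= sizeof(buf))
		pr_warn("stack trace truncated, %d bytes needed\n", need);
	pr_info("%s", buf);
}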
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 61eea02b53f5..5adcb0ae3a58 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -226,3 +226,6 @@ cond_syscall(sys_seccomp);
 
 /* access BPF programs and maps */
 cond_syscall(sys_bpf);
+
+/* execveat */
+cond_syscall(sys_execveat);
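
cond_syscall() emits a weak stub that returns -ENOSYS on architectures where sys_execveat() isn't wired up. Until libc grows a wrapper, userspace would invoke the new syscall directly; a rough sketch, assuming the architecture defines __NR_execveat:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/usr/bin/true", O_PATH);

	/* Executes the file behind fd itself, fexecve()-style. */
	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	return 1;	/* reached only if execveat() failed */
}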