author	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 22:05:45 -0500
commit	df32e43a54d04eda35d2859beaf90e3864d53288 (patch)
tree	7a61cf658b2949bd426285eb9902be7758ced1ba /kernel
parent	fbd918a2026d0464ce9c23f57b7de4bcfccdc2e6 (diff)
parent	78d5506e82b21a1a1de68c24182db2c2fe521422 (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge first patch-bomb from Andrew Morton:

 - a couple of misc things
 - inotify/fsnotify work from Jan
 - ocfs2 updates (partial)
 - about half of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (117 commits)
  mm/migrate: remove unused function, fail_migrate_page()
  mm/migrate: remove putback_lru_pages, fix comment on putback_movable_pages
  mm/migrate: correct failure handling if !hugepage_migration_support()
  mm/migrate: add comment about permanent failure path
  mm, page_alloc: warn for non-blockable __GFP_NOFAIL allocation failure
  mm: compaction: reset scanner positions immediately when they meet
  mm: compaction: do not mark unmovable pageblocks as skipped in async compaction
  mm: compaction: detect when scanners meet in isolate_freepages
  mm: compaction: reset cached scanner pfn's before reading them
  mm: compaction: encapsulate defer reset logic
  mm: compaction: trace compaction begin and end
  memcg, oom: lock mem_cgroup_print_oom_info
  sched: add tracepoints related to NUMA task migration
  mm: numa: do not automatically migrate KSM pages
  mm: numa: trace tasks that fail migration due to rate limiting
  mm: numa: limit scope of lock for NUMA migrate rate limiting
  mm: numa: make NUMA-migrate related functions static
  lib/show_mem.c: show num_poisoned_pages when oom
  mm/hwpoison: add '#' to hwpoison_inject
  mm/memblock: use WARN_ONCE when MAX_NUMNODES passed as input parameter
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c       20
-rw-r--r--  kernel/audit_watch.c      24
-rw-r--r--  kernel/exit.c              1
-rw-r--r--  kernel/fork.c              7
-rw-r--r--  kernel/power/snapshot.c    2
-rw-r--r--  kernel/printk/printk.c    10
-rw-r--r--  kernel/sched/core.c        2
-rw-r--r--  kernel/sched/fair.c        6
-rw-r--r--  kernel/sysctl.c           11
9 files changed, 38 insertions, 45 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 43c307dc9453..67ccf0e7cca9 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -912,12 +912,13 @@ static void evict_chunk(struct audit_chunk *chunk)
 }
 
 static int audit_tree_handle_event(struct fsnotify_group *group,
+				   struct inode *to_tell,
 				   struct fsnotify_mark *inode_mark,
-				   struct fsnotify_mark *vfsmonut_mark,
-				   struct fsnotify_event *event)
+				   struct fsnotify_mark *vfsmount_mark,
+				   u32 mask, void *data, int data_type,
+				   const unsigned char *file_name)
 {
-	BUG();
-	return -EOPNOTSUPP;
+	return 0;
 }
 
 static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
@@ -933,19 +934,8 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify
 	BUG_ON(atomic_read(&entry->refcnt) < 1);
 }
 
-static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
-				  struct fsnotify_mark *inode_mark,
-				  struct fsnotify_mark *vfsmount_mark,
-				  __u32 mask, void *data, int data_type)
-{
-	return false;
-}
-
 static const struct fsnotify_ops audit_tree_ops = {
 	.handle_event = audit_tree_handle_event,
-	.should_send_event = audit_tree_send_event,
-	.free_group_priv = NULL,
-	.free_event_priv = NULL,
 	.freeing_mark = audit_tree_freeing_mark,
 };
 
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 22831c4d369c..2596fac5dcb4 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -465,35 +465,27 @@ void audit_remove_watch_rule(struct audit_krule *krule)
 	}
 }
 
-static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode,
-					   struct fsnotify_mark *inode_mark,
-					   struct fsnotify_mark *vfsmount_mark,
-					   __u32 mask, void *data, int data_type)
-{
-	return true;
-}
-
 /* Update watch data in audit rules based on fsnotify events. */
 static int audit_watch_handle_event(struct fsnotify_group *group,
+				    struct inode *to_tell,
 				    struct fsnotify_mark *inode_mark,
 				    struct fsnotify_mark *vfsmount_mark,
-				    struct fsnotify_event *event)
+				    u32 mask, void *data, int data_type,
+				    const unsigned char *dname)
 {
 	struct inode *inode;
-	__u32 mask = event->mask;
-	const char *dname = event->file_name;
 	struct audit_parent *parent;
 
 	parent = container_of(inode_mark, struct audit_parent, mark);
 
 	BUG_ON(group != audit_watch_group);
 
-	switch (event->data_type) {
+	switch (data_type) {
 	case (FSNOTIFY_EVENT_PATH):
-		inode = event->path.dentry->d_inode;
+		inode = ((struct path *)data)->dentry->d_inode;
 		break;
 	case (FSNOTIFY_EVENT_INODE):
-		inode = event->inode;
+		inode = (struct inode *)data;
 		break;
 	default:
 		BUG();
@@ -512,11 +504,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
 }
 
 static const struct fsnotify_ops audit_watch_fsnotify_ops = {
-	.should_send_event = audit_watch_should_send_event,
 	.handle_event = audit_watch_handle_event,
-	.free_group_priv = NULL,
-	.freeing_mark = NULL,
-	.free_event_priv = NULL,
 };
 
 static int __init audit_watch_init(void)
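
The two audit hunks above follow the fsnotify API change in this merge: backends no longer receive a pre-built struct fsnotify_event (or implement a separate ->should_send_event() callback); ->handle_event() now gets the inode being notified, the raw event payload, its type tag and the file name directly. Below is a minimal sketch of a backend callback written against that new signature; example_handle_event and example_ops are made-up names for illustration, not code from this diff.

/*
 * Illustrative sketch only -- not part of this diff.  A minimal fsnotify
 * backend callback using the new ->handle_event() signature shown above;
 * example_handle_event and example_ops are made-up names.
 */
#include <linux/fs.h>
#include <linux/fsnotify_backend.h>
#include <linux/path.h>
#include <linux/printk.h>

static int example_handle_event(struct fsnotify_group *group,
				struct inode *to_tell,
				struct fsnotify_mark *inode_mark,
				struct fsnotify_mark *vfsmount_mark,
				u32 mask, void *data, int data_type,
				const unsigned char *file_name)
{
	struct inode *inode;

	/* The payload arrives as an opaque pointer plus a type tag, so each
	 * backend interprets it itself, as audit_watch_handle_event() does. */
	switch (data_type) {
	case FSNOTIFY_EVENT_PATH:
		inode = ((struct path *)data)->dentry->d_inode;
		break;
	case FSNOTIFY_EVENT_INODE:
		inode = (struct inode *)data;
		break;
	default:
		return 0;	/* nothing this backend knows how to handle */
	}

	pr_debug("event 0x%x on inode %lu (%s)\n", mask, inode->i_ino,
		 file_name ? (const char *)file_name : "");
	return 0;		/* 0 reports success, as audit_tree now does */
}

static const struct fsnotify_ops example_ops = {
	.handle_event	= example_handle_event,
};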
diff --git a/kernel/exit.c b/kernel/exit.c
index a949819055d5..1e77fc645317 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -74,6 +74,7 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 		__this_cpu_dec(process_counts);
 	}
 	list_del_rcu(&p->thread_group);
+	list_del_rcu(&p->thread_node);
 }
 
 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 294189fc7ac8..2f11bbe376b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1035,6 +1035,11 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->nr_threads = 1;
 	atomic_set(&sig->live, 1);
 	atomic_set(&sig->sigcnt, 1);
+
+	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
+	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
+	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
+
 	init_waitqueue_head(&sig->wait_chldexit);
 	sig->curr_target = tsk;
 	init_sigpending(&sig->shared_pending);
@@ -1474,6 +1479,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			atomic_inc(&current->signal->sigcnt);
 			list_add_tail_rcu(&p->thread_group,
 					  &p->group_leader->thread_group);
+			list_add_tail_rcu(&p->thread_node,
+					  &p->signal->thread_head);
 		}
 		attach_pid(p, PIDTYPE_PID);
 		nr_threads++;
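
The copy_signal() hunk above seeds signal->thread_head with the new task's thread_node using two compound-literal assignments rather than INIT_LIST_HEAD() followed by list_add(); both forms write the same four pointers and leave the same two-element circular list, which is what the in-diff comment alludes to. A small, self-contained user-space sketch of that equivalence follows (minimal list_head and made-up variable names, not kernel code):

/*
 * Illustrative sketch only -- not part of this diff.  Demonstrates that the
 * two compound-literal assignments used in copy_signal() above build the
 * same two-element circular list as INIT_LIST_HEAD() plus list_add().
 */
#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static void list_add(struct list_head *new, struct list_head *head)
{
	struct list_head *next = head->next;

	next->prev = new;
	new->next = next;
	new->prev = head;
	head->next = new;
}

int main(void)
{
	/* Variant 1: the shortcut used in copy_signal(). */
	struct list_head head1, node1;
	head1 = (struct list_head)LIST_HEAD_INIT(node1);
	node1 = (struct list_head)LIST_HEAD_INIT(head1);

	/* Variant 2: the conventional INIT_LIST_HEAD() + list_add(). */
	struct list_head head2, node2;
	INIT_LIST_HEAD(&head2);
	list_add(&node2, &head2);

	/* Both end up as the same two-element circular list. */
	assert(head1.next == &node1 && head1.prev == &node1);
	assert(node1.next == &head1 && node1.prev == &head1);
	assert(head2.next == &node2 && head2.prev == &node2);
	assert(node2.next == &head2 && node2.prev == &head2);

	puts("equivalent");
	return 0;
}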
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index b38109e204af..d9f61a145802 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -637,7 +637,7 @@ __register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
 		BUG_ON(!region);
 	} else
 		/* This allocation cannot fail */
-		region = alloc_bootmem(sizeof(struct nosave_region));
+		region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
 	region->start_pfn = start_pfn;
 	region->end_pfn = end_pfn;
 	list_add_tail(&region->list, &nosave_regions);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index be7c86bae576..f8b41bddc6dc 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -757,14 +757,10 @@ void __init setup_log_buf(int early)
 		return;
 
 	if (early) {
-		unsigned long mem;
-
-		mem = memblock_alloc(new_log_buf_len, PAGE_SIZE);
-		if (!mem)
-			return;
-		new_log_buf = __va(mem);
+		new_log_buf =
+			memblock_virt_alloc(new_log_buf_len, PAGE_SIZE);
 	} else {
-		new_log_buf = alloc_bootmem_nopanic(new_log_buf_len);
+		new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len, 0);
 	}
 
 	if (unlikely(!new_log_buf)) {
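
The snapshot.c and printk.c hunks above are part of the conversion from the bootmem/memblock_alloc() interfaces to memblock_virt_alloc*(), which hands back a mapped (virtual) address directly instead of a physical address that still needs __va() and an explicit failure check. A hedged sketch of the resulting calling pattern follows; example_early_buffer is a made-up helper, and the claim that the non-_nopanic variant returns zeroed memory and panics rather than returning NULL is my reading of the new helpers, not something shown in these hunks.

/*
 * Illustrative sketch only -- not part of this diff; example_early_buffer()
 * is a made-up helper.
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/types.h>

static void * __init example_early_buffer(unsigned long len, bool may_fail)
{
	if (!may_fail)
		/* Assumed behaviour: zeroed, already-mapped memory; panics
		 * instead of returning NULL, so no error check is needed. */
		return memblock_virt_alloc(len, PAGE_SIZE);

	/* The _nopanic flavour can return NULL and must be checked by the
	 * caller, as setup_log_buf() above still does. */
	return memblock_virt_alloc_nopanic(len, 0);
}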
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3897e09e86a2..4d6964e49711 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1108,6 +1108,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p)
 	if (!cpumask_test_cpu(arg.src_cpu, tsk_cpus_allowed(arg.dst_task)))
 		goto out;
 
+	trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
 	ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
 
 out:
@@ -4603,6 +4604,7 @@ int migrate_task_to(struct task_struct *p, int target_cpu)
 
 	/* TODO: This is not properly updating schedstats */
 
+	trace_sched_move_numa(p, curr_cpu, target_cpu);
 	return stop_one_cpu(curr_cpu, migration_cpu_stop, &arg);
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b24b6cfde9aa..867b0a4b0893 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1250,11 +1250,15 @@ static int task_numa_migrate(struct task_struct *p)
 	p->numa_scan_period = task_scan_min(p);
 
 	if (env.best_task == NULL) {
-		int ret = migrate_task_to(p, env.best_cpu);
+		ret = migrate_task_to(p, env.best_cpu);
+		if (ret != 0)
+			trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
 		return ret;
 	}
 
 	ret = migrate_swap(p, env.best_task);
+	if (ret != 0)
+		trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
 	put_task_struct(env.best_task);
 	return ret;
 }
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c8da99f905cf..332cefcdb04b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -95,8 +95,6 @@
 #if defined(CONFIG_SYSCTL)
 
 /* External variables not in a header file. */
-extern int sysctl_overcommit_memory;
-extern int sysctl_overcommit_ratio;
 extern int max_threads;
 extern int suid_dumpable;
 #ifdef CONFIG_COREDUMP
@@ -1121,7 +1119,14 @@ static struct ctl_table vm_table[] = {
 		.data		= &sysctl_overcommit_ratio,
 		.maxlen		= sizeof(sysctl_overcommit_ratio),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= overcommit_ratio_handler,
+	},
+	{
+		.procname	= "overcommit_kbytes",
+		.data		= &sysctl_overcommit_kbytes,
+		.maxlen		= sizeof(sysctl_overcommit_kbytes),
+		.mode		= 0644,
+		.proc_handler	= overcommit_kbytes_handler,
 	},
 	{
 		.procname	= "page-cluster",
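
The last hunk wires up a new vm_table entry for overcommit_kbytes next to the existing overcommit_ratio, whose handler also changes from plain proc_dointvec to overcommit_ratio_handler, presumably so the two settings stay consistent with each other. A tiny user-space sketch of writing the new knob follows; the /proc/sys/vm path follows the normal sysctl-to-procfs mapping, set_overcommit_kbytes() is a made-up helper, and the interpretation of the value (an absolute amount in kB used in place of the percentage ratio when computing the commit limit) is an assumption, not something visible in this hunk.

/*
 * Illustrative sketch only -- not part of this diff.  Writes the new
 * vm.overcommit_kbytes sysctl added above; set_overcommit_kbytes() is a
 * made-up helper and the value's semantics are assumed, not shown here.
 */
#include <stdio.h>

static int set_overcommit_kbytes(unsigned long kbytes)
{
	FILE *f = fopen("/proc/sys/vm/overcommit_kbytes", "w");

	if (!f)
		return -1;
	fprintf(f, "%lu\n", kbytes);
	return fclose(f);
}

int main(void)
{
	/* Example: allow an extra 1 GiB of commit charge (assumed meaning). */
	return set_overcommit_kbytes(1024 * 1024) ? 1 : 0;
}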