diff options
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/cgroup/cgroup.c | 12 | ||||
| -rw-r--r-- | kernel/crash_core.c | 2 | ||||
| -rw-r--r-- | kernel/kthread.c | 3 | ||||
| -rw-r--r-- | kernel/power/snapshot.c | 17 | ||||
| -rw-r--r-- | kernel/sched/core.c | 3 | ||||
| -rw-r--r-- | kernel/sched/fair.c | 15 | ||||
| -rw-r--r-- | kernel/sysctl.c | 2 |
7 files changed, 39 insertions(+), 15 deletions(-)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index cef98502b124..17828333f7c3 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
| @@ -3534,6 +3534,16 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf, | |||
| 3534 | return ret ?: nbytes; | 3534 | return ret ?: nbytes; |
| 3535 | } | 3535 | } |
| 3536 | 3536 | ||
| 3537 | static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt) | ||
| 3538 | { | ||
| 3539 | struct cftype *cft = of->kn->priv; | ||
| 3540 | |||
| 3541 | if (cft->poll) | ||
| 3542 | return cft->poll(of, pt); | ||
| 3543 | |||
| 3544 | return kernfs_generic_poll(of, pt); | ||
| 3545 | } | ||
| 3546 | |||
| 3537 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) | 3547 | static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos) |
| 3538 | { | 3548 | { |
| 3539 | return seq_cft(seq)->seq_start(seq, ppos); | 3549 | return seq_cft(seq)->seq_start(seq, ppos); |
| @@ -3572,6 +3582,7 @@ static struct kernfs_ops cgroup_kf_single_ops = { | |||
| 3572 | .open = cgroup_file_open, | 3582 | .open = cgroup_file_open, |
| 3573 | .release = cgroup_file_release, | 3583 | .release = cgroup_file_release, |
| 3574 | .write = cgroup_file_write, | 3584 | .write = cgroup_file_write, |
| 3585 | .poll = cgroup_file_poll, | ||
| 3575 | .seq_show = cgroup_seqfile_show, | 3586 | .seq_show = cgroup_seqfile_show, |
| 3576 | }; | 3587 | }; |
| 3577 | 3588 | ||
| @@ -3580,6 +3591,7 @@ static struct kernfs_ops cgroup_kf_ops = { | |||
| 3580 | .open = cgroup_file_open, | 3591 | .open = cgroup_file_open, |
| 3581 | .release = cgroup_file_release, | 3592 | .release = cgroup_file_release, |
| 3582 | .write = cgroup_file_write, | 3593 | .write = cgroup_file_write, |
| 3594 | .poll = cgroup_file_poll, | ||
| 3583 | .seq_start = cgroup_seqfile_start, | 3595 | .seq_start = cgroup_seqfile_start, |
| 3584 | .seq_next = cgroup_seqfile_next, | 3596 | .seq_next = cgroup_seqfile_next, |
| 3585 | .seq_stop = cgroup_seqfile_stop, | 3597 | .seq_stop = cgroup_seqfile_stop, |
diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 933cb3e45b98..093c9f917ed0 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c | |||
| @@ -464,6 +464,8 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
| 464 | VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); | 464 | VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); |
| 465 | #ifdef CONFIG_HUGETLB_PAGE | 465 | #ifdef CONFIG_HUGETLB_PAGE |
| 466 | VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR); | 466 | VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR); |
| 467 | #define PAGE_OFFLINE_MAPCOUNT_VALUE (~PG_offline) | ||
| 468 | VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE); | ||
| 467 | #endif | 469 | #endif |
| 468 | 470 | ||
| 469 | arch_crash_save_vmcoreinfo(); | 471 | arch_crash_save_vmcoreinfo(); |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 9cf20cc5ebe3..5942eeafb9ac 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/freezer.h> | 20 | #include <linux/freezer.h> |
| 21 | #include <linux/ptrace.h> | 21 | #include <linux/ptrace.h> |
| 22 | #include <linux/uaccess.h> | 22 | #include <linux/uaccess.h> |
| 23 | #include <linux/numa.h> | ||
| 23 | #include <trace/events/sched.h> | 24 | #include <trace/events/sched.h> |
| 24 | 25 | ||
| 25 | static DEFINE_SPINLOCK(kthread_create_lock); | 26 | static DEFINE_SPINLOCK(kthread_create_lock); |
| @@ -681,7 +682,7 @@ __kthread_create_worker(int cpu, unsigned int flags, | |||
| 681 | { | 682 | { |
| 682 | struct kthread_worker *worker; | 683 | struct kthread_worker *worker; |
| 683 | struct task_struct *task; | 684 | struct task_struct *task; |
| 684 | int node = -1; | 685 | int node = NUMA_NO_NODE; |
| 685 | 686 | ||
| 686 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); | 687 | worker = kzalloc(sizeof(*worker), GFP_KERNEL); |
| 687 | if (!worker) | 688 | if (!worker) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 640b2034edd6..4802b039b89f 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
| @@ -1215,14 +1215,16 @@ static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn) | |||
| 1215 | if (!pfn_valid(pfn)) | 1215 | if (!pfn_valid(pfn)) |
| 1216 | return NULL; | 1216 | return NULL; |
| 1217 | 1217 | ||
| 1218 | page = pfn_to_page(pfn); | 1218 | page = pfn_to_online_page(pfn); |
| 1219 | if (page_zone(page) != zone) | 1219 | if (!page || page_zone(page) != zone) |
| 1220 | return NULL; | 1220 | return NULL; |
| 1221 | 1221 | ||
| 1222 | BUG_ON(!PageHighMem(page)); | 1222 | BUG_ON(!PageHighMem(page)); |
| 1223 | 1223 | ||
| 1224 | if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) || | 1224 | if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) |
| 1225 | PageReserved(page)) | 1225 | return NULL; |
| 1226 | |||
| 1227 | if (PageReserved(page) || PageOffline(page)) | ||
| 1226 | return NULL; | 1228 | return NULL; |
| 1227 | 1229 | ||
| 1228 | if (page_is_guard(page)) | 1230 | if (page_is_guard(page)) |
| @@ -1277,8 +1279,8 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |||
| 1277 | if (!pfn_valid(pfn)) | 1279 | if (!pfn_valid(pfn)) |
| 1278 | return NULL; | 1280 | return NULL; |
| 1279 | 1281 | ||
| 1280 | page = pfn_to_page(pfn); | 1282 | page = pfn_to_online_page(pfn); |
| 1281 | if (page_zone(page) != zone) | 1283 | if (!page || page_zone(page) != zone) |
| 1282 | return NULL; | 1284 | return NULL; |
| 1283 | 1285 | ||
| 1284 | BUG_ON(PageHighMem(page)); | 1286 | BUG_ON(PageHighMem(page)); |
| @@ -1286,6 +1288,9 @@ static struct page *saveable_page(struct zone *zone, unsigned long pfn) | |||
| 1286 | if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) | 1288 | if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page)) |
| 1287 | return NULL; | 1289 | return NULL; |
| 1288 | 1290 | ||
| 1291 | if (PageOffline(page)) | ||
| 1292 | return NULL; | ||
| 1293 | |||
| 1289 | if (PageReserved(page) | 1294 | if (PageReserved(page) |
| 1290 | && (!kernel_page_present(page) || pfn_is_nosave(pfn))) | 1295 | && (!kernel_page_present(page) || pfn_is_nosave(pfn))) |
| 1291 | return NULL; | 1296 | return NULL; |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f3901b84d217..ead464a0f2e5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -2220,6 +2220,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) | |||
| 2220 | INIT_HLIST_HEAD(&p->preempt_notifiers); | 2220 | INIT_HLIST_HEAD(&p->preempt_notifiers); |
| 2221 | #endif | 2221 | #endif |
| 2222 | 2222 | ||
| 2223 | #ifdef CONFIG_COMPACTION | ||
| 2224 | p->capture_control = NULL; | ||
| 2225 | #endif | ||
| 2223 | init_numa_balancing(clone_flags, p); | 2226 | init_numa_balancing(clone_flags, p); |
| 2224 | } | 2227 | } |
| 2225 | 2228 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8213ff6e365d..ea74d43924b2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -1173,7 +1173,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) | |||
| 1173 | 1173 | ||
| 1174 | /* New address space, reset the preferred nid */ | 1174 | /* New address space, reset the preferred nid */ |
| 1175 | if (!(clone_flags & CLONE_VM)) { | 1175 | if (!(clone_flags & CLONE_VM)) { |
| 1176 | p->numa_preferred_nid = -1; | 1176 | p->numa_preferred_nid = NUMA_NO_NODE; |
| 1177 | return; | 1177 | return; |
| 1178 | } | 1178 | } |
| 1179 | 1179 | ||
| @@ -1193,13 +1193,13 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p) | |||
| 1193 | 1193 | ||
| 1194 | static void account_numa_enqueue(struct rq *rq, struct task_struct *p) | 1194 | static void account_numa_enqueue(struct rq *rq, struct task_struct *p) |
| 1195 | { | 1195 | { |
| 1196 | rq->nr_numa_running += (p->numa_preferred_nid != -1); | 1196 | rq->nr_numa_running += (p->numa_preferred_nid != NUMA_NO_NODE); |
| 1197 | rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); | 1197 | rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p)); |
| 1198 | } | 1198 | } |
| 1199 | 1199 | ||
| 1200 | static void account_numa_dequeue(struct rq *rq, struct task_struct *p) | 1200 | static void account_numa_dequeue(struct rq *rq, struct task_struct *p) |
| 1201 | { | 1201 | { |
| 1202 | rq->nr_numa_running -= (p->numa_preferred_nid != -1); | 1202 | rq->nr_numa_running -= (p->numa_preferred_nid != NUMA_NO_NODE); |
| 1203 | rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); | 1203 | rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p)); |
| 1204 | } | 1204 | } |
| 1205 | 1205 | ||
| @@ -1413,7 +1413,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page, | |||
| 1413 | * two full passes of the "multi-stage node selection" test that is | 1413 | * two full passes of the "multi-stage node selection" test that is |
| 1414 | * executed below. | 1414 | * executed below. |
| 1415 | */ | 1415 | */ |
| 1416 | if ((p->numa_preferred_nid == -1 || p->numa_scan_seq <= 4) && | 1416 | if ((p->numa_preferred_nid == NUMA_NO_NODE || p->numa_scan_seq <= 4) && |
| 1417 | (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) | 1417 | (cpupid_pid_unset(last_cpupid) || cpupid_match_pid(p, last_cpupid))) |
| 1418 | return true; | 1418 | return true; |
| 1419 | 1419 | ||
| @@ -1861,7 +1861,7 @@ static void numa_migrate_preferred(struct task_struct *p) | |||
| 1861 | unsigned long interval = HZ; | 1861 | unsigned long interval = HZ; |
| 1862 | 1862 | ||
| 1863 | /* This task has no NUMA fault statistics yet */ | 1863 | /* This task has no NUMA fault statistics yet */ |
| 1864 | if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults)) | 1864 | if (unlikely(p->numa_preferred_nid == NUMA_NO_NODE || !p->numa_faults)) |
| 1865 | return; | 1865 | return; |
| 1866 | 1866 | ||
| 1867 | /* Periodically retry migrating the task to the preferred node */ | 1867 | /* Periodically retry migrating the task to the preferred node */ |
| @@ -2108,7 +2108,7 @@ static int preferred_group_nid(struct task_struct *p, int nid) | |||
| 2108 | 2108 | ||
| 2109 | static void task_numa_placement(struct task_struct *p) | 2109 | static void task_numa_placement(struct task_struct *p) |
| 2110 | { | 2110 | { |
| 2111 | int seq, nid, max_nid = -1; | 2111 | int seq, nid, max_nid = NUMA_NO_NODE; |
| 2112 | unsigned long max_faults = 0; | 2112 | unsigned long max_faults = 0; |
| 2113 | unsigned long fault_types[2] = { 0, 0 }; | 2113 | unsigned long fault_types[2] = { 0, 0 }; |
| 2114 | unsigned long total_faults; | 2114 | unsigned long total_faults; |
| @@ -2651,7 +2651,8 @@ static void update_scan_period(struct task_struct *p, int new_cpu) | |||
| 2651 | * the preferred node. | 2651 | * the preferred node. |
| 2652 | */ | 2652 | */ |
| 2653 | if (dst_nid == p->numa_preferred_nid || | 2653 | if (dst_nid == p->numa_preferred_nid || |
| 2654 | (p->numa_preferred_nid != -1 && src_nid != p->numa_preferred_nid)) | 2654 | (p->numa_preferred_nid != NUMA_NO_NODE && |
| 2655 | src_nid != p->numa_preferred_nid)) | ||
| 2655 | return; | 2656 | return; |
| 2656 | } | 2657 | } |
| 2657 | 2658 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7c2b9bc88ee8..14f30b4a1b64 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -1471,7 +1471,7 @@ static struct ctl_table vm_table[] = { | |||
| 1471 | .data = &sysctl_extfrag_threshold, | 1471 | .data = &sysctl_extfrag_threshold, |
| 1472 | .maxlen = sizeof(int), | 1472 | .maxlen = sizeof(int), |
| 1473 | .mode = 0644, | 1473 | .mode = 0644, |
| 1474 | .proc_handler = sysctl_extfrag_handler, | 1474 | .proc_handler = proc_dointvec_minmax, |
| 1475 | .extra1 = &min_extfrag_threshold, | 1475 | .extra1 = &min_extfrag_threshold, |
| 1476 | .extra2 = &max_extfrag_threshold, | 1476 | .extra2 = &max_extfrag_threshold, |
| 1477 | }, | 1477 | }, |
