Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c            |   1
-rw-r--r--  kernel/cpuset.c            | 106
-rw-r--r--  kernel/cred.c              |   6
-rw-r--r--  kernel/early_res.c         |   6
-rw-r--r--  kernel/irq/chip.c          |  35
-rw-r--r--  kernel/irq/manage.c        |  22
-rw-r--r--  kernel/kthread.c           |   2
-rw-r--r--  kernel/posix-cpu-timers.c  |  10
-rw-r--r--  kernel/rcupdate.c          |  23
-rw-r--r--  kernel/resource.c          |  44
-rw-r--r--  kernel/sched.c             |  12
-rw-r--r--  kernel/slow-work.c         |   2
-rw-r--r--  kernel/slow-work.h         |   8
-rw-r--r--  kernel/softlockup.c        |   4
-rw-r--r--  kernel/time/tick-oneshot.c |  52
-rw-r--r--  kernel/time/timekeeping.c  |   3
-rw-r--r--  kernel/time/timer_list.c   |   3
-rw-r--r--  kernel/timer.c             |   1
-rw-r--r--  kernel/trace/ring_buffer.c |  14
19 files changed, 270 insertions(+), 84 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ef909a329750..e2769e13980c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -27,7 +27,6 @@
  */
 
 #include <linux/cgroup.h>
-#include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ba401fab459f..d10946748ec2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	 * call to guarantee_online_mems(), as we know no one is changing
 	 * our task's cpuset.
 	 *
-	 * Hold callback_mutex around the two modifications of our tasks
-	 * mems_allowed to synchronize with cpuset_mems_allowed().
-	 *
 	 * While the mm_struct we are migrating is typically from some
 	 * other task, the task_struct mems_allowed that we are hacking
 	 * is for our current task, which must allocate new pages for that
@@ -973,15 +970,20 @@ static void cpuset_change_nodemask(struct task_struct *p,
 	struct cpuset *cs;
 	int migrate;
 	const nodemask_t *oldmem = scan->data;
-	nodemask_t newmems;
+	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
+
+	if (!newmems)
+		return;
 
 	cs = cgroup_cs(scan->cg);
-	guarantee_online_mems(cs, &newmems);
+	guarantee_online_mems(cs, newmems);
 
 	task_lock(p);
-	cpuset_change_task_nodemask(p, &newmems);
+	cpuset_change_task_nodemask(p, newmems);
 	task_unlock(p);
 
+	NODEMASK_FREE(newmems);
+
 	mm = get_task_mm(p);
 	if (!mm)
 		return;
@@ -1051,16 +1053,21 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			   const char *buf)
 {
-	nodemask_t oldmem;
+	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
 	int retval;
 	struct ptr_heap heap;
 
+	if (!oldmem)
+		return -ENOMEM;
+
 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
 	 * it's read-only
 	 */
-	if (cs == &top_cpuset)
-		return -EACCES;
+	if (cs == &top_cpuset) {
+		retval = -EACCES;
+		goto done;
+	}
 
 	/*
 	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
@@ -1076,11 +1083,13 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			goto done;
 
 		if (!nodes_subset(trialcs->mems_allowed,
-				node_states[N_HIGH_MEMORY]))
-			return -EINVAL;
+				node_states[N_HIGH_MEMORY])) {
+			retval = -EINVAL;
+			goto done;
+		}
 	}
-	oldmem = cs->mems_allowed;
-	if (nodes_equal(oldmem, trialcs->mems_allowed)) {
+	*oldmem = cs->mems_allowed;
+	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
 		retval = 0;		/* Too easy - nothing to do */
 		goto done;
 	}
@@ -1096,10 +1105,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask(cs, &oldmem, &heap);
+	update_tasks_nodemask(cs, oldmem, &heap);
 
 	heap_free(&heap);
 done:
+	NODEMASK_FREE(oldmem);
 	return retval;
 }
 
@@ -1384,40 +1394,47 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 			  struct cgroup *oldcont, struct task_struct *tsk,
 			  bool threadgroup)
 {
-	nodemask_t from, to;
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
+	NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL);
+	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
+
+	if (from == NULL || to == NULL)
+		goto alloc_fail;
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
-		to = node_possible_map;
 	} else {
 		guarantee_online_cpus(cs, cpus_attach);
-		guarantee_online_mems(cs, &to);
 	}
+	guarantee_online_mems(cs, to);
 
 	/* do per-task migration stuff possibly for each in the threadgroup */
-	cpuset_attach_task(tsk, &to, cs);
+	cpuset_attach_task(tsk, to, cs);
 	if (threadgroup) {
 		struct task_struct *c;
 		rcu_read_lock();
 		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			cpuset_attach_task(c, &to, cs);
+			cpuset_attach_task(c, to, cs);
 		}
 		rcu_read_unlock();
 	}
 
 	/* change mm; only needs to be done once even if threadgroup */
-	from = oldcs->mems_allowed;
-	to = cs->mems_allowed;
+	*from = oldcs->mems_allowed;
+	*to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
 	if (mm) {
-		mpol_rebind_mm(mm, &to);
+		mpol_rebind_mm(mm, to);
 		if (is_memory_migrate(cs))
-			cpuset_migrate_mm(mm, &from, &to);
+			cpuset_migrate_mm(mm, from, to);
 		mmput(mm);
 	}
+
+alloc_fail:
+	NODEMASK_FREE(from);
+	NODEMASK_FREE(to);
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -1562,13 +1579,21 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 
 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
-	nodemask_t mask;
+	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
+	int retval;
+
+	if (mask == NULL)
+		return -ENOMEM;
 
 	mutex_lock(&callback_mutex);
-	mask = cs->mems_allowed;
+	*mask = cs->mems_allowed;
 	mutex_unlock(&callback_mutex);
 
-	return nodelist_scnprintf(page, PAGE_SIZE, mask);
+	retval = nodelist_scnprintf(page, PAGE_SIZE, *mask);
+
+	NODEMASK_FREE(mask);
+
+	return retval;
 }
 
 static ssize_t cpuset_common_file_read(struct cgroup *cont,
@@ -1997,7 +2022,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
 	struct cgroup *cont;
-	nodemask_t oldmems;
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return;
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
@@ -2014,7 +2042,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
-		oldmems = cp->mems_allowed;
+		*oldmems = cp->mems_allowed;
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
@@ -2030,9 +2058,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, &oldmems, NULL);
+			update_tasks_nodemask(cp, oldmems, NULL);
 		}
 	}
+	NODEMASK_FREE(oldmems);
 }
 
 /*
@@ -2090,20 +2119,33 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 static int cpuset_track_online_nodes(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return NOTIFY_DONE;
+
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-	case MEM_OFFLINE:
+		*oldmems = top_cpuset.mems_allowed;
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 		mutex_unlock(&callback_mutex);
-		if (action == MEM_OFFLINE)
-			scan_for_empty_cpusets(&top_cpuset);
+		update_tasks_nodemask(&top_cpuset, oldmems, NULL);
+		break;
+	case MEM_OFFLINE:
+		/*
+		 * needn't update top_cpuset.mems_allowed explicitly because
+		 * scan_for_empty_cpusets() will update it.
+		 */
+		scan_for_empty_cpusets(&top_cpuset);
 		break;
 	default:
 		break;
 	}
 	cgroup_unlock();
+
+	NODEMASK_FREE(oldmems);
 	return NOTIFY_OK;
 }
 #endif
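The change repeated throughout cpuset.c — turning on-stack nodemask_t variables into NODEMASK_ALLOC()/NODEMASK_FREE() pairs — exists because a nodemask_t scales with CONFIG_NODES_SHIFT and can grow too large for the kernel stack on big-NUMA configurations. A rough sketch of the helpers from <linux/nodemask.h> (approximate; the exact size threshold varies by version):

	/*
	 * On many-node configs the mask is heap-allocated, which is why the
	 * patch adds NULL checks at every use; on small configs the "pointer"
	 * aliases a local variable and NODEMASK_FREE() is a no-op.
	 */
	#if NODES_SHIFT > 8
	#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
	#define NODEMASK_FREE(m)	kfree(m)
	#else
	#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
	#define NODEMASK_FREE(m)	do {} while (0)
	#endif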
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca18790c..1b1129d0cce8 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -364,7 +364,7 @@ struct cred *prepare_usermodehelper_creds(void)
 
 	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
 	if (!new)
-		return NULL;
+		goto free_tgcred;
 
 	kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -397,6 +397,10 @@ struct cred *prepare_usermodehelper_creds(void)
 
 error:
 	put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+	kfree(tgcred);
+#endif
 	return NULL;
 }
 
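For context (the allocation sits above the quoted hunks, so this is a hedged reconstruction, not the verbatim cred.c code): when CONFIG_KEYS is set, prepare_usermodehelper_creds() allocates a thread-group cred structure before allocating the cred itself, roughly:

	#ifdef CONFIG_KEYS
		tgcred = kmalloc(sizeof(*tgcred), GFP_ATOMIC);	/* sketch */
		if (!tgcred)
			return NULL;
	#endif

		new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
		if (!new)
			goto free_tgcred;	/* was "return NULL", leaking tgcred */

The patch reroutes the kmem_cache_alloc() failure path through a label that frees tgcred, closing that leak.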
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb78..31aa9332ef3f 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
 	struct early_res *r;
 	int i;
 
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
 try_next:
 	i = find_overlapped_early(start, end);
 	if (i >= max_early_res)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 42ec11b2af8a..b7091d5ca2f8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
 	}
+	desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->mask) {
+		desc->chip->mask(irq);
+		desc->status |= IRQ_MASKED;
+	}
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->unmask) {
+		desc->chip->unmask(irq);
+		desc->status &= ~IRQ_MASKED;
+	}
 }
 
 /*
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
-	if (unlikely(desc->status & IRQ_ONESHOT))
-		desc->status |= IRQ_MASKED;
-	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-		desc->chip->unmask(irq);
+	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+		unmask_irq(desc, irq);
 out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
 		desc->status |= IRQ_PENDING;
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
+		mask_irq(desc, irq);
 		goto out;
 	}
 
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		irqreturn_t action_ret;
 
 		if (unlikely(!action)) {
-			desc->chip->mask(irq);
+			mask_irq(desc, irq);
 			goto out_unlock;
 		}
 
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		if (unlikely((desc->status &
 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
 			      (IRQ_PENDING | IRQ_MASKED))) {
-			desc->chip->unmask(irq);
-			desc->status &= ~IRQ_MASKED;
+			unmask_irq(desc, irq);
 		}
 
 		desc->status &= ~IRQ_PENDING;
@@ -716,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 	__set_irq_handler(irq, handle, 0, name);
 }
 
-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -731,7 +744,7 @@ void __init set_irq_noprobe(unsigned int irq)
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c7..398fda155f6e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (!desc)
 		return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
 
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
 	return !action;
 }
 
@@ -483,8 +487,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
 	chip_bus_lock(irq, desc);
 	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
 	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
 		desc->status &= ~IRQ_MASKED;
 		desc->chip->unmask(irq);
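The race the new comment in irq_finalize_oneshot() describes, as an illustrative timeline:

	CPU0 (irq thread)                     CPU1 (hard irq handler)
	-----------------                     -----------------------
	                                      primary handler still running,
	                                      IRQ_INPROGRESS set
	thread finishes early,
	irq_finalize_oneshot() unmasks
	                                      line fires again; flow handler
	                                      masks it, sees IRQ_INPROGRESS
	                                      and bails out
	                                      -> line stays masked forever

Spinning with cpu_relax() until IRQ_INPROGRESS clears before unmasking closes that window.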
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 82ed0ea15194..83911c780175 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -219,7 +219,7 @@ int kthreadd(void *unused)
 	set_task_comm(tsk, "kthreadd");
 	ignore_signals(tsk);
 	set_cpus_allowed_ptr(tsk, cpu_all_mask);
-	set_mems_allowed(node_possible_map);
+	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
 	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
 
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1a22dfd42df9..bc7704b3a443 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1061,9 +1061,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
-static void stop_process_timers(struct task_struct *tsk)
+static void stop_process_timers(struct signal_struct *sig)
 {
-	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
 	if (!cputimer->running)
@@ -1072,6 +1072,10 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
 	spin_unlock_irqrestore(&cputimer->lock, flags);
+
+	sig->cputime_expires.prof_exp = cputime_zero;
+	sig->cputime_expires.virt_exp = cputime_zero;
+	sig->cputime_expires.sched_exp = 0;
 }
 
 static u32 onecputick;
@@ -1133,7 +1137,7 @@ static void check_process_timers(struct task_struct *tsk,
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
 	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
-		stop_process_timers(tsk);
+		stop_process_timers(sig);
 		return;
 	}
 
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f1125c1a6321..63fe25433980 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,6 +45,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/kernel_stat.h>
+#include <linux/hardirq.h>
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * Check for bottom half being disabled, which covers both the
+ * CONFIG_PROVE_RCU and not cases. Note that if someone uses
+ * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
+ * will show the situation.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ */
+int rcu_read_lock_bh_held(void)
+{
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return in_softirq();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
 /*
  * This function is invoked towards the end of the scheduler's initialization
  * process. Before this is called, the idle task might contain
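A hedged sketch of how the new predicate gets used: the lockdep-RCU checks of this era pass it to rcu_dereference_check() to assert that an RCU-bh-protected pointer is only dereferenced with bottom halves disabled (gp and struct foo below are illustrative names, not from this patch):

	struct foo *f;

	rcu_read_lock_bh();
	/* complains (under CONFIG_PROVE_RCU) if BH is not disabled here */
	f = rcu_dereference_check(gp, rcu_read_lock_bh_held());
	/* ... use f ... */
	rcu_read_unlock_bh();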
diff --git a/kernel/resource.c b/kernel/resource.c
index 2d5be5d9bf5f..9c358e263534 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -219,19 +219,34 @@ void release_child_resources(struct resource *r)
 }
 
 /**
- * request_resource - request and reserve an I/O or memory resource
+ * request_resource_conflict - request and reserve an I/O or memory resource
  * @root: root resource descriptor
  * @new: resource descriptor desired by caller
  *
- * Returns 0 for success, negative error code on error.
+ * Returns 0 for success, conflict resource on error.
 */
-int request_resource(struct resource *root, struct resource *new)
+struct resource *request_resource_conflict(struct resource *root, struct resource *new)
 {
 	struct resource *conflict;
 
 	write_lock(&resource_lock);
 	conflict = __request_resource(root, new);
 	write_unlock(&resource_lock);
+	return conflict;
+}
+
+/**
+ * request_resource - request and reserve an I/O or memory resource
+ * @root: root resource descriptor
+ * @new: resource descriptor desired by caller
+ *
+ * Returns 0 for success, negative error code on error.
+ */
+int request_resource(struct resource *root, struct resource *new)
+{
+	struct resource *conflict;
+
+	conflict = request_resource_conflict(root, new);
 	return conflict ? -EBUSY : 0;
 }
 
@@ -474,25 +489,40 @@ static struct resource * __insert_resource(struct resource *parent, struct resource *new)
 }
 
 /**
- * insert_resource - Inserts a resource in the resource tree
+ * insert_resource_conflict - Inserts resource in the resource tree
  * @parent: parent of the new resource
  * @new: new resource to insert
  *
- * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ * Returns 0 on success, conflict resource if the resource can't be inserted.
  *
- * This function is equivalent to request_resource when no conflict
+ * This function is equivalent to request_resource_conflict when no conflict
  * happens. If a conflict happens, and the conflicting resources
 * entirely fit within the range of the new resource, then the new
 * resource is inserted and the conflicting resources become children of
 * the new resource.
 */
-int insert_resource(struct resource *parent, struct resource *new)
+struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
 {
 	struct resource *conflict;
 
 	write_lock(&resource_lock);
 	conflict = __insert_resource(parent, new);
 	write_unlock(&resource_lock);
+	return conflict;
+}
+
+/**
+ * insert_resource - Inserts a resource in the resource tree
+ * @parent: parent of the new resource
+ * @new: new resource to insert
+ *
+ * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ */
+int insert_resource(struct resource *parent, struct resource *new)
+{
+	struct resource *conflict;
+
+	conflict = insert_resource_conflict(parent, new);
 	return conflict ? -EBUSY : 0;
 }
 
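The point of the new *_conflict variants is that a caller can report which resource blocked it instead of a bare -EBUSY. An illustrative caller (not from this patch set; %pR is the existing printk format specifier for resources):

	static int claim_region(struct resource *root, struct resource *new)
	{
		struct resource *conflict = request_resource_conflict(root, new);

		if (conflict) {
			printk(KERN_WARNING "%s: %pR conflicts with %pR (%s)\n",
			       new->name, new, conflict, conflict->name);
			return -EBUSY;
		}
		return 0;
	}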
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d3..49d2fa7b687a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2650,7 +2650,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
-	int cpu = get_cpu();
+	int cpu __maybe_unused = get_cpu();
 
 #ifdef CONFIG_SMP
 	/*
@@ -4902,7 +4902,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 	int ret;
 	cpumask_var_t mask;
 
-	if (len < cpumask_size())
+	if (len < nr_cpu_ids)
+		return -EINVAL;
+	if (len & (sizeof(unsigned long)-1))
 		return -EINVAL;
 
 	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4912,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
 
 	ret = sched_getaffinity(pid, mask);
 	if (ret == 0) {
-		if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+		size_t retlen = min_t(size_t, len, cpumask_size());
+
+		if (copy_to_user(user_mask_ptr, mask, retlen))
 			ret = -EFAULT;
 		else
-			ret = cpumask_size();
+			ret = retlen;
 	}
 	free_cpumask_var(mask);
 
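The sched_getaffinity() change accepts user buffers smaller than the compiled-in NR_CPUS mask (they must still cover nr_cpu_ids and be word-aligned) and copies back only min(len, cpumask_size()) bytes. A small userspace demonstration of the raw syscall, whose return value is the byte count (glibc's wrapper hides this):

	#include <sched.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		cpu_set_t set;	/* 128 bytes, what glibc passes */
		long ret = syscall(SYS_sched_getaffinity, 0, sizeof(set), &set);

		/* Before this patch, a kernel built with a large NR_CPUS could
		 * reject this len with -EINVAL; after it, the call succeeds and
		 * ret is the number of mask bytes actually copied. */
		printf("ret = %ld bytes, CPU0 %s\n", ret,
		       CPU_ISSET(0, &set) ? "set" : "clear");
		return 0;
	}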
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 7494bbf5a270..7d3f4fa9ef4f 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -637,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
 			goto cancelled;
 
 		/* the timer holds a reference whilst it is pending */
-		ret = work->ops->get_ref(work);
+		ret = slow_work_get_ref(work);
 		if (ret < 0)
 			goto cant_get_ref;
 
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 321f3c59d732..a29ebd1ef41d 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -43,28 +43,28 @@ extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
 */
 static inline void slow_work_set_thread_pid(int id, pid_t pid)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_pids[id] = pid;
 #endif
 }
 
 static inline void slow_work_mark_time(struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	work->mark = CURRENT_TIME;
 #endif
 }
 
 static inline void slow_work_begin_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	slow_work_execs[id] = work;
 #endif
 }
 
 static inline void slow_work_end_exec(int id, struct slow_work *work)
 {
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
 	write_lock(&slow_work_execs_lock);
 	slow_work_execs[id] = NULL;
 	write_unlock(&slow_work_execs_lock);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0d4c7898ab80..4b493f67dcb5 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -155,11 +155,11 @@ void softlockup_tick(void)
 	 * Wake up the high-prio watchdog task twice per
 	 * threshold timespan.
 	 */
-	if (now > touch_ts + softlockup_thresh/2)
+	if (time_after(now - softlockup_thresh/2, touch_ts))
 		wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
 	/* Warn about unreasonable delays: */
-	if (now <= (touch_ts + softlockup_thresh))
+	if (time_before_eq(now - softlockup_thresh, touch_ts))
 		return;
 
 	per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
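The switch to time_after()/time_before_eq() makes these checks safe across timestamp wraparound; the macros (essentially, from <linux/jiffies.h>) reduce to a signed subtraction:

	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before_eq(a, b)	((long)((b) - (a)) >= 0)

Worked example: just after a wrap, now = 5 and touch_ts = ULONG_MAX - 5 (i.e. touched 10 ticks ago). The plain test "now > touch_ts" is false, wrongly placing the touch in the future; time_after(now, touch_ts) computes (long)(touch_ts - now) = -11 < 0 and correctly answers yes.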
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0a8a213016f0..aada0e52680a 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -22,6 +22,29 @@
 
 #include "tick-internal.h"
 
+/* Limit min_delta to a jiffie */
+#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)
+
+static int tick_increase_min_delta(struct clock_event_device *dev)
+{
+	/* Nothing to do if we already reached the limit */
+	if (dev->min_delta_ns >= MIN_DELTA_LIMIT)
+		return -ETIME;
+
+	if (dev->min_delta_ns < 5000)
+		dev->min_delta_ns = 5000;
+	else
+		dev->min_delta_ns += dev->min_delta_ns >> 1;
+
+	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+		dev->min_delta_ns = MIN_DELTA_LIMIT;
+
+	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+	       dev->name ? dev->name : "?",
+	       (unsigned long long) dev->min_delta_ns);
+	return 0;
+}
+
 /**
  * tick_program_event internal worker function
  */
@@ -37,23 +60,28 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
 		if (!ret || !force)
 			return ret;
 
+		dev->retries++;
 		/*
-		 * We tried 2 times to program the device with the given
-		 * min_delta_ns. If that's not working then we double it
+		 * We tried 3 times to program the device with the given
+		 * min_delta_ns. If that's not working then we increase it
 		 * and emit a warning.
 		 */
 		if (++i > 2) {
 			/* Increase the min. delta and try again */
-			if (!dev->min_delta_ns)
-				dev->min_delta_ns = 5000;
-			else
-				dev->min_delta_ns += dev->min_delta_ns >> 1;
-
-			printk(KERN_WARNING
-			       "CE: %s increasing min_delta_ns to %llu nsec\n",
-			       dev->name ? dev->name : "?",
-			       (unsigned long long) dev->min_delta_ns << 1);
-
+			if (tick_increase_min_delta(dev)) {
+				/*
+				 * Get out of the loop if min_delta_ns
+				 * hit the limit already. That's
+				 * better than staying here forever.
+				 *
+				 * We clear next_event so we have a
+				 * chance that the box survives.
+				 */
+				printk(KERN_WARNING
+				       "CE: Reprogramming failure. Giving up\n");
+				dev->next_event.tv64 = KTIME_MAX;
+				return -ETIME;
+			}
 			i = 0;
 		}
 
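Worked numbers for the new backoff, assuming HZ=250 (an assumption; then MIN_DELTA_LIMIT = NSEC_PER_SEC/HZ = 4,000,000 ns): min_delta_ns grows by 50% per failed reprogram, 5000 -> 7500 -> 11250 -> ..., reaching the one-jiffy cap after roughly 17 increases. The next failure at the cap makes tick_increase_min_delta() return -ETIME, and instead of retrying forever the code clears next_event and gives up, so a machine with a badly behaved clock event device at least keeps running.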
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 16736379a9ca..39f6177fafac 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -818,7 +818,8 @@ void update_wall_time(void)
 	shift = min(shift, maxshift);
 	while (offset >= timekeeper.cycle_interval) {
 		offset = logarithmic_accumulation(offset, shift);
-		shift--;
+		if(offset < timekeeper.cycle_interval<<shift)
+			shift--;
 	}
 
 	/* correct the clock when NTP error is too big */
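Why the guard: logarithmic_accumulation() consumes cycle_interval << shift per call, and the old loop lowered shift on every pass even while the remaining offset still held full-size chunks. Worked example with offset = 16 intervals and an initial shift of 3: previously the loop consumed 8, 4, 2, 1, 1 intervals across five passes; with the guard, shift stays at 3 while offset >= (cycle_interval << 3), so it consumes 8 + 8 in two passes. For the very large offsets left behind by long-delayed timers, this keeps the accumulation loop short.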
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index bdfb8dd1050c..1a4a7dd78777 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -228,6 +228,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
 	SEQ_printf(m, " event_handler:  ");
 	print_name_offset(m, dev->event_handler);
 	SEQ_printf(m, "\n");
+	SEQ_printf(m, " retries:        %lu\n", dev->retries);
 }
 
 static void timer_list_show_tickdevices(struct seq_file *m)
@@ -257,7 +258,7 @@ static int timer_list_show(struct seq_file *m, void *v)
 	u64 now = ktime_to_ns(ktime_get());
 	int cpu;
 
-	SEQ_printf(m, "Timer List Version: v0.5\n");
+	SEQ_printf(m, "Timer List Version: v0.6\n");
 	SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
 	SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
 
diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387f..fc965eae0e87 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -880,6 +880,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
 	if (base->running_timer == timer)
 		goto out;
 
+	timer_stats_timer_clear_start_info(timer);
 	ret = 0;
 	if (timer_pending(timer)) {
 		detach_timer(timer, 1);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83b8819..d1187ef20caf 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -207,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT	0
+# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT	1
+# define RB_ARCH_ALIGNMENT		8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
@@ -1547,7 +1555,7 @@ rb_update_event(struct ring_buffer_event *event,
 
 	case 0:
 		length -= RB_EVNT_HDR_SIZE;
-		if (length > RB_MAX_SMALL_DATA)
+		if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 			event->array[0] = length;
 		else
 			event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1722,11 +1730,11 @@ static unsigned rb_calculate_event_length(unsigned length)
 	if (!length)
 		length = 1;
 
-	if (length > RB_MAX_SMALL_DATA)
+	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
 		length += sizeof(event.array[0]);
 
 	length += RB_EVNT_HDR_SIZE;
-	length = ALIGN(length, RB_ALIGNMENT);
+	length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
 	return length;
 }
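A worked example of the new alignment rule, assuming RB_ALIGNMENT is 4 and the event header is 4 bytes (both defined earlier in this file, outside the quoted hunks): for a 12-byte payload on a 64-bit arch without efficient unaligned access, RB_FORCE_8BYTE_ALIGNMENT pushes the size out into array[0] and pads to RB_ARCH_ALIGNMENT = 8, so the event occupies 12 + 4 (array[0]) + 4 (header) = 20, rounded up to 24 bytes. On other arches the same event stays at 12 + 4 = 16 bytes with the size packed into type_len. The padding cost buys naturally aligned 8-byte fields inside events, avoiding unaligned-access traps or fixups.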