Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c                     32
-rw-r--r--  kernel/kthread.c                     2
-rw-r--r--  kernel/padata.c                      5
-rw-r--r--  kernel/pid.c                         4
-rw-r--r--  kernel/profile.c                     7
-rw-r--r--  kernel/res_counter.c                22
-rw-r--r--  kernel/sched/core.c                 15
-rw-r--r--  kernel/sched/fair.c                 18
-rw-r--r--  kernel/time/timekeeping.c           50
-rw-r--r--  kernel/trace/ftrace.c                2
-rw-r--r--  kernel/trace/trace.c                 2
-rw-r--r--  kernel/trace/trace_functions.c       2
-rw-r--r--  kernel/trace/trace_irqsoff.c         2
-rw-r--r--  kernel/trace/trace_sched_wakeup.c    2
-rw-r--r--  kernel/wait.c                        2
15 files changed, 104 insertions, 63 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b017887d632f..7bb63eea6eb8 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -302,10 +302,10 @@ static void guarantee_online_cpus(const struct cpuset *cs,
  * are online, with memory. If none are online with memory, walk
  * up the cpuset hierarchy until we find one that does have some
  * online mems. If we get all the way to the top and still haven't
- * found any online mems, return node_states[N_HIGH_MEMORY].
+ * found any online mems, return node_states[N_MEMORY].
  *
  * One way or another, we guarantee to return some non-empty subset
- * of node_states[N_HIGH_MEMORY].
+ * of node_states[N_MEMORY].
  *
  * Call with callback_mutex held.
  */
@@ -313,14 +313,14 @@ static void guarantee_online_cpus(const struct cpuset *cs,
 static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
 {
         while (cs && !nodes_intersects(cs->mems_allowed,
-                                        node_states[N_HIGH_MEMORY]))
+                                        node_states[N_MEMORY]))
                 cs = cs->parent;
         if (cs)
                 nodes_and(*pmask, cs->mems_allowed,
-                          node_states[N_HIGH_MEMORY]);
+                          node_states[N_MEMORY]);
         else
-                *pmask = node_states[N_HIGH_MEMORY];
-        BUG_ON(!nodes_intersects(*pmask, node_states[N_HIGH_MEMORY]));
+                *pmask = node_states[N_MEMORY];
+        BUG_ON(!nodes_intersects(*pmask, node_states[N_MEMORY]));
 }
 
 /*
@@ -1100,7 +1100,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
                 return -ENOMEM;
 
         /*
-         * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
+         * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
          * it's read-only
          */
         if (cs == &top_cpuset) {
@@ -1122,7 +1122,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
                 goto done;
 
         if (!nodes_subset(trialcs->mems_allowed,
-                          node_states[N_HIGH_MEMORY])) {
+                          node_states[N_MEMORY])) {
                 retval = -EINVAL;
                 goto done;
         }
@@ -2026,7 +2026,7 @@ static struct cpuset *cpuset_next(struct list_head *queue)
  * before dropping down to the next. It always processes a node before
  * any of its children.
  *
- * In the case of memory hot-unplug, it will remove nodes from N_HIGH_MEMORY
+ * In the case of memory hot-unplug, it will remove nodes from N_MEMORY
  * if all present pages from a node are offlined.
  */
 static void
@@ -2065,7 +2065,7 @@ scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
 
                 /* Continue past cpusets with all mems online */
                 if (nodes_subset(cp->mems_allowed,
-                                 node_states[N_HIGH_MEMORY]))
+                                 node_states[N_MEMORY]))
                         continue;
 
                 oldmems = cp->mems_allowed;
@@ -2073,7 +2073,7 @@ scan_cpusets_upon_hotplug(struct cpuset *root, enum hotplug_event event)
                 /* Remove offline mems from this cpuset. */
                 mutex_lock(&callback_mutex);
                 nodes_and(cp->mems_allowed, cp->mems_allowed,
-                          node_states[N_HIGH_MEMORY]);
+                          node_states[N_MEMORY]);
                 mutex_unlock(&callback_mutex);
 
                 /* Move tasks from the empty cpuset to a parent */
@@ -2126,8 +2126,8 @@ void cpuset_update_active_cpus(bool cpu_online)
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
- * Keep top_cpuset.mems_allowed tracking node_states[N_HIGH_MEMORY].
- * Call this routine anytime after node_states[N_HIGH_MEMORY] changes.
+ * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
+ * Call this routine anytime after node_states[N_MEMORY] changes.
  * See cpuset_update_active_cpus() for CPU hotplug handling.
  */
 static int cpuset_track_online_nodes(struct notifier_block *self,
@@ -2140,7 +2140,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
         case MEM_ONLINE:
                 oldmems = top_cpuset.mems_allowed;
                 mutex_lock(&callback_mutex);
-                top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+                top_cpuset.mems_allowed = node_states[N_MEMORY];
                 mutex_unlock(&callback_mutex);
                 update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
                 break;
@@ -2169,7 +2169,7 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 void __init cpuset_init_smp(void)
 {
         cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
-        top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
+        top_cpuset.mems_allowed = node_states[N_MEMORY];
 
         hotplug_memory_notifier(cpuset_track_online_nodes, 10);
 
@@ -2237,7 +2237,7 @@ void cpuset_init_current_mems_allowed(void)
  *
  * Description: Returns the nodemask_t mems_allowed of the cpuset
  * attached to the specified @tsk. Guaranteed to return some non-empty
- * subset of node_states[N_HIGH_MEMORY], even if this means going outside the
+ * subset of node_states[N_MEMORY], even if this means going outside the
  * tasks cpuset.
  **/
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 29fb60caecb5..691dc2ef9baf 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -428,7 +428,7 @@ int kthreadd(void *unused)
         set_task_comm(tsk, "kthreadd");
         ignore_signals(tsk);
         set_cpus_allowed_ptr(tsk, cpu_all_mask);
-        set_mems_allowed(node_states[N_HIGH_MEMORY]);
+        set_mems_allowed(node_states[N_MEMORY]);
 
         current->flags |= PF_NOFREEZE;
 
diff --git a/kernel/padata.c b/kernel/padata.c
index 89fe3d1b9efb..072f4ee4eb89 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -171,7 +171,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
 {
         int cpu, num_cpus;
         unsigned int next_nr, next_index;
-        struct padata_parallel_queue *queue, *next_queue;
+        struct padata_parallel_queue *next_queue;
         struct padata_priv *padata;
         struct padata_list *reorder;
 
@@ -204,8 +204,7 @@ static struct padata_priv *padata_get_next(struct parallel_data *pd)
                 goto out;
         }
 
-        queue = per_cpu_ptr(pd->pqueue, smp_processor_id());
-        if (queue->cpu_index == next_queue->cpu_index) {
+        if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
                 padata = ERR_PTR(-ENODATA);
                 goto out;
         }
diff --git a/kernel/pid.c b/kernel/pid.c
index aebd4f5aaf41..fd996c1ed9f8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,8 @@
 /*
  * Generic pidhash and scalable, time-bounded PID allocator
  *
- * (C) 2002-2003 William Irwin, IBM
- * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2003 Nadia Yvette Chambers, IBM
+ * (C) 2004 Nadia Yvette Chambers, Oracle
  * (C) 2002-2004 Ingo Molnar, Red Hat
  *
  * pid-structures are backing objects for tasks sharing a given ID to chain
diff --git a/kernel/profile.c b/kernel/profile.c
index 76b8e77773ee..1f391819c42f 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -8,9 +8,10 @@
  * Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
  * Red Hat, July 2004
  * Consolidation of architecture support code for profiling,
- * William Irwin, Oracle, July 2004
+ * Nadia Yvette Chambers, Oracle, July 2004
  * Amortized hit count accounting via per-cpu open-addressed hashtables
- * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
+ * to resolve timer interrupt livelocks, Nadia Yvette Chambers,
+ * Oracle, 2004
  */
 
 #include <linux/export.h>
@@ -256,7 +257,7 @@ EXPORT_SYMBOL_GPL(unregister_timer_hook);
  * pagetable hash functions, but uses a full hashtable full of finite
  * collision chains, not just pairs of them.
  *
- * -- wli
+ * -- nyc
  */
 static void __profile_flip_buffers(void *unused)
 {
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index ad581aa2369a..3920d593e63c 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -192,25 +192,3 @@ int res_counter_memparse_write_strategy(const char *buf,
         *res = PAGE_ALIGN(*res);
         return 0;
 }
-
-int res_counter_write(struct res_counter *counter, int member,
-                      const char *buf, write_strategy_fn write_strategy)
-{
-        char *end;
-        unsigned long flags;
-        unsigned long long tmp, *val;
-
-        if (write_strategy) {
-                if (write_strategy(buf, &tmp))
-                        return -EINVAL;
-        } else {
-                tmp = simple_strtoull(buf, &end, 10);
-                if (*end != '\0')
-                        return -EINVAL;
-        }
-        spin_lock_irqsave(&counter->lock, flags);
-        val = res_counter_member(counter, member);
-        *val = tmp;
-        spin_unlock_irqrestore(&counter->lock, flags);
-        return 0;
-}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6271b89f87ac..0533496b6228 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -923,6 +923,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                 rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+        atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -953,10 +960,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
         trace_sched_migrate_task(p, new_cpu);
 
         if (task_cpu(p) != new_cpu) {
+                struct task_migration_notifier tmn;
+
                 if (p->sched_class->migrate_task_rq)
                         p->sched_class->migrate_task_rq(p, new_cpu);
                 p->se.nr_migrations++;
                 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+                tmn.task = p;
+                tmn.from_cpu = task_cpu(p);
+                tmn.to_cpu = new_cpu;
+
+                atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
         }
 
         __set_task_cpu(p, new_cpu);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 59e072b2db97..756f9f9e8542 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1265,7 +1265,6 @@ static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
         }
 
         __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
-        update_cfs_shares(cfs_rq);
 }
 
 static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
@@ -1475,8 +1474,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * Update run-time statistics of the 'current'.
          */
         update_curr(cfs_rq);
-        account_entity_enqueue(cfs_rq, se);
         enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
+        account_entity_enqueue(cfs_rq, se);
+        update_cfs_shares(cfs_rq);
 
         if (flags & ENQUEUE_WAKEUP) {
                 place_entity(cfs_rq, se, 0);
@@ -1549,6 +1549,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * Update run-time statistics of the 'current'.
          */
         update_curr(cfs_rq);
+        dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
         update_stats_dequeue(cfs_rq, se);
         if (flags & DEQUEUE_SLEEP) {
@@ -1568,8 +1569,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
         if (se != cfs_rq->curr)
                 __dequeue_entity(cfs_rq, se);
+        se->on_rq = 0;
         account_entity_dequeue(cfs_rq, se);
-        dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
 
         /*
          * Normalize the entity after updating the min_vruntime because the
@@ -1583,7 +1584,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                 return_cfs_rq_runtime(cfs_rq);
 
         update_min_vruntime(cfs_rq);
-        se->on_rq = 0;
+        update_cfs_shares(cfs_rq);
 }
 
 /*
@@ -2595,8 +2596,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 if (cfs_rq_throttled(cfs_rq))
                         break;
 
+                update_cfs_shares(cfs_rq);
                 update_entity_load_avg(se, 1);
-                update_cfs_rq_blocked_load(cfs_rq, 0);
         }
 
         if (!se) {
@@ -2656,8 +2657,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
                 if (cfs_rq_throttled(cfs_rq))
                         break;
 
+                update_cfs_shares(cfs_rq);
                 update_entity_load_avg(se, 1);
-                update_cfs_rq_blocked_load(cfs_rq, 0);
         }
 
         if (!se) {
@@ -5837,11 +5838,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
                 se = tg->se[i];
                 /* Propagate contribution to hierarchy */
                 raw_spin_lock_irqsave(&rq->lock, flags);
-                for_each_sched_entity(se) {
+                for_each_sched_entity(se)
                         update_cfs_shares(group_cfs_rq(se));
-                        /* update contribution to parent */
-                        update_entity_load_avg(se, 1);
-                }
                 raw_spin_unlock_irqrestore(&rq->lock, flags);
         }
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4c7de02eacdc..cbc6acb0db3f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -21,6 +21,7 @@
 #include <linux/time.h>
 #include <linux/tick.h>
 #include <linux/stop_machine.h>
+#include <linux/pvclock_gtod.h>
 
 
 static struct timekeeper timekeeper;
@@ -174,6 +175,54 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
         return nsec + arch_gettimeoffset();
 }
 
+static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
+
+static void update_pvclock_gtod(struct timekeeper *tk)
+{
+        raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
+}
+
+/**
+ * pvclock_gtod_register_notifier - register a pvclock timedata update listener
+ *
+ * Must hold write on timekeeper.lock
+ */
+int pvclock_gtod_register_notifier(struct notifier_block *nb)
+{
+        struct timekeeper *tk = &timekeeper;
+        unsigned long flags;
+        int ret;
+
+        write_seqlock_irqsave(&tk->lock, flags);
+        ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
+        /* update timekeeping data */
+        update_pvclock_gtod(tk);
+        write_sequnlock_irqrestore(&tk->lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
+
+/**
+ * pvclock_gtod_unregister_notifier - unregister a pvclock
+ * timedata update listener
+ *
+ * Must hold write on timekeeper.lock
+ */
+int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
+{
+        struct timekeeper *tk = &timekeeper;
+        unsigned long flags;
+        int ret;
+
+        write_seqlock_irqsave(&tk->lock, flags);
+        ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
+        write_sequnlock_irqrestore(&tk->lock, flags);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
+
 /* must hold write on timekeeper.lock */
 static void timekeeping_update(struct timekeeper *tk, bool clearntp)
 {
@@ -182,6 +231,7 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp)
                 ntp_clear();
         }
         update_vsyscall(tk);
+        update_pvclock_gtod(tk);
 }
 
 /**
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7693aaf324c6..afd092de45b7 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -10,7 +10,7 @@
  * Based on code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 
 #include <linux/stop_machine.h>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index b69cc380322d..61e081b4ba11 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -9,7 +9,7 @@
  *
  * Based on code from the latency_tracer, that is:
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/ring_buffer.h>
 #include <generated/utsrelease.h>
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index bb227e380cb5..8e3ad8082ab7 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -7,7 +7,7 @@
  * Based on code from the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/ring_buffer.h>
 #include <linux/debugfs.h>
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 5ffce7b0f33c..713a2cac4881 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -7,7 +7,7 @@
  * From code in the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/kallsyms.h>
 #include <linux/debugfs.h>
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index bc64fc137554..9fe45fcefca0 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -7,7 +7,7 @@
  * Based on code from the latency_tracer, that is:
  *
  * Copyright (C) 2004-2006 Ingo Molnar
- * Copyright (C) 2004 William Lee Irwin III
+ * Copyright (C) 2004 Nadia Yvette Chambers
  */
 #include <linux/module.h>
 #include <linux/fs.h>
diff --git a/kernel/wait.c b/kernel/wait.c
index 7fdd9eaca2c3..6698e0c04ead 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -1,7 +1,7 @@
 /*
  * Generic waiting primitives.
  *
- * (C) 2004 William Irwin, Oracle
+ * (C) 2004 Nadia Yvette Chambers, Oracle
  */
 #include <linux/init.h>
 #include <linux/export.h>