author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 18:31:08 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 18:31:08 -0500
commit     66cdd0ceaf65a18996f561b770eedde1d123b019 (patch)
tree       4892eaa422d366fce5d1e866ff1fe0988af95569 /kernel/sched
parent     896ea17d3da5f44b2625c9cda9874d7dfe447393 (diff)
parent     58b7825bc324da55415034a9f6ca5d716b8fd898 (diff)
Merge tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Marcelo Tosatti:
"Considerable KVM/PPC work, x86 kvmclock vsyscall support,
IA32_TSC_ADJUST MSR emulation, amongst others."
Fix up trivial conflict in kernel/sched/core.c due to cross-cpu
migration notifier added next to rq migration call-back.
* tag 'kvm-3.8-1' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (156 commits)
KVM: emulator: fix real mode segment checks in address linearization
VMX: remove unneeded enable_unrestricted_guest check
KVM: VMX: fix DPL during entry to protected mode
x86/kexec: crash_vmclear_local_vmcss needs __rcu
kvm: Fix irqfd resampler list walk
KVM: VMX: provide the vmclear function and a bitmap to support VMCLEAR in kdump
x86/kexec: VMCLEAR VMCSs loaded on all cpus if necessary
KVM: MMU: optimize for set_spte
KVM: PPC: booke: Get/set guest EPCR register using ONE_REG interface
KVM: PPC: bookehv: Add EPCR support in mtspr/mfspr emulation
KVM: PPC: bookehv: Add guest computation mode for irq delivery
KVM: PPC: Make EPCR a valid field for booke64 and bookehv
KVM: PPC: booke: Extend MAS2 EPN mask for 64-bit
KVM: PPC: e500: Mask MAS2 EPN high 32-bits in 32/64 tlbwe emulation
KVM: PPC: Mask ea's high 32-bits in 32/64 instr emulation
KVM: PPC: e500: Add emulation helper for getting instruction ea
KVM: PPC: bookehv64: Add support for interrupt handling
KVM: PPC: bookehv: Remove GET_VCPU macro from exception handler
KVM: PPC: booke: Fix get_tb() compile error on 64-bit
KVM: PPC: e500: Silence bogus GCC warning in tlb code
...
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c  15
1 file changed, 15 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6271b89f87ac..0533496b6228 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -923,6 +923,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -953,10 +960,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
+		struct task_migration_notifier tmn;
+
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+		tmn.task = p;
+		tmn.from_cpu = task_cpu(p);
+		tmn.to_cpu = new_cpu;
+
+		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
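For context, the hunks above add an atomic notifier chain that fires whenever a task is moved to another CPU. Below is a minimal sketch (not part of this merge) of how a subscriber, such as the kvmclock vsyscall code mentioned in the pull message, might hook the chain. The demo_* names are hypothetical, the callback simply follows the generic notifier_block convention, and struct task_migration_notifier is assumed to live in <linux/sched.h> and to carry the task/from_cpu/to_cpu fields set in the second hunk.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/sched.h>	/* assumed to declare struct task_migration_notifier */

/* Sketch only: hypothetical subscriber to the new task-migration chain. */
static int demo_migration_cb(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	/* Called from scheduler context via the atomic chain, so no sleeping. */
	pr_debug("task %d migrating: cpu %d -> cpu %d\n",
		 tmn->task->pid, tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_DONE;
}

static struct notifier_block demo_migration_nb = {
	.notifier_call = demo_migration_cb,
};

static int __init demo_migration_init(void)
{
	/* register_task_migration_notifier() is added by the change above. */
	register_task_migration_notifier(&demo_migration_nb);
	return 0;
}
early_initcall(demo_migration_init);

An atomic chain (rather than a blocking one) fits here because set_task_cpu() runs in scheduler context, so any registered callback must be non-sleeping.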