author	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 23:58:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-27 23:58:09 -0500
commit	2a7d2b96d5cba7568139d9ab157a0e97ab32440f (patch)
tree	ad029d8cc7b7068b7250e914360ec6315fdfa114 /arch/x86
parent	e3c4877de8b9d93bd47b6ee88eb594b1c1e10da5 (diff)
parent	b67bfe0d42cac56c512dd5da4b1b347a23f4b70a (diff)
Merge branch 'akpm' (final batch from Andrew)
Merge third patch-bomb from Andrew Morton:
 "This wraps me up for -rc1.
   - Lots of misc stuff and things which were deferred/missed from
     patchbombings 1 & 2.
   - ocfs2 things
   - lib/scatterlist
   - hfsplus
   - fatfs
   - documentation
   - signals
   - procfs
   - lockdep
   - coredump
   - seqfile core
   - kexec
   - Tejun's large IDR tree reworkings
   - ipmi
   - partitions
   - nbd
   - random() things
   - kfifo
   - tools/testing/selftests updates
   - Sasha's large and pointless hlist cleanup"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (163 commits)
  hlist: drop the node parameter from iterators
  kcmp: make it depend on CHECKPOINT_RESTORE
  selftests: add a simple doc
  tools/testing/selftests/Makefile: rearrange targets
  selftests/efivarfs: add create-read test
  selftests/efivarfs: add empty file creation test
  selftests: add tests for efivarfs
  kfifo: fix kfifo_alloc() and kfifo_init()
  kfifo: move kfifo.c from kernel/ to lib/
  arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS
  w1: add support for DS2413 Dual Channel Addressable Switch
  memstick: move the dereference below the NULL test
  drivers/pps/clients/pps-gpio.c: use devm_kzalloc
  Documentation/DMA-API-HOWTO.txt: fix typo
  include/linux/eventfd.h: fix incorrect filename in a comment
  mtd: mtd_stresstest: use prandom_bytes()
  mtd: mtd_subpagetest: convert to use prandom library
  mtd: mtd_speedtest: use prandom_bytes
  mtd: mtd_pagetest: convert to use prandom library
  mtd: mtd_oobtest: convert to use prandom library
  ...
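The headline commit for this subtree, "hlist: drop the node parameter from iterators", changes the hlist iterator API so the typed entry pointer itself is the loop cursor; the separate struct hlist_node cursor argument disappears from every call site. The shape of the conversion, as an illustrative sketch distilled from the kprobes hunks below:

	/* before: a struct hlist_node cursor ("node") had to be supplied */
	struct kretprobe_instance *ri;
	struct hlist_node *node, *tmp;

	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		/* ... use ri ... */
	}

	/* after: "ri" is the cursor; only the safe-iteration lookahead stays */
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		/* ... use ri ... */
	}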
Diffstat (limited to 'arch/x86')
 arch/x86/Kconfig               |  1 +
 arch/x86/kernel/kprobes/core.c |  8 ++++----
 arch/x86/kvm/mmu.c             | 26 ++++++++++----------------
 3 files changed, 15 insertions(+), 20 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a9383370311..a4f24f5b1218 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -112,6 +112,7 @@ config X86
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
+	select HAVE_VIRT_TO_BUS
 	select MODULES_USE_ELF_REL if X86_32
 	select MODULES_USE_ELF_RELA if X86_64
 	select CLONE_BACKWARDS if X86_32
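This one-line hunk pairs with "arch Kconfig: centralise CONFIG_ARCH_NO_VIRT_TO_BUS" in the series above: the opt-out ARCH_NO_VIRT_TO_BUS symbol is replaced by an opt-in HAVE_VIRT_TO_BUS that each architecture still providing virt_to_bus() must select, x86 included. A hedged sketch of how dependent code keys off the resulting option; "card", "buf" and "len" are hypothetical and only the config guard is the point:

	/* hypothetical legacy-driver fragment, not from this diff */
#ifdef CONFIG_VIRT_TO_BUS
	card->bus_addr = virt_to_bus(buf);	/* legacy ISA-era translation */
#else
	/* arch did not select HAVE_VIRT_TO_BUS: use the DMA API instead */
	card->bus_addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
#endif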
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index e124554598ee..3f06e6149981 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -652,7 +652,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head, empty_rp;
-	struct hlist_node *node, *tmp;
+	struct hlist_node *tmp;
 	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
 	kprobe_opcode_t *correct_ret_addr = NULL;
@@ -682,7 +682,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	 * will be the real return address, and all the rest will
 	 * point to kretprobe_trampoline.
 	 */
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -701,7 +701,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
 	correct_ret_addr = ri->ret_addr;
-	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
 		if (ri->task != current)
 			/* another task is sharing our hash bucket */
 			continue;
@@ -728,7 +728,7 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 
 	kretprobe_hash_unlock(current, &flags);
 
-	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
 		hlist_del(&ri->hlist);
 		kfree(ri);
 	}
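All four hunks above are mechanical: the iterator keeps the "tmp" lookahead node, which is what makes deletion during traversal safe, and drops only the redundant cursor. A minimal kernel-context sketch of the post-conversion pattern (struct item and drain() are hypothetical; in-kernel this needs <linux/list.h> and <linux/slab.h>):

	struct item {
		struct hlist_node hlist;
		int payload;
	};

	static void drain(struct hlist_head *head)
	{
		struct item *it;
		struct hlist_node *tmp;

		/* "it" is the typed cursor; "tmp" caches the next node so
		 * the current entry can be unlinked and freed mid-walk. */
		hlist_for_each_entry_safe(it, tmp, head, hlist) {
			hlist_del(&it->hlist);
			kfree(it);
		}
	}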
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 				    struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos)				\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_sp(kvm, sp, gfn)					\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)		\
-	hlist_for_each_entry(sp, pos,					\
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn)			\
+	hlist_for_each_entry(sp,					\
 	  &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 		if ((sp)->gfn != (gfn) || (sp)->role.direct ||		\
 			(sp)->role.invalid) {} else
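Independent of the hlist change, these macros rely on the "if (cond) {} else" idiom: the caller's loop body binds as the else-branch, so entries that hash to the same bucket but belong to a different gfn are skipped without the macro having to wrap the body in braces. A standalone toy illustration of the same trick (for_each_even is invented for this example):

	#include <stdio.h>

	/* Skip odd values: a non-matching element falls into the empty
	 * if-body, a matching one into the else, i.e. the caller's body. */
	#define for_each_even(pos, arr, n)				\
		for ((pos) = (arr); (pos) < (arr) + (n); (pos)++)	\
			if (*(pos) % 2 != 0) {} else

	int main(void)
	{
		int v[] = { 1, 2, 3, 4, 5, 6 };
		int *p;

		for_each_even(p, v, 6)
			printf("%d\n", *p);	/* prints 2, 4, 6 */
		return 0;
	}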
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
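What makes the dropped parameter possible: after the hlist rework (parent commit b67bfe0d42ca), the iterators derive the containing object from the node internally and NULL-safely, so the typed pointer doubles as the loop cursor. Roughly, a sketch of the reworked core (paraphrased, not quoted verbatim from the hlist commit):

	#define hlist_entry_safe(ptr, type, member) \
		((ptr) ? hlist_entry(ptr, type, member) : NULL)

	#define hlist_for_each_entry(pos, head, member)				    \
		for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
		     pos;							    \
		     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))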