author     Sasha Levin <sasha.levin@oracle.com>            2013-02-27 20:06:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-27 22:10:24 -0500
commit     b67bfe0d42cac56c512dd5da4b1b347a23f4b70a
tree       3d465aea12b97683f26ffa38eba8744469de9997 /arch/x86/kvm/mmu.c
parent     1e142b29e210b5dfb2deeb6ce2210b60af16d2a6
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for-each-entry iterators were conceived
differently from the list one:

        list_for_each_entry(pos, head, member)

The hlist ones were greedy and wanted an extra parameter:

        hlist_for_each_entry(tpos, pos, head, member)

Why did they need an extra pos parameter? I'm not quite sure. Not only do
they not really need it, it also prevents the iterator from looking
exactly like the list iterator, which is unfortunate.
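To make the change concrete, here is a before/after sketch of a typical
caller. This is only an illustration: my_item, hnode, hash_head and
do_something() are made-up names, and the usual <linux/list.h> definitions
are assumed.

struct my_item {
        int key;
        struct hlist_node hnode;        /* chained into a hash bucket */
};

void do_something(int key);             /* hypothetical helper */

/* Before: a spare 'struct hlist_node *' cursor had to be threaded through
 * the iterator alongside the entry pointer. */
void old_style(struct hlist_head *hash_head)
{
        struct my_item *item;
        struct hlist_node *node;        /* exists only to feed the macro */

        hlist_for_each_entry(item, node, hash_head, hnode)
                do_something(item->key);
}

/* After: the call looks just like list_for_each_entry(). */
void new_style(struct hlist_head *hash_head)
{
        struct my_item *item;

        hlist_for_each_entry(item, hash_head, hnode)
                do_something(item->key);
}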
Besides the semantic patch, some manual work was required:
- Fix up the actual hlist iterators in linux/list.h (a sketch of the
  resulting iterator follows this list).
- Fix up the declarations of other iterators that are built on top of the
  hlist ones.
- A very small number of places were using the 'node' parameter directly;
  these were modified to use 'obj->member' instead.
- Coccinelle didn't handle the hlist_for_each_entry_safe iterator
  properly, so those uses had to be fixed up manually (a sketch of that
  conversion follows the semantic patch below).
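For reference, this is roughly the shape the node-free hlist_for_each_entry()
takes after the linux/list.h fixup; treat it as an approximation for
illustration rather than the exact code added by this patch. The key idea is
a NULL-tolerant wrapper around hlist_entry(), so the entry pointer itself
carries the iteration state and can be tested for the end of the chain:

/* container_of() that maps a NULL node pointer to a NULL entry pointer. */
#define hlist_entry_safe(ptr, type, member) \
        ({ typeof(ptr) ____ptr = (ptr); \
           ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
        })

#define hlist_for_each_entry(pos, head, member) \
        for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \
             pos; \
             pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))

The statement expression evaluates the node pointer only once, which matters
if a caller ever passes an expression with side effects.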
The semantic patch, which is mostly the work of Peter Senna Tschudin, is here:
@@
iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host;
type T;
expression a,c,d,e;
identifier b;
statement S;
@@
-T b;
<+... when != b
(
hlist_for_each_entry(a,
- b,
c, d) S
|
hlist_for_each_entry_continue(a,
- b,
c) S
|
hlist_for_each_entry_from(a,
- b,
c) S
|
hlist_for_each_entry_rcu(a,
- b,
c, d) S
|
hlist_for_each_entry_rcu_bh(a,
- b,
c, d) S
|
hlist_for_each_entry_continue_rcu_bh(a,
- b,
c) S
|
for_each_busy_worker(a, c,
- b,
d) S
|
ax25_uid_for_each(a,
- b,
c) S
|
ax25_for_each(a,
- b,
c) S
|
inet_bind_bucket_for_each(a,
- b,
c) S
|
sctp_for_each_hentry(a,
- b,
c) S
|
sk_for_each(a,
- b,
c) S
|
sk_for_each_rcu(a,
- b,
c) S
|
sk_for_each_from
-(a, b)
+(a)
S
+ sk_for_each_from(a) S
|
sk_for_each_safe(a,
- b,
c, d) S
|
sk_for_each_bound(a,
- b,
c) S
|
hlist_for_each_entry_safe(a,
- b,
c, d, e) S
|
hlist_for_each_entry_continue_rcu(a,
- b,
c) S
|
nr_neigh_for_each(a,
- b,
c) S
|
nr_neigh_for_each_safe(a,
- b,
c, d) S
|
nr_node_for_each(a,
- b,
c) S
|
nr_node_for_each_safe(a,
- b,
c, d) S
|
- for_each_gfn_sp(a, c, d, b) S
+ for_each_gfn_sp(a, c, d) S
|
- for_each_gfn_indirect_valid_sp(a, c, d, b) S
+ for_each_gfn_indirect_valid_sp(a, c, d) S
|
for_each_host(a,
- b,
c) S
|
for_each_host_safe(a,
- b,
c, d) S
|
for_each_mesh_entry(a,
- b,
c, d) S
)
...+>
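As noted in the list above, hlist_for_each_entry_safe() was the case the
semantic patch could not convert cleanly, so those callers were fixed by
hand. A hedged sketch of that conversion (my_item and hnode are again
hypothetical names): the 'struct hlist_node *' temporary that makes the
traversal deletion-safe stays, and only the extra position cursor goes away.

/* Before: entry pointer, position cursor, temporary -- five arguments. */
void flush_old(struct hlist_head *head)
{
        struct my_item *item;
        struct hlist_node *pos, *tmp;

        hlist_for_each_entry_safe(item, pos, tmp, head, hnode) {
                hlist_del(&item->hnode);
                kfree(item);
        }
}

/* After: the position cursor is gone, the temporary 'tmp' remains. */
void flush_new(struct hlist_head *head)
{
        struct my_item *item;
        struct hlist_node *tmp;

        hlist_for_each_entry_safe(item, tmp, head, hnode) {
                hlist_del(&item->hnode);
                kfree(item);
        }
}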
[akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c]
[akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c]
[akpm@linux-foundation.org: checkpatch fixes]
[akpm@linux-foundation.org: fix warnings]
[akpm@linux-foundation.org: redo intrusive kvm changes]
Tested-by: Peter Senna Tschudin <peter.senna@gmail.com>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--   arch/x86/kvm/mmu.c |  26 ++++++++++----------------
 1 file changed, 10 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ed3edbe06bd..956ca358108a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1644,13 +1644,13 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                     struct list_head *invalid_list);
 
-#define for_each_gfn_sp(kvm, sp, gfn, pos) \
-  hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_sp(kvm, sp, gfn) \
+  hlist_for_each_entry(sp, \
    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 	if ((sp)->gfn != (gfn)) {} else
 
-#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
-  hlist_for_each_entry(sp, pos, \
+#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn) \
+  hlist_for_each_entry(sp, \
    &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
 	if ((sp)->gfn != (gfn) || (sp)->role.direct || \
 		(sp)->role.invalid) {} else
@@ -1706,11 +1706,10 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	bool flush = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!s->unsync)
 			continue;
 
@@ -1848,7 +1847,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	union kvm_mmu_page_role role;
 	unsigned quadrant;
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	bool need_sync = false;
 
 	role = vcpu->arch.mmu.base_role;
@@ -1863,7 +1861,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2151,14 +2149,13 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	int r;
 
 	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
 	r = 0;
 	spin_lock(&kvm->mmu_lock);
-	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
 		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
 			 sp->role.word);
 		r = 1;
@@ -2288,9 +2285,8 @@ static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (s->unsync)
 			continue;
 		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
@@ -2302,10 +2298,9 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 				  bool can_unsync)
 {
 	struct kvm_mmu_page *s;
-	struct hlist_node *node;
 	bool need_unsync = false;
 
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
 		if (!can_unsync)
 			return 1;
 
@@ -3933,7 +3928,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	union kvm_mmu_page_role mask = { .word = 0 };
 	struct kvm_mmu_page *sp;
-	struct hlist_node *node;
 	LIST_HEAD(invalid_list);
 	u64 entry, gentry, *spte;
 	int npte;
@@ -3964,7 +3958,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
-	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
+	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
 		if (detect_write_misaligned(sp, gpa, bytes) ||
 		    detect_write_flooding(sp)) {
 			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,