about summary refs log tree commit diff stats
path: root/virt/kvm
diff options
context:
space:
mode:
authorSasha Levin <sasha.levin@oracle.com>2013-02-27 20:06:00 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-27 22:10:24 -0500
commitb67bfe0d42cac56c512dd5da4b1b347a23f4b70a (patch)
tree3d465aea12b97683f26ffa38eba8744469de9997 /virt/kvm
parent1e142b29e210b5dfb2deeb6ce2210b60af16d2a6 (diff)
hlist: drop the node parameter from iterators
I'm not sure why, but the hlist for each entry iterators were conceived list_for_each_entry(pos, head, member) The hlist ones were greedy and wanted an extra parameter: hlist_for_each_entry(tpos, pos, head, member) Why did they need an extra pos parameter? I'm not quite sure. Not only they don't really need it, it also prevents the iterator from looking exactly like the list iterator, which is unfortunate. Besides the semantic patch, there was some manual work required: - Fix up the actual hlist iterators in linux/list.h - Fix up the declaration of other iterators based on the hlist ones. - A very small amount of places were using the 'node' parameter, this was modified to use 'obj->member' instead. - Coccinelle didn't handle the hlist_for_each_entry_safe iterator properly, so those had to be fixed up manually. The semantic patch which is mostly the work of Peter Senna Tschudin is here: @@ iterator name hlist_for_each_entry, hlist_for_each_entry_continue, hlist_for_each_entry_from, hlist_for_each_entry_rcu, hlist_for_each_entry_rcu_bh, hlist_for_each_entry_continue_rcu_bh, for_each_busy_worker, ax25_uid_for_each, ax25_for_each, inet_bind_bucket_for_each, sctp_for_each_hentry, sk_for_each, sk_for_each_rcu, sk_for_each_from, sk_for_each_safe, sk_for_each_bound, hlist_for_each_entry_safe, hlist_for_each_entry_continue_rcu, nr_neigh_for_each, nr_neigh_for_each_safe, nr_node_for_each, nr_node_for_each_safe, for_each_gfn_indirect_valid_sp, for_each_gfn_sp, for_each_host; type T; expression a,c,d,e; identifier b; statement S; @@ -T b; <+... 
when != b ( hlist_for_each_entry(a, - b, c, d) S | hlist_for_each_entry_continue(a, - b, c) S | hlist_for_each_entry_from(a, - b, c) S | hlist_for_each_entry_rcu(a, - b, c, d) S | hlist_for_each_entry_rcu_bh(a, - b, c, d) S | hlist_for_each_entry_continue_rcu_bh(a, - b, c) S | for_each_busy_worker(a, c, - b, d) S | ax25_uid_for_each(a, - b, c) S | ax25_for_each(a, - b, c) S | inet_bind_bucket_for_each(a, - b, c) S | sctp_for_each_hentry(a, - b, c) S | sk_for_each(a, - b, c) S | sk_for_each_rcu(a, - b, c) S | sk_for_each_from -(a, b) +(a) S + sk_for_each_from(a) S | sk_for_each_safe(a, - b, c, d) S | sk_for_each_bound(a, - b, c) S | hlist_for_each_entry_safe(a, - b, c, d, e) S | hlist_for_each_entry_continue_rcu(a, - b, c) S | nr_neigh_for_each(a, - b, c) S | nr_neigh_for_each_safe(a, - b, c, d) S | nr_node_for_each(a, - b, c) S | nr_node_for_each_safe(a, - b, c, d) S | - for_each_gfn_sp(a, c, d, b) S + for_each_gfn_sp(a, c, d) S | - for_each_gfn_indirect_valid_sp(a, c, d, b) S + for_each_gfn_indirect_valid_sp(a, c, d) S | for_each_host(a, - b, c) S | for_each_host_safe(a, - b, c, d) S | for_each_mesh_entry(a, - b, c, d) S ) ...+> [akpm@linux-foundation.org: drop bogus change from net/ipv4/raw.c] [akpm@linux-foundation.org: drop bogus hunk from net/ipv6/raw.c] [akpm@linux-foundation.org: checkpatch fixes] [akpm@linux-foundation.org: fix warnings] [akpm@linux-foundation.org: redo intrusive kvm changes] Tested-by: Peter Senna Tschudin <peter.senna@gmail.com> Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Signed-off-by: Sasha Levin <sasha.levin@oracle.com> Cc: Wu Fengguang <fengguang.wu@intel.com> Cc: Marcelo Tosatti <mtosatti@redhat.com> Cc: Gleb Natapov <gleb@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'virt/kvm')
-rw-r--r--virt/kvm/eventfd.c3
-rw-r--r--virt/kvm/irq_comm.c18
2 files changed, 7 insertions, 14 deletions
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b6eea5cc7b34..adb17f266b28 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -268,14 +268,13 @@ static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
268 struct kvm_irq_routing_table *irq_rt) 268 struct kvm_irq_routing_table *irq_rt)
269{ 269{
270 struct kvm_kernel_irq_routing_entry *e; 270 struct kvm_kernel_irq_routing_entry *e;
271 struct hlist_node *n;
272 271
273 if (irqfd->gsi >= irq_rt->nr_rt_entries) { 272 if (irqfd->gsi >= irq_rt->nr_rt_entries) {
274 rcu_assign_pointer(irqfd->irq_entry, NULL); 273 rcu_assign_pointer(irqfd->irq_entry, NULL);
275 return; 274 return;
276 } 275 }
277 276
278 hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) { 277 hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
279 /* Only fast-path MSI. */ 278 /* Only fast-path MSI. */
280 if (e->type == KVM_IRQ_ROUTING_MSI) 279 if (e->type == KVM_IRQ_ROUTING_MSI)
281 rcu_assign_pointer(irqfd->irq_entry, e); 280 rcu_assign_pointer(irqfd->irq_entry, e);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index ff6d40e2c06d..e9073cf4d040 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -173,7 +173,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS]; 173 struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
174 int ret = -1, i = 0; 174 int ret = -1, i = 0;
175 struct kvm_irq_routing_table *irq_rt; 175 struct kvm_irq_routing_table *irq_rt;
176 struct hlist_node *n;
177 176
178 trace_kvm_set_irq(irq, level, irq_source_id); 177 trace_kvm_set_irq(irq, level, irq_source_id);
179 178
@@ -184,7 +183,7 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level)
184 rcu_read_lock(); 183 rcu_read_lock();
185 irq_rt = rcu_dereference(kvm->irq_routing); 184 irq_rt = rcu_dereference(kvm->irq_routing);
186 if (irq < irq_rt->nr_rt_entries) 185 if (irq < irq_rt->nr_rt_entries)
187 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) 186 hlist_for_each_entry(e, &irq_rt->map[irq], link)
188 irq_set[i++] = *e; 187 irq_set[i++] = *e;
189 rcu_read_unlock(); 188 rcu_read_unlock();
190 189
@@ -212,7 +211,6 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
212 struct kvm_kernel_irq_routing_entry *e; 211 struct kvm_kernel_irq_routing_entry *e;
213 int ret = -EINVAL; 212 int ret = -EINVAL;
214 struct kvm_irq_routing_table *irq_rt; 213 struct kvm_irq_routing_table *irq_rt;
215 struct hlist_node *n;
216 214
217 trace_kvm_set_irq(irq, level, irq_source_id); 215 trace_kvm_set_irq(irq, level, irq_source_id);
218 216
@@ -227,7 +225,7 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
227 rcu_read_lock(); 225 rcu_read_lock();
228 irq_rt = rcu_dereference(kvm->irq_routing); 226 irq_rt = rcu_dereference(kvm->irq_routing);
229 if (irq < irq_rt->nr_rt_entries) 227 if (irq < irq_rt->nr_rt_entries)
230 hlist_for_each_entry(e, n, &irq_rt->map[irq], link) { 228 hlist_for_each_entry(e, &irq_rt->map[irq], link) {
231 if (likely(e->type == KVM_IRQ_ROUTING_MSI)) 229 if (likely(e->type == KVM_IRQ_ROUTING_MSI))
232 ret = kvm_set_msi_inatomic(e, kvm); 230 ret = kvm_set_msi_inatomic(e, kvm);
233 else 231 else
@@ -241,13 +239,12 @@ int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
241bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin) 239bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
242{ 240{
243 struct kvm_irq_ack_notifier *kian; 241 struct kvm_irq_ack_notifier *kian;
244 struct hlist_node *n;
245 int gsi; 242 int gsi;
246 243
247 rcu_read_lock(); 244 rcu_read_lock();
248 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 245 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
249 if (gsi != -1) 246 if (gsi != -1)
250 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 247 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
251 link) 248 link)
252 if (kian->gsi == gsi) { 249 if (kian->gsi == gsi) {
253 rcu_read_unlock(); 250 rcu_read_unlock();
@@ -263,7 +260,6 @@ EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
263void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin) 260void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
264{ 261{
265 struct kvm_irq_ack_notifier *kian; 262 struct kvm_irq_ack_notifier *kian;
266 struct hlist_node *n;
267 int gsi; 263 int gsi;
268 264
269 trace_kvm_ack_irq(irqchip, pin); 265 trace_kvm_ack_irq(irqchip, pin);
@@ -271,7 +267,7 @@ void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
271 rcu_read_lock(); 267 rcu_read_lock();
272 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 268 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
273 if (gsi != -1) 269 if (gsi != -1)
274 hlist_for_each_entry_rcu(kian, n, &kvm->irq_ack_notifier_list, 270 hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
275 link) 271 link)
276 if (kian->gsi == gsi) 272 if (kian->gsi == gsi)
277 kian->irq_acked(kian); 273 kian->irq_acked(kian);
@@ -369,13 +365,12 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
369 bool mask) 365 bool mask)
370{ 366{
371 struct kvm_irq_mask_notifier *kimn; 367 struct kvm_irq_mask_notifier *kimn;
372 struct hlist_node *n;
373 int gsi; 368 int gsi;
374 369
375 rcu_read_lock(); 370 rcu_read_lock();
376 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin]; 371 gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
377 if (gsi != -1) 372 if (gsi != -1)
378 hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link) 373 hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
379 if (kimn->irq == gsi) 374 if (kimn->irq == gsi)
380 kimn->func(kimn, mask); 375 kimn->func(kimn, mask);
381 rcu_read_unlock(); 376 rcu_read_unlock();
@@ -396,13 +391,12 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
396 int delta; 391 int delta;
397 unsigned max_pin; 392 unsigned max_pin;
398 struct kvm_kernel_irq_routing_entry *ei; 393 struct kvm_kernel_irq_routing_entry *ei;
399 struct hlist_node *n;
400 394
401 /* 395 /*
402 * Do not allow GSI to be mapped to the same irqchip more than once. 396 * Do not allow GSI to be mapped to the same irqchip more than once.
403 * Allow only one to one mapping between GSI and MSI. 397 * Allow only one to one mapping between GSI and MSI.
404 */ 398 */
405 hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link) 399 hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
406 if (ei->type == KVM_IRQ_ROUTING_MSI || 400 if (ei->type == KVM_IRQ_ROUTING_MSI ||
407 ue->type == KVM_IRQ_ROUTING_MSI || 401 ue->type == KVM_IRQ_ROUTING_MSI ||
408 ue->u.irqchip.irqchip == ei->irqchip.irqchip) 402 ue->u.irqchip.irqchip == ei->irqchip.irqchip)