author     Stuart Bennett <stuart@freedesktop.org>    2009-04-28 15:17:49 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-29 05:33:33 -0400
commit     0492e1bb8fe7d122901c9f3af75e537d4129712e
tree       85ef9b00ee27b78b3fae5a514bb1d3318e48bf2e
parent     7d7d2b803159d4edeb051b0e5efbc1a8d9ef1c67
tracing: x86, mmiotrace: code consistency/legibility improvement
Having a kmmio_probe always named *p while a kmmio_fault_page is sometimes *f
and sometimes *p is not helpful; use *f consistently for kmmio_fault_page.
[ Impact: cleanup ]
Signed-off-by: Stuart Bennett <stuart@freedesktop.org>
Acked-by: Pekka Paalanen <pq@iki.fi>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1240946271-7083-3-git-send-email-stuart@freedesktop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
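The convention the rename settles on is one pointer name per type: *p is always a
struct kmmio_probe, *f always a struct kmmio_fault_page. As a minimal illustration,
here is a hypothetical caller (not part of the patch; it only reuses the two lookup
helpers and the count field that appear in the hunks below):

/* Hypothetical fragment, not in kmmio.c: shows the naming convention this
 * patch enforces. "p" is reserved for struct kmmio_probe, "f" for
 * struct kmmio_fault_page. */
static bool addr_is_traced(unsigned long addr)
{
        struct kmmio_probe *p = get_kmmio_probe(addr);            /* probe covering addr, if any */
        struct kmmio_fault_page *f = get_kmmio_fault_page(addr);  /* page entry; helper masks with PAGE_MASK itself */

        return p && f && f->count;  /* page is armed and referenced by at least one probe */
}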
 arch/x86/mm/kmmio.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 4f115e00486b..869181a917d9 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -97,13 +97,13 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
 static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 {
         struct list_head *head;
-        struct kmmio_fault_page *p;
+        struct kmmio_fault_page *f;
 
         page &= PAGE_MASK;
         head = kmmio_page_list(page);
-        list_for_each_entry_rcu(p, head, list) {
-                if (p->page == page)
-                        return p;
+        list_for_each_entry_rcu(f, head, list) {
+                if (f->page == page)
+                        return f;
         }
         return NULL;
 }
@@ -439,12 +439,12 @@ static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
                                                 head,
                                                 struct kmmio_delayed_release,
                                                 rcu);
-        struct kmmio_fault_page *p = dr->release_list;
-        while (p) {
-                struct kmmio_fault_page *next = p->release_next;
-                BUG_ON(p->count);
-                kfree(p);
-                p = next;
+        struct kmmio_fault_page *f = dr->release_list;
+        while (f) {
+                struct kmmio_fault_page *next = f->release_next;
+                BUG_ON(f->count);
+                kfree(f);
+                f = next;
         }
         kfree(dr);
 }
@@ -453,19 +453,19 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 {
         struct kmmio_delayed_release *dr =
                 container_of(head, struct kmmio_delayed_release, rcu);
-        struct kmmio_fault_page *p = dr->release_list;
+        struct kmmio_fault_page *f = dr->release_list;
         struct kmmio_fault_page **prevp = &dr->release_list;
         unsigned long flags;
 
         spin_lock_irqsave(&kmmio_lock, flags);
-        while (p) {
-                if (!p->count) {
-                        list_del_rcu(&p->list);
-                        prevp = &p->release_next;
+        while (f) {
+                if (!f->count) {
+                        list_del_rcu(&f->list);
+                        prevp = &f->release_next;
                 } else {
-                        *prevp = p->release_next;
+                        *prevp = f->release_next;
                 }
-                p = p->release_next;
+                f = f->release_next;
         }
         spin_unlock_irqrestore(&kmmio_lock, flags);
 
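For orientation, the two renamed callbacks are the two halves of kmmio's delayed
release path. The sketch below is reconstructed from the surrounding kmmio.c code of
that era rather than from the hunks shown here, so treat the call sites as assumed
context, not as part of this patch:

/*
 * Sketch of the delayed-release chain (assumed context, not part of this
 * patch): "dr" is a struct kmmio_delayed_release carrying the disarmed
 * fault pages on dr->release_list.
 *
 * Stage 1: unregister_kmmio_probe() defers the first pass past an RCU
 * grace period:
 */
call_rcu(&dr->rcu, remove_kmmio_fault_pages);

/*
 * Stage 2: remove_kmmio_fault_pages() (third hunk above) unlinks pages
 * whose count is still zero from the RCU hash list, drops re-armed pages
 * from the release list, and defers the actual free past a second grace
 * period:
 */
call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);

/*
 * rcu_free_kmmio_fault_pages() (second hunk above) then kfree()s each
 * remaining fault page and the kmmio_delayed_release itself.
 */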