author     Tej <bewith.tej@gmail.com>      2008-12-16 14:56:06 -0500
committer  Ingo Molnar <mingo@elte.hu>     2008-12-16 15:05:01 -0500
commit     f63c2f248959366cd11bfa476f866737047cf663 (patch)
tree       7bd3d723d904f13a37e2befefd817e05588a9b23 /arch/x86/xen/mmu.c
parent     1bda71282ded6a2e09a2db7c8884542fb46bfd4f (diff)
xen: whitespace/checkpatch cleanup
Impact: cleanup
Signed-off-by: Tej <bewith.tej@gmail.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c  |  17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 636ef4caa52d..773d68d3e912 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -154,13 +154,13 @@ void xen_setup_mfn_list_list(void)
 {
 	unsigned pfn, idx;
 
-	for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
+	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
 		unsigned topidx = p2m_top_index(pfn);
 
 		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
 	}
 
-	for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
+	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
 		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
 		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
 	}
@@ -179,7 +179,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
 	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
 	unsigned pfn;
 
-	for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
+	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
 		unsigned topidx = p2m_top_index(pfn);
 
 		p2m_top[topidx] = &mfn_list[pfn];
@@ -207,7 +207,7 @@ static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
 	p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
 	BUG_ON(p == NULL);
 
-	for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
+	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
 		p[i] = INVALID_P2M_ENTRY;
 
 	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
@@ -407,7 +407,8 @@ out:
 	preempt_enable();
 }
 
-pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
+				 unsigned long addr, pte_t *ptep)
 {
 	/* Just return the pte as-is. We preserve the bits on commit */
 	return *ptep;
@@ -878,7 +879,8 @@ static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 
 		if (user_pgd) {
 			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
-			xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
+			xen_do_pin(MMUEXT_PIN_L4_TABLE,
+				   PFN_DOWN(__pa(user_pgd)));
 		}
 	}
 #else /* CONFIG_X86_32 */
@@ -993,7 +995,8 @@ static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 		pgd_t *user_pgd = xen_get_user_pgd(pgd);
 
 		if (user_pgd) {
-			xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
+			xen_do_pin(MMUEXT_UNPIN_TABLE,
+				   PFN_DOWN(__pa(user_pgd)));
 			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
 		}
 	}