-rw-r--r--  arch/x86/xen/mmu.c  | 284
-rw-r--r--  arch/x86/xen/mmu.h  |  37
-rw-r--r--  mm/vmalloc.c        |   4
3 files changed, 50 insertions, 275 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 02d752460371..dc708dcc62f1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -75,67 +75,12 @@
 #include "mmu.h"
 #include "debugfs.h"
 
-#define MMU_UPDATE_HISTO 30
-
 /*
  * Protects atomic reservation decrease/increase against concurrent increases.
  * Also protects non-atomic updates of current_pages and balloon lists.
  */
 DEFINE_SPINLOCK(xen_reservation_lock);
 
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct {
-        u32 pgd_update;
-        u32 pgd_update_pinned;
-        u32 pgd_update_batched;
-
-        u32 pud_update;
-        u32 pud_update_pinned;
-        u32 pud_update_batched;
-
-        u32 pmd_update;
-        u32 pmd_update_pinned;
-        u32 pmd_update_batched;
-
-        u32 pte_update;
-        u32 pte_update_pinned;
-        u32 pte_update_batched;
-
-        u32 mmu_update;
-        u32 mmu_update_extended;
-        u32 mmu_update_histo[MMU_UPDATE_HISTO];
-
-        u32 prot_commit;
-        u32 prot_commit_batched;
-
-        u32 set_pte_at;
-        u32 set_pte_at_batched;
-        u32 set_pte_at_pinned;
-        u32 set_pte_at_current;
-        u32 set_pte_at_kernel;
-} mmu_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-        if (unlikely(zero_stats)) {
-                memset(&mmu_stats, 0, sizeof(mmu_stats));
-                zero_stats = 0;
-        }
-}
-
-#define ADD_STATS(elem, val) \
-        do { check_zero(); mmu_stats.elem += (val); } while(0)
-
-#else /* !CONFIG_XEN_DEBUG_FS */
-
-#define ADD_STATS(elem, val) do { (void)(val); } while(0)
-
-#endif /* CONFIG_XEN_DEBUG_FS */
-
-
 /*
  * Identity map, in addition to plain kernel map. This needs to be
  * large enough to allocate page table pages to allocate the rest.
@@ -243,11 +188,6 @@ static bool xen_page_pinned(void *ptr)
         return PagePinned(page);
 }
 
-static bool xen_iomap_pte(pte_t pte)
-{
-        return pte_flags(pte) & _PAGE_IOMAP;
-}
-
 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
         struct multicall_space mcs;
@@ -257,7 +197,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
         u = mcs.args;
 
         /* ptep might be kmapped when using 32-bit HIGHPTE */
-        u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+        u->ptr = virt_to_machine(ptep).maddr;
         u->val = pte_val_ma(pteval);
 
         MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
@@ -266,11 +206,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 }
 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
-{
-        xen_set_domain_pte(ptep, pteval, DOMID_IO);
-}
-
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
         struct multicall_space mcs;
@@ -279,27 +214,17 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
         mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 
         if (mcs.mc != NULL) {
-                ADD_STATS(mmu_update_extended, 1);
-                ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
-
                 mcs.mc->args[1]++;
-
-                if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
-                        ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
-                else
-                        ADD_STATS(mmu_update_histo[0], 1);
         } else {
-                ADD_STATS(mmu_update, 1);
                 mcs = __xen_mc_entry(sizeof(*u));
                 MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
-                ADD_STATS(mmu_update_histo[1], 1);
         }
 
         u = mcs.args;
         *u = *update;
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
         struct mmu_update u;
 
@@ -312,17 +237,13 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
         u.val = pmd_val_ma(val);
         xen_extend_mmu_update(&u);
 
-        ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 
         preempt_enable();
 }
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
-        ADD_STATS(pmd_update, 1);
-
         /* If page is not pinned, we can just update the entry
            directly */
         if (!xen_page_pinned(ptr)) {
@@ -330,8 +251,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
                 return;
         }
 
-        ADD_STATS(pmd_update_pinned, 1);
-
         xen_set_pmd_hyper(ptr, val);
 }
 
@@ -344,35 +263,34 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
         set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                    pte_t *ptep, pte_t pteval)
+static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 {
-        if (xen_iomap_pte(pteval)) {
-                xen_set_iomap_pte(ptep, pteval);
-                goto out;
-        }
+        struct mmu_update u;
 
-        ADD_STATS(set_pte_at, 1);
-//      ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
-        ADD_STATS(set_pte_at_current, mm == current->mm);
-        ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+                return false;
 
-        if (mm == current->mm || mm == &init_mm) {
-                if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-                        struct multicall_space mcs;
-                        mcs = xen_mc_entry(0);
+        xen_mc_batch();
 
-                        MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
-                        ADD_STATS(set_pte_at_batched, 1);
-                        xen_mc_issue(PARAVIRT_LAZY_MMU);
-                        goto out;
-                } else
-                        if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
-                                goto out;
-        }
-        xen_set_pte(ptep, pteval);
+        u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+        u.val = pte_val_ma(pteval);
+        xen_extend_mmu_update(&u);
+
+        xen_mc_issue(PARAVIRT_LAZY_MMU);
 
-out:    return;
+        return true;
+}
+
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+        if (!xen_batched_set_pte(ptep, pteval))
+                native_set_pte(ptep, pteval);
+}
+
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, pte_t pteval)
+{
+        xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
@@ -389,13 +307,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
         xen_mc_batch();
 
-        u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+        u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
         u.val = pte_val_ma(pte);
         xen_extend_mmu_update(&u);
 
-        ADD_STATS(prot_commit, 1);
-        ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -463,7 +378,7 @@ static pteval_t iomap_pte(pteval_t val)
         return val;
 }
 
-pteval_t xen_pte_val(pte_t pte)
+static pteval_t xen_pte_val(pte_t pte)
 {
         pteval_t pteval = pte.pte;
 
@@ -480,7 +395,7 @@ pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-pgdval_t xen_pgd_val(pgd_t pgd)
+static pgdval_t xen_pgd_val(pgd_t pgd)
 {
         return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -511,7 +426,7 @@ void xen_set_pat(u64 pat)
         WARN_ON(pat != 0x0007010600070106ull);
 }
 
-pte_t xen_make_pte(pteval_t pte)
+static pte_t xen_make_pte(pteval_t pte)
 {
         phys_addr_t addr = (pte & PTE_PFN_MASK);
 
@@ -581,20 +496,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
 #endif
 
-pgd_t xen_make_pgd(pgdval_t pgd)
+static pgd_t xen_make_pgd(pgdval_t pgd)
 {
         pgd = pte_pfn_to_mfn(pgd);
         return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-pmdval_t xen_pmd_val(pmd_t pmd)
+static pmdval_t xen_pmd_val(pmd_t pmd)
 {
         return pte_mfn_to_pfn(pmd.pmd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
-void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
         struct mmu_update u;
 
@@ -607,17 +522,13 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
         u.val = pud_val_ma(val);
         xen_extend_mmu_update(&u);
 
-        ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
         xen_mc_issue(PARAVIRT_LAZY_MMU);
 
         preempt_enable();
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+static void xen_set_pud(pud_t *ptr, pud_t val)
 {
-        ADD_STATS(pud_update, 1);
-
         /* If page is not pinned, we can just update the entry
            directly */
         if (!xen_page_pinned(ptr)) {
@@ -625,56 +536,28 @@ void xen_set_pud(pud_t *ptr, pud_t val)
                 return;
         }
 
-        ADD_STATS(pud_update_pinned, 1);
-
         xen_set_pud_hyper(ptr, val);
 }
 
-void xen_set_pte(pte_t *ptep, pte_t pte)
-{
-        if (xen_iomap_pte(pte)) {
-                xen_set_iomap_pte(ptep, pte);
-                return;
-        }
-
-        ADD_STATS(pte_update, 1);
-//      ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
-        ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 #ifdef CONFIG_X86_PAE
-        ptep->pte_high = pte.pte_high;
-        smp_wmb();
-        ptep->pte_low = pte.pte_low;
-#else
-        *ptep = pte;
-#endif
-}
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-        if (xen_iomap_pte(pte)) {
-                xen_set_iomap_pte(ptep, pte);
-                return;
-        }
-
         set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-        ptep->pte_low = 0;
-        smp_wmb();              /* make sure low gets written first */
-        ptep->pte_high = 0;
+        if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+                native_pte_clear(mm, addr, ptep);
 }
 
-void xen_pmd_clear(pmd_t *pmdp)
+static void xen_pmd_clear(pmd_t *pmdp)
 {
         set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
 
-pmd_t xen_make_pmd(pmdval_t pmd)
+static pmd_t xen_make_pmd(pmdval_t pmd)
 {
         pmd = pte_pfn_to_mfn(pmd);
         return native_make_pmd(pmd);
| 682 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); | 565 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); |
| 683 | 566 | ||
| 684 | #if PAGETABLE_LEVELS == 4 | 567 | #if PAGETABLE_LEVELS == 4 |
| 685 | pudval_t xen_pud_val(pud_t pud) | 568 | static pudval_t xen_pud_val(pud_t pud) |
| 686 | { | 569 | { |
| 687 | return pte_mfn_to_pfn(pud.pud); | 570 | return pte_mfn_to_pfn(pud.pud); |
| 688 | } | 571 | } |
| 689 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); | 572 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); |
| 690 | 573 | ||
| 691 | pud_t xen_make_pud(pudval_t pud) | 574 | static pud_t xen_make_pud(pudval_t pud) |
| 692 | { | 575 | { |
| 693 | pud = pte_pfn_to_mfn(pud); | 576 | pud = pte_pfn_to_mfn(pud); |
| 694 | 577 | ||
@@ -696,7 +579,7 @@ pud_t xen_make_pud(pudval_t pud)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
-pgd_t *xen_get_user_pgd(pgd_t *pgd)
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
         pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
         unsigned offset = pgd - pgd_page;
@@ -728,7 +611,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
  * 2. It is always pinned
  * 3. It has no user pagetable attached to it
  */
-void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 {
         preempt_disable();
 
@@ -741,12 +624,10 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
         preempt_enable();
 }
 
-void xen_set_pgd(pgd_t *ptr, pgd_t val)
+static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
         pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
-        ADD_STATS(pgd_update, 1);
-
         /* If page is not pinned, we can just update the entry
            directly */
         if (!xen_page_pinned(ptr)) {
@@ -758,9 +639,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
                 return;
         }
 
-        ADD_STATS(pgd_update_pinned, 1);
-        ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
         /* If it's pinned, then we can at least batch the kernel and
            user updates together. */
         xen_mc_batch();
@@ -1162,14 +1040,14 @@ void xen_mm_unpin_all(void)
         spin_unlock(&pgd_lock);
 }
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
         spin_lock(&next->page_table_lock);
         xen_pgd_pin(next);
         spin_unlock(&next->page_table_lock);
 }
 
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
         spin_lock(&mm->page_table_lock);
         xen_pgd_pin(mm);
@@ -1256,7 +1134,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
  * pagetable because of lazy tlb flushing.  This means we need need to
  * switch all CPUs off this pagetable before we can unpin it.
  */
-void xen_exit_mmap(struct mm_struct *mm)
+static void xen_exit_mmap(struct mm_struct *mm)
 {
         get_cpu();              /* make sure we don't move around */
         xen_drop_mm_ref(mm);
@@ -2371,7 +2249,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
         struct remap_data *rmd = data;
         pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
 
-        rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+        rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
         rmd->mmu_update->val = pte_val_ma(pte);
         rmd->mmu_update++;
 
@@ -2425,7 +2303,6 @@ out:
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
 #ifdef CONFIG_XEN_DEBUG_FS
-
 static int p2m_dump_open(struct inode *inode, struct file *filp)
 {
         return single_open(filp, p2m_dump_show, NULL);
@@ -2437,65 +2314,4 @@ static const struct file_operations p2m_dump_fops = {
         .llseek = seq_lseek,
         .release = single_release,
 };
-
-static struct dentry *d_mmu_debug;
-
-static int __init xen_mmu_debugfs(void)
-{
-        struct dentry *d_xen = xen_init_debugfs();
-
-        if (d_xen == NULL)
-                return -ENOMEM;
-
-        d_mmu_debug = debugfs_create_dir("mmu", d_xen);
-
-        debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
-
-        debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
-        debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
-                           &mmu_stats.pgd_update_pinned);
-        debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
-                           &mmu_stats.pgd_update_pinned);
-
-        debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
-        debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
-                           &mmu_stats.pud_update_pinned);
-        debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
-                           &mmu_stats.pud_update_pinned);
-
-        debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
-        debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
-                           &mmu_stats.pmd_update_pinned);
-        debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
-                           &mmu_stats.pmd_update_pinned);
-
-        debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
-//      debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
-//                         &mmu_stats.pte_update_pinned);
-        debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
-                           &mmu_stats.pte_update_pinned);
-
-        debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
-        debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
-                           &mmu_stats.mmu_update_extended);
-        xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
-                                     mmu_stats.mmu_update_histo, 20);
-
-        debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
-        debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
-                           &mmu_stats.set_pte_at_batched);
-        debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
-                           &mmu_stats.set_pte_at_current);
-        debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
-                           &mmu_stats.set_pte_at_kernel);
-
-        debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
-        debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
-                           &mmu_stats.prot_commit_batched);
-
-        debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
-        return 0;
-}
-fs_initcall(xen_mmu_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
+#endif /* CONFIG_XEN_DEBUG_FS */
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h
index 537bb9aab777..73809bb951b4 100644
--- a/arch/x86/xen/mmu.h
+++ b/arch/x86/xen/mmu.h
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-void xen_exit_mmap(struct mm_struct *mm);
-
-pteval_t xen_pte_val(pte_t);
-pmdval_t xen_pmd_val(pmd_t);
-pgdval_t xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(pteval_t);
-pmd_t xen_make_pmd(pmdval_t);
-pgd_t xen_make_pgd(pgdval_t);
-
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                    pte_t *ptep, pte_t pteval);
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
-#endif /* CONFIG_X86_PAE */
-
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud(pud_t *ptr, pud_t val);
-void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-
-#if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud);
-pud_t xen_make_pud(pudval_t pudval);
-void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
-void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
-#endif
-
-pgd_t *xen_get_user_pgd(pgd_t *pgd);
-
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b5ccf3158d82..1d34d75366a7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2153,10 +2153,6 @@ struct vm_struct *alloc_vm_area(size_t size)
                 return NULL;
         }
 
-        /* Make sure the pagetables are constructed in process kernel
-           mappings */
-        vmalloc_sync_all();
-
         return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
