Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c | 205
1 file changed, 122 insertions(+), 83 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ad54fa10f8a2..f4bf8aa574f4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -48,6 +48,8 @@
 #include <linux/memblock.h>
 #include <linux/seq_file.h>
 
+#include <trace/events/xen.h>
+
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -194,6 +196,8 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 	struct multicall_space mcs;
 	struct mmu_update *u;
 
+	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);
+
 	mcs = xen_mc_entry(sizeof(*u));
 	u = mcs.args;
 
@@ -225,6 +229,24 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
 	*u = *update;
 }
 
+static void xen_extend_mmuext_op(const struct mmuext_op *op)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *u;
+
+	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));
+
+	if (mcs.mc != NULL) {
+		mcs.mc->args[1]++;
+	} else {
+		mcs = __xen_mc_entry(sizeof(*u));
+		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+	}
+
+	u = mcs.args;
+	*u = *op;
+}
+
 static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
 	struct mmu_update u;
@@ -245,6 +267,8 @@ static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 
 static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
+	trace_xen_mmu_set_pmd(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -282,22 +306,30 @@ static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 	return true;
 }
 
-static void xen_set_pte(pte_t *ptep, pte_t pteval)
+static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
 {
 	if (!xen_batched_set_pte(ptep, pteval))
 		native_set_pte(ptep, pteval);
 }
 
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+	trace_xen_mmu_set_pte(ptep, pteval);
+	__xen_set_pte(ptep, pteval);
+}
+
 static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
-	xen_set_pte(ptep, pteval);
+	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
+	__xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
 				 unsigned long addr, pte_t *ptep)
 {
 	/* Just return the pte as-is. We preserve the bits on commit */
+	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
 	return *ptep;
 }
 
@@ -306,6 +338,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 {
 	struct mmu_update u;
 
+	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
 	xen_mc_batch();
 
 	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
@@ -462,41 +495,6 @@ static pte_t xen_make_pte(pteval_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
-#ifdef CONFIG_XEN_DEBUG
-pte_t xen_make_pte_debug(pteval_t pte)
-{
-	phys_addr_t addr = (pte & PTE_PFN_MASK);
-	phys_addr_t other_addr;
-	bool io_page = false;
-	pte_t _pte;
-
-	if (pte & _PAGE_IOMAP)
-		io_page = true;
-
-	_pte = xen_make_pte(pte);
-
-	if (!addr)
-		return _pte;
-
-	if (io_page &&
-	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
-		other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT;
-		WARN_ONCE(addr != other_addr,
-			"0x%lx is using VM_IO, but it is 0x%lx!\n",
-			(unsigned long)addr, (unsigned long)other_addr);
-	} else {
-		pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP;
-		other_addr = (_pte.pte & PTE_PFN_MASK);
-		WARN_ONCE((addr == other_addr) && (!io_page) && (!iomap_set),
-			"0x%lx is missing VM_IO (and wasn't fixed)!\n",
-			(unsigned long)addr);
-	}
-
-	return _pte;
-}
-PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
-#endif
-
 static pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
@@ -530,6 +528,8 @@ static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 
 static void xen_set_pud(pud_t *ptr, pud_t val)
 {
+	trace_xen_mmu_set_pud(ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -543,17 +543,20 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
 #ifdef CONFIG_X86_PAE
 static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+	trace_xen_mmu_set_pte_atomic(ptep, pte);
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
 static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+	trace_xen_mmu_pte_clear(mm, addr, ptep);
 	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
 		native_pte_clear(mm, addr, ptep);
 }
 
 static void xen_pmd_clear(pmd_t *pmdp)
 {
+	trace_xen_mmu_pmd_clear(pmdp);
 	set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
@@ -629,6 +632,8 @@ static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
 	pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
+	trace_xen_mmu_set_pgd(ptr, user_ptr, val);
+
 	/* If page is not pinned, we can just update the entry
 	   directly */
 	if (!xen_page_pinned(ptr)) {
@@ -788,14 +793,12 @@ static void xen_pte_unlock(void *v)
 
 static void xen_do_pin(unsigned level, unsigned long pfn)
 {
-	struct mmuext_op *op;
-	struct multicall_space mcs;
+	struct mmuext_op op;
 
-	mcs = __xen_mc_entry(sizeof(*op));
-	op = mcs.args;
-	op->cmd = level;
-	op->arg1.mfn = pfn_to_mfn(pfn);
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+	op.cmd = level;
+	op.arg1.mfn = pfn_to_mfn(pfn);
+
+	xen_extend_mmuext_op(&op);
 }
 
 static int xen_pin_page(struct mm_struct *mm, struct page *page,
@@ -863,6 +866,8 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
+	trace_xen_mmu_pgd_pin(mm, pgd);
+
 	xen_mc_batch();
 
 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -988,6 +993,8 @@ static int xen_unpin_page(struct mm_struct *mm, struct page *page,
 /* Release a pagetables pages back as normal RW */
 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
 {
+	trace_xen_mmu_pgd_unpin(mm, pgd);
+
 	xen_mc_batch();
 
 	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1196,6 +1203,8 @@ static void xen_flush_tlb(void)
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb(0);
+
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1214,6 +1223,8 @@ static void xen_flush_tlb_single(unsigned long addr)
 	struct mmuext_op *op;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb_single(addr);
+
 	preempt_disable();
 
 	mcs = xen_mc_entry(sizeof(*op));
@@ -1240,6 +1251,8 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
 	} *args;
 	struct multicall_space mcs;
 
+	trace_xen_mmu_flush_tlb_others(cpus, mm, va);
+
 	if (cpumask_empty(cpus))
 		return; /* nothing to do */
 
@@ -1275,10 +1288,11 @@ static void set_current_cr3(void *v)
 
 static void __xen_write_cr3(bool kernel, unsigned long cr3)
 {
-	struct mmuext_op *op;
-	struct multicall_space mcs;
+	struct mmuext_op op;
 	unsigned long mfn;
 
+	trace_xen_mmu_write_cr3(kernel, cr3);
+
 	if (cr3)
 		mfn = pfn_to_mfn(PFN_DOWN(cr3));
 	else
@@ -1286,13 +1300,10 @@ static void __xen_write_cr3(bool kernel, unsigned long cr3)
 
 	WARN_ON(mfn == 0 && kernel);
 
-	mcs = __xen_mc_entry(sizeof(*op));
-
-	op = mcs.args;
-	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
-	op->arg1.mfn = mfn;
+	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+	op.arg1.mfn = mfn;
 
-	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+	xen_extend_mmuext_op(&op);
 
 	if (kernel) {
 		percpu_write(xen_cr3, cr3);
@@ -1451,19 +1462,52 @@ static void __init xen_release_pmd_init(unsigned long pfn)
 	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
+static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct multicall_space mcs;
+	struct mmuext_op *op;
+
+	mcs = __xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = cmd;
+	op->arg1.mfn = pfn_to_mfn(pfn);
+
+	MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
+}
+
+static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot)
+{
+	struct multicall_space mcs;
+	unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT);
+
+	mcs = __xen_mc_entry(0);
+	MULTI_update_va_mapping(mcs.mc, (unsigned long)addr,
+				pfn_pte(pfn, prot), 0);
+}
+
 /* This needs to make sure the new pte page is pinned iff its being
    attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
+				    unsigned level)
 {
-	struct page *page = pfn_to_page(pfn);
+	bool pinned = PagePinned(virt_to_page(mm->pgd));
+
+	trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned);
+
+	if (pinned) {
+		struct page *page = pfn_to_page(pfn);
 
-	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);
 
 		if (!PageHighMem(page)) {
-			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			xen_mc_batch();
+
+			__set_pfn_prot(pfn, PAGE_KERNEL_RO);
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+				__pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		} else {
 			/* make sure there are no stray mappings of
 			   this page */
@@ -1483,15 +1527,23 @@ static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
+static inline void xen_release_ptpage(unsigned long pfn, unsigned level)
 {
 	struct page *page = pfn_to_page(pfn);
+	bool pinned = PagePinned(page);
+
+	trace_xen_mmu_release_ptpage(pfn, level, pinned);
 
-	if (PagePinned(page)) {
+	if (pinned) {
 		if (!PageHighMem(page)) {
+			xen_mc_batch();
+
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+				__pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+
+			__set_pfn_prot(pfn, PAGE_KERNEL);
+
+			xen_mc_issue(PARAVIRT_LAZY_MMU);
 		}
 		ClearPagePinned(page);
 	}
@@ -1626,15 +1678,17 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 void __init xen_setup_machphys_mapping(void)
 {
 	struct xen_machphys_mapping mapping;
-	unsigned long machine_to_phys_nr_ents;
 
 	if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
 		machine_to_phys_mapping = (unsigned long *)mapping.v_start;
-		machine_to_phys_nr_ents = mapping.max_mfn + 1;
+		machine_to_phys_nr = mapping.max_mfn + 1;
 	} else {
-		machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
+		machine_to_phys_nr = MACH2PHYS_NR_ENTRIES;
 	}
-	machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
+#ifdef CONFIG_X86_32
+	WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1))
+		< machine_to_phys_mapping);
+#endif
 }
 
 #ifdef CONFIG_X86_64
@@ -1825,6 +1879,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 # endif
 #else
 	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+	case VVAR_PAGE:
 #endif
 	case FIX_TEXT_POKE0:
 	case FIX_TEXT_POKE1:
@@ -1865,7 +1920,8 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #ifdef CONFIG_X86_64
 	/* Replicate changes to map the vsyscall page into the user
 	   pagetable vsyscall mapping. */
-	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+	if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) ||
+	    idx == VVAR_PAGE) {
 		unsigned long vaddr = __fix_to_virt(idx);
 		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
 	}
@@ -1897,9 +1953,6 @@ void __init xen_ident_map_ISA(void)
 
 static void __init xen_post_allocator_init(void)
 {
-#ifdef CONFIG_XEN_DEBUG
-	pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
-#endif
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
@@ -2309,17 +2362,3 @@ out:
 	return err;
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
-
-#ifdef CONFIG_XEN_DEBUG_FS
-static int p2m_dump_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, p2m_dump_show, NULL);
-}
-
-static const struct file_operations p2m_dump_fops = {
-	.open = p2m_dump_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-#endif /* CONFIG_XEN_DEBUG_FS */