author     Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>  2018-07-20 05:01:44 -0400
committer  Boris Ostrovsky <boris.ostrovsky@oracle.com>                2018-07-26 23:05:13 -0400
commit     ae4c51a50c990d6feba7058c181dc8f22ca5f1d8
tree       3cec1163486e161d33569c386d2f51e01011a279
parent     8c3799ee25e1fda159099af09f5f2e86091e41d4
xen/balloon: Share common memory reservation routines
The memory {increase|decrease}_reservation and VA mapping update/reset
code used in the balloon driver can be made common, so that other drivers
can reuse the same functionality without open-coding it.

Create a dedicated file for the shared code and export the corresponding
symbols for other kernel modules.
Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
 drivers/xen/Makefile          |   1
 drivers/xen/balloon.c         |  75
 drivers/xen/mem-reservation.c | 118
 include/xen/mem-reservation.h |  59
 4 files changed, 184 insertions(+), 69 deletions(-)
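For illustration only (not part of this commit): a minimal sketch of how another kernel module could reuse the exported helpers instead of open-coding the hypercall plumbing, following the same call sequence the balloon driver uses below. The example_* functions and the caller-supplied pages/frames arrays are hypothetical; only the xenmem_reservation_*() calls come from this patch.

/* Hypothetical consumer of the new helpers; for illustration only. */
#include <linux/errno.h>
#include <linux/mm.h>

#include <xen/page.h>
#include <xen/mem-reservation.h>

/* Give 'count' pages back to Xen (as the balloon driver does when inflating). */
static int example_give_pages_to_xen(struct page **pages, xen_pfn_t *frames,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		/* XENMEM_decrease_reservation expects GFNs. */
		frames[i] = xen_page_to_gfn(pages[i]);
		xenmem_reservation_scrub_page(pages[i]);
	}

	/* Tear down PV VA mappings; a no-op on auto-translated guests. */
	xenmem_reservation_va_mapping_reset(count, pages);

	ret = xenmem_reservation_decrease(count, frames);
	return ret == count ? 0 : -ENOMEM;
}

/* Reclaim 'count' pages from Xen and re-establish their mappings. */
static int example_take_pages_from_xen(struct page **pages, xen_pfn_t *frames,
				       int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		/* XENMEM_populate_physmap expects Xen-granularity PFNs. */
		frames[i] = page_to_xen_pfn(pages[i]);
	}

	ret = xenmem_reservation_increase(count, frames);
	if (ret <= 0)
		return -ENOMEM;

	/* Restore PV VA mappings for the pages Xen actually populated. */
	xenmem_reservation_va_mapping_update(ret, pages, frames);
	return ret;
}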
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 48b154276179..129dd1cc1b83 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -2,6 +2,7 @@
 obj-$(CONFIG_HOTPLUG_CPU)	+= cpu_hotplug.o
 obj-$(CONFIG_X86)		+= fallback.o
 obj-y	+= grant-table.o features.o balloon.o manage.o preempt.o time.o
+obj-y	+= mem-reservation.o
 obj-y	+= events/
 obj-y	+= xenbus/
 
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 065f0b607373..e12bb256036f 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -71,6 +71,7 @@
 #include <xen/balloon.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 static int xen_hotplug_unpopulated;
 
@@ -157,13 +158,6 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 #define GFP_BALLOON \
 	(GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
-static void scrub_page(struct page *page)
-{
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
-}
-
 /* balloon_append: add the given page to the balloon. */
 static void __balloon_append(struct page *page)
 {
@@ -463,11 +457,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 	int rc;
 	unsigned long i;
 	struct page   *page;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};
 
 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
@@ -479,16 +468,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 			break;
 		}
 
-		/* XENMEM_populate_physmap requires a PFN based on Xen
-		 * granularity.
-		 */
 		frame_list[i] = page_to_xen_pfn(page);
 		page = balloon_next_page(page);
 	}
 
-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+	rc = xenmem_reservation_increase(nr_pages, frame_list);
 	if (rc <= 0)
 		return BP_EAGAIN;
 
@@ -496,29 +480,7 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		page = balloon_retrieve(false);
 		BUG_ON(page == NULL);
 
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
-
-			set_phys_to_machine(pfn, frame_list[i]);
-
-			/* Link back into the page tables if not highmem. */
-			if (!PageHighMem(page)) {
-				int ret;
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						mfn_pte(frame_list[i], PAGE_KERNEL),
-						0);
-				BUG_ON(ret);
-			}
-		}
-#endif
+		xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
 
 		/* Relinquish the page back to the allocator. */
 		free_reserved_page(page);
@@ -535,11 +497,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	unsigned long i;
 	struct page *page, *tmp;
 	int ret;
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = EXTENT_ORDER,
-		.domid        = DOMID_SELF
-	};
 	LIST_HEAD(pages);
 
 	if (nr_pages > ARRAY_SIZE(frame_list))
@@ -553,7 +510,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			break;
 		}
 		adjust_managed_page_count(page, -1);
-		scrub_page(page);
+		xenmem_reservation_scrub_page(page);
 		list_add(&page->lru, &pages);
 	}
 
@@ -572,28 +529,10 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	 */
 	i = 0;
 	list_for_each_entry_safe(page, tmp, &pages, lru) {
-		/* XENMEM_decrease_reservation requires a GFN */
 		frame_list[i++] = xen_page_to_gfn(page);
 
-#ifdef CONFIG_XEN_HAVE_PVMMU
-		/*
-		 * We don't support PV MMU when Linux and Xen is using
-		 * different page granularity.
-		 */
-		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
-
-		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-			unsigned long pfn = page_to_pfn(page);
+		xenmem_reservation_va_mapping_reset(1, &page);
 
-			if (!PageHighMem(page)) {
-				ret = HYPERVISOR_update_va_mapping(
-						(unsigned long)__va(pfn << PAGE_SHIFT),
-						__pte_ma(0), 0);
-				BUG_ON(ret);
-			}
-			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-		}
-#endif
 		list_del(&page->lru);
 
 		balloon_append(page);
@@ -601,9 +540,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
 	flush_tlb_all();
 
-	set_xen_guest_handle(reservation.extent_start, frame_list);
-	reservation.nr_extents = nr_pages;
-	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+	ret = xenmem_reservation_decrease(nr_pages, frame_list);
 	BUG_ON(ret != nr_pages);
 
 	balloon_stats.current_pages -= nr_pages;
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
new file mode 100644
index 000000000000..084799c6180e
--- /dev/null
+++ b/drivers/xen/mem-reservation.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/******************************************************************************
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#include <asm/xen/hypercall.h>
+
+#include <xen/interface/memory.h>
+#include <xen/mem-reservation.h>
+
+/*
+ * Use one extent per PAGE_SIZE to avoid to break down the page into
+ * multiple frame.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+					    struct page **pages,
+					    xen_pfn_t *frames)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = pages[i];
+		unsigned long pfn = page_to_pfn(page);
+
+		BUG_ON(!page);
+
+		/*
+		 * We don't support PV MMU when Linux and Xen is using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		set_phys_to_machine(pfn, frames[i]);
+
+		/* Link back into the page tables if not highmem. */
+		if (!PageHighMem(page)) {
+			int ret;
+
+			ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					mfn_pte(frames[i], PAGE_KERNEL),
+					0);
+			BUG_ON(ret);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+					   struct page **pages)
+{
+	int i;
+
+	for (i = 0; i < count; i++) {
+		struct page *page = pages[i];
+		unsigned long pfn = page_to_pfn(page);
+
+		/*
+		 * We don't support PV MMU when Linux and Xen are using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
+		if (!PageHighMem(page)) {
+			int ret;
+
+			ret = HYPERVISOR_update_va_mapping(
+					(unsigned long)__va(pfn << PAGE_SHIFT),
+					__pte_ma(0), 0);
+			BUG_ON(ret);
+		}
+		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+	}
+}
+EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
+#endif /* CONFIG_XEN_HAVE_PVMMU */
+
+/* @frames is an array of PFNs */
+int xenmem_reservation_increase(int count, xen_pfn_t *frames)
+{
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = EXTENT_ORDER,
+		.domid        = DOMID_SELF
+	};
+
+	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
+	set_xen_guest_handle(reservation.extent_start, frames);
+	reservation.nr_extents = count;
+	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_increase);
+
+/* @frames is an array of GFNs */
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
+{
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = EXTENT_ORDER,
+		.domid        = DOMID_SELF
+	};
+
+	/* XENMEM_decrease_reservation requires a GFN */
+	set_xen_guest_handle(reservation.extent_start, frames);
+	reservation.nr_extents = count;
+	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
new file mode 100644
index 000000000000..80b52b4945e9
--- /dev/null
+++ b/include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _XENMEM_RESERVATION_H
+#define _XENMEM_RESERVATION_H
+
+#include <linux/highmem.h>
+
+#include <xen/page.h>
+
+static inline void xenmem_reservation_scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+	clear_highpage(page);
+#endif
+}
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+					    struct page **pages,
+					    xen_pfn_t *frames);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+					   struct page **pages);
+#endif
+
+static inline void xenmem_reservation_va_mapping_update(unsigned long count,
+							 struct page **pages,
+							 xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		__xenmem_reservation_va_mapping_update(count, pages, frames);
+#endif
+}
+
+static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
+							struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		__xenmem_reservation_va_mapping_reset(count, pages);
+#endif
+}
+
+int xenmem_reservation_increase(int count, xen_pfn_t *frames);
+
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
+
+#endif