author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-31 11:38:18 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-31 11:38:18 -0500
commit     14164b46fc994bcf82963ace00372cf808a31af1 (patch)
tree       e2a26a3a42a55bc3d2887cf9793d03843e1fa272 /drivers
parent     e2a0f813e0d53014b78aae76f0359c8a41f05eeb (diff)
parent     f93576e1ac34fd7a93d6f3432e71295bbe6a27ce (diff)
Merge tag 'stable/for-linus-3.14-rc0-late-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull Xen bugfixes from Konrad Rzeszutek Wilk:
"Bug-fixes for the new features that were added during this cycle.
There are also two fixes for long-standing issues for which we now have
a solution: grant-table operations were doing extra work that was not
needed, causing performance issues, and the self-balloon code was too
aggressive, causing OOMs.
Details:
- Xen ARM couldn't use the new FIFO events
- Xen ARM couldn't use the SWIOTLB if compiled as 32-bit with 64-bit PCIe devices.
- Grant table code was doing needless M2P operations.
- Ratchet down the self-balloon code so it won't OOM.
- Fix misplaced kfree in Xen PVH error code paths"
* tag 'stable/for-linus-3.14-rc0-late-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/pvh: Fix misplaced kfree from xlated_setup_gnttab_pages
drivers: xen: deaggressive selfballoon driver
xen/grant-table: Avoid m2p_override during mapping
xen/gnttab: Use phys_addr_t to describe the grant frame base address
xen: swiotlb: handle sizeof(dma_addr_t) != sizeof(phys_addr_t)
arm/xen: Initialize event channels earlier
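
The headline grant-table change splits the map/unmap entry points so that purely in-kernel users skip the m2p_override bookkeeping entirely. Condensed from the grant-table.c hunks below (the public prototypes also move in include/xen/grant_table.h, which lies outside this 'drivers'-limited diff):

    /* Kernel-only mappings: no kmap_ops, no m2p_override bookkeeping. */
    int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                        struct page **pages, unsigned int count);
    int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                          struct page **pages, unsigned int count);

    /* Userspace-visible mappings (gntdev): kmap_ops kept, m2p_override used. */
    int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
                                  struct gnttab_map_grant_ref *kmap_ops,
                                  struct page **pages, unsigned int count);
    int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
                                    struct gnttab_map_grant_ref *kmap_ops,
                                    struct page **pages, unsigned int count);

Callers that never expose the mapping to userspace, such as xen-blkback, simply drop their NULL kmap_ops argument; gntdev, which does, moves to the _userspace variants.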
Diffstat (limited to 'drivers')
 drivers/block/xen-blkback/blkback.c | 15
 drivers/xen/gntdev.c                | 13
 drivers/xen/grant-table.c           | 95
 drivers/xen/swiotlb-xen.c           | 22
 drivers/xen/xen-selfballoon.c       | 22
 5 files changed, 135 insertions(+), 32 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 4b97b86da926..da18046d0e07 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,8 +285,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 			!rb_next(&persistent_gnt->node)) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -321,8 +320,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;
 
 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, pages,
-				segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -330,7 +328,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
+		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
 		BUG_ON(ret);
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
@@ -670,15 +668,14 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 			    GNTMAP_host_map, pages[i]->handle);
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
-				invcount);
+			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 			BUG_ON(ret);
 			put_free_pages(blkif, unmap_pages, invcount);
 			invcount = 0;
 		}
 	}
 	if (invcount) {
-		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
+		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
 		BUG_ON(ret);
 		put_free_pages(blkif, unmap_pages, invcount);
 	}
@@ -740,7 +737,7 @@ again:
 	}
 
 	if (segs_to_map) {
-		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
+		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
 		BUG_ON(ret);
 	}
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 073b4a19a8b0..34a2704fbc88 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,8 +284,10 @@ static int map_grant_pages(struct grant_map *map)
 	}
 
 	pr_debug("map %d+%d\n", map->index, map->count);
-	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
-			map->pages, map->count);
+	err = gnttab_map_refs_userspace(map->map_ops,
+					use_ptemod ? map->kmap_ops : NULL,
+					map->pages,
+					map->count);
 	if (err)
 		return err;
 
@@ -315,9 +317,10 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 		}
 	}
 
-	err = gnttab_unmap_refs(map->unmap_ops + offset,
-			use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
-			pages);
+	err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
+					  use_ptemod ? map->kmap_ops + offset : NULL,
+					  map->pages + offset,
+					  pages);
 	if (err)
 		return err;
 
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 1ce1c40331f3..8ee13e2e45e2 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -837,7 +837,7 @@ unsigned int gnttab_max_grant_frames(void)
 }
 EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 
-int gnttab_setup_auto_xlat_frames(unsigned long addr)
+int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 {
 	xen_pfn_t *pfn;
 	unsigned int max_nr_gframes = __max_nr_grant_frames();
@@ -849,8 +849,8 @@ int gnttab_setup_auto_xlat_frames(unsigned long addr)
 
 	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
 	if (vaddr == NULL) {
-		pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
-			addr);
+		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
+			&addr);
 		return -ENOMEM;
 	}
 	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
@@ -928,15 +928,17 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 }
 EXPORT_SYMBOL_GPL(gnttab_batch_copy);
 
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
-		    struct gnttab_map_grant_ref *kmap_ops,
-		    struct page **pages, unsigned int count)
+int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		      struct gnttab_map_grant_ref *kmap_ops,
+		      struct page **pages, unsigned int count,
+		      bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
 	pte_t *pte;
-	unsigned long mfn;
+	unsigned long mfn, pfn;
 
+	BUG_ON(kmap_ops && !m2p_override);
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 	if (ret)
 		return ret;
@@ -955,10 +957,12 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 			set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
 					map_ops[i].dev_bus_addr >> PAGE_SHIFT);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
@@ -975,8 +979,20 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 		} else {
 			mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
 		}
-		ret = m2p_add_override(mfn, pages[i], kmap_ops ?
-				       &kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+
+		WARN_ON(PagePrivate(pages[i]));
+		SetPagePrivate(pages[i]);
+		set_page_private(pages[i], mfn);
+
+		pages[i]->index = pfn_to_mfn(pfn);
+		if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		if (m2p_override)
+			ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+					       &kmap_ops[i] : NULL);
 		if (ret)
 			goto out;
 	}
@@ -987,15 +1003,32 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 
 	return ret;
 }
+
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+		    struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_map_refs);
 
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
+			      struct gnttab_map_grant_ref *kmap_ops,
+			      struct page **pages, unsigned int count)
+{
+	return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
+
+int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 		      struct gnttab_map_grant_ref *kmap_ops,
-		      struct page **pages, unsigned int count)
+		      struct page **pages, unsigned int count,
+		      bool m2p_override)
 {
 	int i, ret;
 	bool lazy = false;
+	unsigned long pfn, mfn;
 
+	BUG_ON(kmap_ops && !m2p_override);
 	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 	if (ret)
 		return ret;
@@ -1006,17 +1039,33 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 			set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
 					INVALID_P2M_ENTRY);
 		}
-		return ret;
+		return 0;
 	}
 
-	if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+	if (m2p_override &&
+	    !in_interrupt() &&
+	    paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
 		arch_enter_lazy_mmu_mode();
 		lazy = true;
 	}
 
 	for (i = 0; i < count; i++) {
-		ret = m2p_remove_override(pages[i], kmap_ops ?
-					  &kmap_ops[i] : NULL);
+		pfn = page_to_pfn(pages[i]);
+		mfn = get_phys_to_machine(pfn);
+		if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		set_page_private(pages[i], INVALID_P2M_ENTRY);
+		WARN_ON(!PagePrivate(pages[i]));
+		ClearPagePrivate(pages[i]);
+		set_phys_to_machine(pfn, pages[i]->index);
+		if (m2p_override)
+			ret = m2p_remove_override(pages[i],
+						  kmap_ops ?
+						  &kmap_ops[i] : NULL,
+						  mfn);
 		if (ret)
 			goto out;
 	}
@@ -1027,8 +1076,22 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 
 	return ret;
 }
+
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
+		      struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
+}
 EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
 
+int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
+				struct gnttab_map_grant_ref *kmap_ops,
+				struct page **pages, unsigned int count)
+{
+	return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
+
 static unsigned nr_status_frames(unsigned nr_grant_frames)
 {
 	BUG_ON(grefs_per_grant_frame == 0);
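
Design-wise, the four exported functions are thin wrappers around one internal helper gated by a bool m2p_override flag, so the fast path is chosen at the API boundary rather than by a runtime NULL check. The bookkeeping that replaces the unconditional m2p_override round-trip, condensed from the hunks above with error handling elided:

    /* map: remember the foreign mfn and the original p2m entry */
    SetPagePrivate(page);
    set_page_private(page, mfn);
    page->index = pfn_to_mfn(pfn);
    set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));

    /* unmap: verify the frame is foreign, then restore the p2m entry */
    mfn = get_phys_to_machine(pfn);        /* must carry FOREIGN_FRAME_BIT */
    set_page_private(page, INVALID_P2M_ENTRY);
    ClearPagePrivate(page);
    set_phys_to_machine(pfn, page->index);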
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 1eac0731c349..ebd8f218a788 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -75,14 +75,32 @@ static unsigned long xen_io_tlb_nslabs;
 
 static u64 start_dma_addr;
 
+/*
+ * Both of these functions should avoid PFN_PHYS because phys_addr_t
+ * can be 32bit when dma_addr_t is 64bit leading to a loss in
+ * information if the shift is done before casting to 64bit.
+ */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	return phys_to_machine(XPADDR(paddr)).maddr;
+	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+
+	dma |= paddr & ~PAGE_MASK;
+
+	return dma;
 }
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	return machine_to_phys(XMADDR(baddr)).paddr;
+	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
+	phys_addr_t paddr = dma;
+
+	BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
+
+	paddr |= baddr & ~PAGE_MASK;
+
+	return paddr;
 }
 
 static inline dma_addr_t xen_virt_to_bus(void *address)
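
The new comment is the heart of this fix: a macro like PFN_PHYS() shifts the frame number before widening it, so on a 32-bit build with a 64-bit dma_addr_t the high bits are lost. A minimal standalone illustration of the pitfall (ordinary userspace C, not kernel code; assumes 4 KiB pages and a frame at the 4 GiB boundary):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            uint32_t pfn = 0x100000;                      /* frame at 4 GiB */
            uint32_t wrong = pfn << PAGE_SHIFT;           /* shifted at 32 bits: wraps to 0 */
            uint64_t right = (uint64_t)pfn << PAGE_SHIFT; /* widened first: 0x100000000 */

            printf("wrong=%#x right=%#llx\n", wrong, (unsigned long long)right);
            return 0;
    }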
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 21e18c18c7a1..745ad79c1d8e 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -175,6 +175,7 @@ static void frontswap_selfshrink(void)
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
+#define PAGES2MB(pages) ((pages) >> (20 - PAGE_SHIFT))
 
 /*
  * Use current balloon size, the goal (vm_committed_as), and hysteresis
@@ -525,6 +526,7 @@ EXPORT_SYMBOL(register_xen_selfballooning);
 int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 {
 	bool enable = false;
+	unsigned long reserve_pages;
 
 	if (!xen_domain())
 		return -ENODEV;
@@ -549,6 +551,26 @@ int xen_selfballoon_init(bool use_selfballooning, bool use_frontswap_selfshrink)
 	if (!enable)
 		return -ENODEV;
 
+	/*
+	 * Give selfballoon_reserved_mb a default value (10% of total ram
+	 * pages) to make selfballoon not so aggressive.
+	 *
+	 * There are mainly two reasons:
+	 * 1) The original goal_page didn't consider some pages used by kernel
+	 *    space, like slab pages and memory used by device drivers.
+	 *
+	 * 2) The balloon driver may not give back memory to the guest OS fast
+	 *    enough when the workload suddenly acquires a lot of physical
+	 *    memory.
+	 *
+	 * In both cases, the guest OS will suffer from memory pressure and
+	 * the OOM killer may be triggered.
+	 * By reserving an extra 10% of total ram pages, we can keep the
+	 * system much more reliable and responsive in some cases.
+	 */
+	if (!selfballoon_reserved_mb) {
+		reserve_pages = totalram_pages / 10;
+		selfballoon_reserved_mb = PAGES2MB(reserve_pages);
+	}
 	schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
 
 	return 0;
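
To make the new default concrete, the arithmetic for a hypothetical 2 GiB guest with 4 KiB pages (PAGE_SHIFT == 12) works out as follows:

    /* MB2PAGES(1)    = 1 << (20 - 12)  = 256 pages per MiB
     * totalram_pages = 2 GiB / 4 KiB   = 524288 pages
     * reserve_pages  = 524288 / 10     = 52428 pages
     * selfballoon_reserved_mb = PAGES2MB(52428) = 52428 >> 8 = 204 MiB,
     * i.e. roughly 10% of the guest's RAM is kept back from ballooning.
     */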