 drivers/iommu/virtio-iommu.c      | 40
 drivers/vhost/vhost.h             |  2
 include/uapi/linux/virtio_iommu.h | 32
 mm/balloon_compaction.c           | 69
 4 files changed, 89 insertions(+), 54 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 433f4d2ee956..80a740df0737 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
 /*
  * Virtio driver for the paravirtualized IOMMU
  *
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
 	u64				pgsize_bitmap;
-	u8				domain_bits;
+	u32				first_domain;
+	u32				last_domain;
+	/* Supported MAP flags */
+	u32				map_flags;
 	u32				probe_size;
 };
 
@@ -62,6 +65,7 @@ struct viommu_domain {
 	struct viommu_dev		*viommu;
 	struct mutex			mutex; /* protects viommu pointer */
 	unsigned int			id;
+	u32				map_flags;
 
 	spinlock_t			mappings_lock;
 	struct rb_root_cached		mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
 		return -ENOENT;
 	case VIRTIO_IOMMU_S_FAULT:
 		return -EFAULT;
+	case VIRTIO_IOMMU_S_NOMEM:
+		return -ENOMEM;
 	case VIRTIO_IOMMU_S_IOERR:
 	case VIRTIO_IOMMU_S_DEVERR:
 	default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
 {
 	int ret;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
-	unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
-				  (1U << viommu->domain_bits) - 1;
 
 	vdomain->viommu = viommu;
+	vdomain->map_flags = viommu->map_flags;
 
 	domain->pgsize_bitmap = viommu->pgsize_bitmap;
 	domain->geometry = viommu->geometry;
 
-	ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+			      viommu->last_domain, GFP_KERNEL);
 	if (ret >= 0)
 		vdomain->id = (unsigned int)ret;
 
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		      phys_addr_t paddr, size_t size, int prot)
 {
 	int ret;
-	int flags;
+	u32 flags;
 	struct virtio_iommu_req_map map;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
 		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
 		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
 
+	if (flags & ~vdomain->map_flags)
+		return -EINVAL;
+
 	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
 	if (ret)
 		return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
 		goto err_free_vqs;
 	}
 
-	viommu->domain_bits = 32;
+	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+	viommu->last_domain = ~0U;
 
 	/* Optional features */
 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
 			     struct virtio_iommu_config, input_range.end,
 			     &input_end);
 
-	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
-			     struct virtio_iommu_config, domain_bits,
-			     &viommu->domain_bits);
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.start,
+			     &viommu->first_domain);
+
+	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+			     struct virtio_iommu_config, domain_range.end,
+			     &viommu->last_domain);
 
 	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
 			     struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
 		.force_aperture = true,
 	};
 
+	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
 	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
 
 	virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
 
 static unsigned int features[] = {
 	VIRTIO_IOMMU_F_MAP_UNMAP,
-	VIRTIO_IOMMU_F_DOMAIN_BITS,
 	VIRTIO_IOMMU_F_INPUT_RANGE,
+	VIRTIO_IOMMU_F_DOMAIN_RANGE,
 	VIRTIO_IOMMU_F_PROBE,
+	VIRTIO_IOMMU_F_MMIO,
 };
 
 static struct virtio_device_id id_table[] = {
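The driver changes above replace the single domain_bits limit with an explicit [first_domain, last_domain] range and make viommu_map() reject flags the device did not advertise. Below is a minimal standalone sketch of that flag check in plain C; check_map_flags() is a hypothetical helper for illustration, not part of the kernel driver, though the constants mirror the UAPI header further down.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define VIRTIO_IOMMU_MAP_F_READ  (1u << 0)
#define VIRTIO_IOMMU_MAP_F_WRITE (1u << 1)
#define VIRTIO_IOMMU_MAP_F_MMIO  (1u << 2)

/* Reject any flag the device did not advertise, as viommu_map() now does. */
static int check_map_flags(uint32_t requested, uint32_t supported)
{
	if (requested & ~supported)
		return -EINVAL;
	return 0;
}

int main(void)
{
	/* A device without VIRTIO_IOMMU_F_MMIO advertises only READ|WRITE. */
	uint32_t supported = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;

	printf("RW map:   %d\n", check_map_flags(VIRTIO_IOMMU_MAP_F_READ |
						 VIRTIO_IOMMU_MAP_F_WRITE,
						 supported));
	printf("MMIO map: %d\n", check_map_flags(VIRTIO_IOMMU_MAP_F_MMIO,
						 supported)); /* -EINVAL */
	return 0;
}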
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..42a8c2a13ab1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
 };
 
 #if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #else
 #define VHOST_ARCH_CAN_ACCEL_UACCESS 0
 #endif
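This one-liner forces VHOST_ARCH_CAN_ACCEL_UACCESS to 0 on every architecture, so code guarded by it is compiled out. A hedged sketch of how such a 0/1 macro gates a fast path at compile time; the two functions here are illustrative stand-ins, not actual vhost internals:

#include <stdio.h>

/* Mirrors the header above: the accelerated path is now disabled. */
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0

static void access_via_direct_map(void) { puts("direct-map fast path"); }
static void access_via_copy_user(void)  { puts("copy_to/from_user fallback"); }

int main(void)
{
	/* A branch on a constant 0/1 macro is folded away by the compiler. */
	if (VHOST_ARCH_CAN_ACCEL_UACCESS)
		access_via_direct_map();
	else
		access_via_copy_user();
	return 0;
}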
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index ba1b460c9944..237e36a280cb 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause */
 /*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
  *
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
  */
 #ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
 #define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
 
 /* Feature bits */
 #define VIRTIO_IOMMU_F_INPUT_RANGE		0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS		1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE		1
 #define VIRTIO_IOMMU_F_MAP_UNMAP		2
 #define VIRTIO_IOMMU_F_BYPASS			3
 #define VIRTIO_IOMMU_F_PROBE			4
+#define VIRTIO_IOMMU_F_MMIO			5
 
-struct virtio_iommu_range {
-	__u64					start;
-	__u64					end;
+struct virtio_iommu_range_64 {
+	__le64					start;
+	__le64					end;
+};
+
+struct virtio_iommu_range_32 {
+	__le32					start;
+	__le32					end;
 };
 
 struct virtio_iommu_config {
 	/* Supported page sizes */
-	__u64					page_size_mask;
+	__le64					page_size_mask;
 	/* Supported IOVA range */
-	struct virtio_iommu_range		input_range;
+	struct virtio_iommu_range_64		input_range;
 	/* Max domain ID size */
-	__u8					domain_bits;
-	__u8					padding[3];
+	struct virtio_iommu_range_32		domain_range;
 	/* Probe buffer size */
-	__u32					probe_size;
+	__le32					probe_size;
 };
 
 /* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
 #define VIRTIO_IOMMU_S_RANGE			0x05
 #define VIRTIO_IOMMU_S_NOENT			0x06
 #define VIRTIO_IOMMU_S_FAULT			0x07
+#define VIRTIO_IOMMU_S_NOMEM			0x08
 
 struct virtio_iommu_req_head {
 	__u8					type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
 
 #define VIRTIO_IOMMU_MAP_F_READ			(1 << 0)
 #define VIRTIO_IOMMU_MAP_F_WRITE		(1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC			(1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO			(1 << 2)
 
 #define VIRTIO_IOMMU_MAP_F_MASK			(VIRTIO_IOMMU_MAP_F_READ |	\
 						 VIRTIO_IOMMU_MAP_F_WRITE |	\
-						 VIRTIO_IOMMU_MAP_F_EXEC |	\
 						 VIRTIO_IOMMU_MAP_F_MMIO)
 
 struct virtio_iommu_req_map {
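Beyond the new feature bits, the UAPI change switches config fields from native-endian __u32/__u64 to explicit little-endian __le32/__le64: virtio config space is little-endian on the wire, so a big-endian guest must byte-swap. The standalone decoder below stands in for the kernel's le32_to_cpu(); le32_to_host() is a hypothetical name for this sketch.

#include <stdint.h>
#include <stdio.h>

/* Assemble the value from explicit byte positions, which is correct on a
 * host of either endianness -- the same job le32_to_cpu() does in-kernel. */
static uint32_t le32_to_host(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* domain_range.end = 0xffffffff as it would sit in config space */
	const uint8_t raw[4] = { 0xff, 0xff, 0xff, 0xff };

	printf("last_domain = 0x%x\n", le32_to_host(raw));
	return 0;
}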
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 83a7b614061f..798275a51887 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
 	 * memory corruption is possible and we should stop execution.
 	 */
 	BUG_ON(!trylock_page(page));
-	list_del(&page->lru);
 	balloon_page_insert(b_dev_info, page);
 	unlock_page(page);
 	__count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
  * @b_dev_info: balloon device descriptor where we will insert a new page to
  * @pages: pages to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
  *
  * Return: number of pages that were enqueued.
  */
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
 
 	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 	list_for_each_entry_safe(page, tmp, pages, lru) {
+		list_del(&page->lru);
 		balloon_page_enqueue_one(b_dev_info, page);
 		n_pages++;
 	}
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
  * @n_req_pages: number of requested pages.
  *
  * Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
  * This function tries to remove @n_req_pages from the ballooned pages and
  * return them to the caller in the @pages list.
  *
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
  *
  * Return: number of pages that were added to the @pages list.
  */
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 
 /*
  * balloon_page_alloc - allocates a new page for insertion into the balloon
  *			page list.
+ *
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
  *
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system. This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Return: struct page for the allocated page or NULL on allocation failure.
  */
 struct page *balloon_page_alloc(void)
 {
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
 
 /*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- *			  page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
  * @page: new page to enqueue - allocated using balloon_page_alloc.
  *
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
  */
 void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
 			  struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
 
 /*
  * balloon_page_dequeue - removes a page from balloon's page list and returns
- *			  the its address to allow the driver release the page.
+ *			  its address to allow the driver to release the page.
  * @b_dev_info: balloon device decriptor where we will grab a page from.
  *
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
  */
 struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 {
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
 	if (n_pages != 1) {
 		/*
 		 * If we are unable to dequeue a balloon page because the page
-		 * list is empty and there is no isolated pages, then something
+		 * list is empty and there are no isolated pages, then something
 		 * went out of track and some balloon pages are lost.
-		 * BUG() here, otherwise the balloon driver may get stuck into
+		 * BUG() here, otherwise the balloon driver may get stuck in
 		 * an infinite loop while attempting to release all its pages.
 		 */
 		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
 
 	/*
 	 * We can not easily support the no copy case here so ignore it as it
-	 * is unlikely to be use with ballon pages. See include/linux/hmm.h for
-	 * user of the MIGRATE_SYNC_NO_COPY mode.
+	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
+	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
 	if (mode == MIGRATE_SYNC_NO_COPY)
 		return -EINVAL;
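Taken together, the reworked comments pin down the balloon API contract: allocate with balloon_page_alloc(), enqueue before removing the page from the guest, and treat a NULL dequeue as "retry later" rather than "empty". A kernel-style sketch of that usage under those assumptions (not a buildable module; inflate_one() and deflate_one() are hypothetical driver helpers):

/* Inflate: allocate a fresh page (not yet on any list) and enqueue it. */
static int inflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_alloc();

	if (!page)
		return -ENOMEM;		/* caller may retry later */

	balloon_page_enqueue(b_dev_info, page);
	return 0;
}

/* Deflate: dequeue one page and release it back to the guest. */
static int deflate_one(struct balloon_dev_info *b_dev_info)
{
	struct page *page = balloon_page_dequeue(b_dev_info);

	/*
	 * NULL does not necessarily mean the balloon is empty: pages can be
	 * temporarily isolated for compaction, so the caller keeps its own
	 * count and retries, per the comment on balloon_page_dequeue().
	 */
	if (!page)
		return -EAGAIN;

	__free_page(page);
	return 0;
}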