aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKonstantin Khlebnikov <k.khlebnikov@samsung.com>2014-10-09 18:29:29 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:26:01 -0400
commit9d1ba8056474a208ed9efb7e58cd014795d9f818 (patch)
tree0a463b12b5ed28e666570b6b15051af1b5d93934
parentd6d86c0a7f8ddc5b38cf089222cb1d9540762dc2 (diff)
mm/balloon_compaction: remove balloon mapping and flag AS_BALLOON_MAP
Now ballooned pages are detected using PageBalloon(), so the fake mapping is no longer required. This patch links ballooned pages to their balloon device using the field page->private instead of page->mapping. It also embeds balloon_dev_info directly into struct virtio_balloon. Signed-off-by: Konstantin Khlebnikov <k.khlebnikov@samsung.com> Cc: Rafael Aquini <aquini@redhat.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--drivers/virtio/virtio_balloon.c60
-rw-r--r--include/linux/balloon_compaction.h72
-rw-r--r--include/linux/pagemap.h18
-rw-r--r--mm/balloon_compaction.c95
4 files changed, 39 insertions, 206 deletions
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c3eb93fc9261..2bad7f9dd2ac 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -59,7 +59,7 @@ struct virtio_balloon
59 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE 59 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
60 * to num_pages above. 60 * to num_pages above.
61 */ 61 */
62 struct balloon_dev_info *vb_dev_info; 62 struct balloon_dev_info vb_dev_info;
63 63
64 /* Synchronize access/update to this struct virtio_balloon elements */ 64 /* Synchronize access/update to this struct virtio_balloon elements */
65 struct mutex balloon_lock; 65 struct mutex balloon_lock;
@@ -127,7 +127,7 @@ static void set_page_pfns(u32 pfns[], struct page *page)
127 127
128static void fill_balloon(struct virtio_balloon *vb, size_t num) 128static void fill_balloon(struct virtio_balloon *vb, size_t num)
129{ 129{
130 struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; 130 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
131 131
132 /* We can only do one array worth at a time. */ 132 /* We can only do one array worth at a time. */
133 num = min(num, ARRAY_SIZE(vb->pfns)); 133 num = min(num, ARRAY_SIZE(vb->pfns));
@@ -171,7 +171,7 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
171static void leak_balloon(struct virtio_balloon *vb, size_t num) 171static void leak_balloon(struct virtio_balloon *vb, size_t num)
172{ 172{
173 struct page *page; 173 struct page *page;
174 struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; 174 struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
175 175
176 /* We can only do one array worth at a time. */ 176 /* We can only do one array worth at a time. */
177 num = min(num, ARRAY_SIZE(vb->pfns)); 177 num = min(num, ARRAY_SIZE(vb->pfns));
@@ -353,12 +353,11 @@ static int init_vqs(struct virtio_balloon *vb)
353 return 0; 353 return 0;
354} 354}
355 355
356static const struct address_space_operations virtio_balloon_aops;
357#ifdef CONFIG_BALLOON_COMPACTION 356#ifdef CONFIG_BALLOON_COMPACTION
358/* 357/*
359 * virtballoon_migratepage - perform the balloon page migration on behalf of 358 * virtballoon_migratepage - perform the balloon page migration on behalf of
360 * a compation thread. (called under page lock) 359 * a compation thread. (called under page lock)
361 * @mapping: the page->mapping which will be assigned to the new migrated page. 360 * @vb_dev_info: the balloon device
362 * @newpage: page that will replace the isolated page after migration finishes. 361 * @newpage: page that will replace the isolated page after migration finishes.
363 * @page : the isolated (old) page that is about to be migrated to newpage. 362 * @page : the isolated (old) page that is about to be migrated to newpage.
364 * @mode : compaction mode -- not used for balloon page migration. 363 * @mode : compaction mode -- not used for balloon page migration.
@@ -373,17 +372,13 @@ static const struct address_space_operations virtio_balloon_aops;
373 * This function preforms the balloon page migration task. 372 * This function preforms the balloon page migration task.
374 * Called through balloon_mapping->a_ops->migratepage 373 * Called through balloon_mapping->a_ops->migratepage
375 */ 374 */
376static int virtballoon_migratepage(struct address_space *mapping, 375static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
377 struct page *newpage, struct page *page, enum migrate_mode mode) 376 struct page *newpage, struct page *page, enum migrate_mode mode)
378{ 377{
379 struct balloon_dev_info *vb_dev_info = balloon_page_device(page); 378 struct virtio_balloon *vb = container_of(vb_dev_info,
380 struct virtio_balloon *vb; 379 struct virtio_balloon, vb_dev_info);
381 unsigned long flags; 380 unsigned long flags;
382 381
383 BUG_ON(!vb_dev_info);
384
385 vb = vb_dev_info->balloon_device;
386
387 /* 382 /*
388 * In order to avoid lock contention while migrating pages concurrently 383 * In order to avoid lock contention while migrating pages concurrently
389 * to leak_balloon() or fill_balloon() we just give up the balloon_lock 384 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
@@ -399,7 +394,7 @@ static int virtballoon_migratepage(struct address_space *mapping,
399 394
400 /* balloon's page migration 1st step -- inflate "newpage" */ 395 /* balloon's page migration 1st step -- inflate "newpage" */
401 spin_lock_irqsave(&vb_dev_info->pages_lock, flags); 396 spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
402 balloon_page_insert(newpage, mapping, &vb_dev_info->pages); 397 balloon_page_insert(vb_dev_info, newpage);
403 vb_dev_info->isolated_pages--; 398 vb_dev_info->isolated_pages--;
404 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); 399 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
405 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; 400 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
@@ -418,18 +413,11 @@ static int virtballoon_migratepage(struct address_space *mapping,
418 413
419 return MIGRATEPAGE_SUCCESS; 414 return MIGRATEPAGE_SUCCESS;
420} 415}
421
422/* define the balloon_mapping->a_ops callback to allow balloon page migration */
423static const struct address_space_operations virtio_balloon_aops = {
424 .migratepage = virtballoon_migratepage,
425};
426#endif /* CONFIG_BALLOON_COMPACTION */ 416#endif /* CONFIG_BALLOON_COMPACTION */
427 417
428static int virtballoon_probe(struct virtio_device *vdev) 418static int virtballoon_probe(struct virtio_device *vdev)
429{ 419{
430 struct virtio_balloon *vb; 420 struct virtio_balloon *vb;
431 struct address_space *vb_mapping;
432 struct balloon_dev_info *vb_devinfo;
433 int err; 421 int err;
434 422
435 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); 423 vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
@@ -445,30 +433,14 @@ static int virtballoon_probe(struct virtio_device *vdev)
445 vb->vdev = vdev; 433 vb->vdev = vdev;
446 vb->need_stats_update = 0; 434 vb->need_stats_update = 0;
447 435
448 vb_devinfo = balloon_devinfo_alloc(vb); 436 balloon_devinfo_init(&vb->vb_dev_info);
449 if (IS_ERR(vb_devinfo)) { 437#ifdef CONFIG_BALLOON_COMPACTION
450 err = PTR_ERR(vb_devinfo); 438 vb->vb_dev_info.migratepage = virtballoon_migratepage;
451 goto out_free_vb; 439#endif
452 }
453
454 vb_mapping = balloon_mapping_alloc(vb_devinfo,
455 (balloon_compaction_check()) ?
456 &virtio_balloon_aops : NULL);
457 if (IS_ERR(vb_mapping)) {
458 /*
459 * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP
460 * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off.
461 */
462 err = PTR_ERR(vb_mapping);
463 if (err != -EOPNOTSUPP)
464 goto out_free_vb_devinfo;
465 }
466
467 vb->vb_dev_info = vb_devinfo;
468 440
469 err = init_vqs(vb); 441 err = init_vqs(vb);
470 if (err) 442 if (err)
471 goto out_free_vb_mapping; 443 goto out_free_vb;
472 444
473 vb->thread = kthread_run(balloon, vb, "vballoon"); 445 vb->thread = kthread_run(balloon, vb, "vballoon");
474 if (IS_ERR(vb->thread)) { 446 if (IS_ERR(vb->thread)) {
@@ -480,10 +452,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
480 452
481out_del_vqs: 453out_del_vqs:
482 vdev->config->del_vqs(vdev); 454 vdev->config->del_vqs(vdev);
483out_free_vb_mapping:
484 balloon_mapping_free(vb_mapping);
485out_free_vb_devinfo:
486 balloon_devinfo_free(vb_devinfo);
487out_free_vb: 455out_free_vb:
488 kfree(vb); 456 kfree(vb);
489out: 457out:
@@ -509,8 +477,6 @@ static void virtballoon_remove(struct virtio_device *vdev)
509 477
510 kthread_stop(vb->thread); 478 kthread_stop(vb->thread);
511 remove_common(vb); 479 remove_common(vb);
512 balloon_mapping_free(vb->vb_dev_info->mapping);
513 balloon_devinfo_free(vb->vb_dev_info);
514 kfree(vb); 480 kfree(vb);
515} 481}
516 482
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 38aa07d5b81c..bc3d2985cc9a 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -57,21 +57,22 @@
57 * balloon driver as a page book-keeper for its registered balloon devices. 57 * balloon driver as a page book-keeper for its registered balloon devices.
58 */ 58 */
59struct balloon_dev_info { 59struct balloon_dev_info {
60 void *balloon_device; /* balloon device descriptor */
61 struct address_space *mapping; /* balloon special page->mapping */
62 unsigned long isolated_pages; /* # of isolated pages for migration */ 60 unsigned long isolated_pages; /* # of isolated pages for migration */
63 spinlock_t pages_lock; /* Protection to pages list */ 61 spinlock_t pages_lock; /* Protection to pages list */
64 struct list_head pages; /* Pages enqueued & handled to Host */ 62 struct list_head pages; /* Pages enqueued & handled to Host */
63 int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
64 struct page *page, enum migrate_mode mode);
65}; 65};
66 66
67extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); 67extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
68extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); 68extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
69extern struct balloon_dev_info *balloon_devinfo_alloc(
70 void *balloon_dev_descriptor);
71 69
72static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) 70static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
73{ 71{
74 kfree(b_dev_info); 72 balloon->isolated_pages = 0;
73 spin_lock_init(&balloon->pages_lock);
74 INIT_LIST_HEAD(&balloon->pages);
75 balloon->migratepage = NULL;
75} 76}
76 77
77#ifdef CONFIG_BALLOON_COMPACTION 78#ifdef CONFIG_BALLOON_COMPACTION
@@ -79,14 +80,6 @@ extern bool balloon_page_isolate(struct page *page);
79extern void balloon_page_putback(struct page *page); 80extern void balloon_page_putback(struct page *page);
80extern int balloon_page_migrate(struct page *newpage, 81extern int balloon_page_migrate(struct page *newpage,
81 struct page *page, enum migrate_mode mode); 82 struct page *page, enum migrate_mode mode);
82extern struct address_space
83*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
84 const struct address_space_operations *a_ops);
85
86static inline void balloon_mapping_free(struct address_space *balloon_mapping)
87{
88 kfree(balloon_mapping);
89}
90 83
91/* 84/*
92 * __is_movable_balloon_page - helper to perform @page PageBalloon tests 85 * __is_movable_balloon_page - helper to perform @page PageBalloon tests
@@ -120,27 +113,25 @@ static inline bool isolated_balloon_page(struct page *page)
120 113
121/* 114/*
122 * balloon_page_insert - insert a page into the balloon's page list and make 115 * balloon_page_insert - insert a page into the balloon's page list and make
123 * the page->mapping assignment accordingly. 116 * the page->private assignment accordingly.
117 * @balloon : pointer to balloon device
124 * @page : page to be assigned as a 'balloon page' 118 * @page : page to be assigned as a 'balloon page'
125 * @mapping : allocated special 'balloon_mapping'
126 * @head : balloon's device page list head
127 * 119 *
128 * Caller must ensure the page is locked and the spin_lock protecting balloon 120 * Caller must ensure the page is locked and the spin_lock protecting balloon
129 * pages list is held before inserting a page into the balloon device. 121 * pages list is held before inserting a page into the balloon device.
130 */ 122 */
131static inline void balloon_page_insert(struct page *page, 123static inline void balloon_page_insert(struct balloon_dev_info *balloon,
132 struct address_space *mapping, 124 struct page *page)
133 struct list_head *head)
134{ 125{
135 __SetPageBalloon(page); 126 __SetPageBalloon(page);
136 SetPagePrivate(page); 127 SetPagePrivate(page);
137 page->mapping = mapping; 128 set_page_private(page, (unsigned long)balloon);
138 list_add(&page->lru, head); 129 list_add(&page->lru, &balloon->pages);
139} 130}
140 131
141/* 132/*
142 * balloon_page_delete - delete a page from balloon's page list and clear 133 * balloon_page_delete - delete a page from balloon's page list and clear
143 * the page->mapping assignement accordingly. 134 * the page->private assignement accordingly.
144 * @page : page to be released from balloon's page list 135 * @page : page to be released from balloon's page list
145 * 136 *
146 * Caller must ensure the page is locked and the spin_lock protecting balloon 137 * Caller must ensure the page is locked and the spin_lock protecting balloon
@@ -149,7 +140,7 @@ static inline void balloon_page_insert(struct page *page,
149static inline void balloon_page_delete(struct page *page) 140static inline void balloon_page_delete(struct page *page)
150{ 141{
151 __ClearPageBalloon(page); 142 __ClearPageBalloon(page);
152 page->mapping = NULL; 143 set_page_private(page, 0);
153 if (PagePrivate(page)) { 144 if (PagePrivate(page)) {
154 ClearPagePrivate(page); 145 ClearPagePrivate(page);
155 list_del(&page->lru); 146 list_del(&page->lru);
@@ -162,11 +153,7 @@ static inline void balloon_page_delete(struct page *page)
162 */ 153 */
163static inline struct balloon_dev_info *balloon_page_device(struct page *page) 154static inline struct balloon_dev_info *balloon_page_device(struct page *page)
164{ 155{
165 struct address_space *mapping = page->mapping; 156 return (struct balloon_dev_info *)page_private(page);
166 if (likely(mapping))
167 return mapping->private_data;
168
169 return NULL;
170} 157}
171 158
172static inline gfp_t balloon_mapping_gfp_mask(void) 159static inline gfp_t balloon_mapping_gfp_mask(void)
@@ -174,29 +161,12 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
174 return GFP_HIGHUSER_MOVABLE; 161 return GFP_HIGHUSER_MOVABLE;
175} 162}
176 163
177static inline bool balloon_compaction_check(void)
178{
179 return true;
180}
181
182#else /* !CONFIG_BALLOON_COMPACTION */ 164#else /* !CONFIG_BALLOON_COMPACTION */
183 165
184static inline void *balloon_mapping_alloc(void *balloon_device, 166static inline void balloon_page_insert(struct balloon_dev_info *balloon,
185 const struct address_space_operations *a_ops) 167 struct page *page)
186{ 168{
187 return ERR_PTR(-EOPNOTSUPP); 169 list_add(&page->lru, &balloon->pages);
188}
189
190static inline void balloon_mapping_free(struct address_space *balloon_mapping)
191{
192 return;
193}
194
195static inline void balloon_page_insert(struct page *page,
196 struct address_space *mapping,
197 struct list_head *head)
198{
199 list_add(&page->lru, head);
200} 170}
201 171
202static inline void balloon_page_delete(struct page *page) 172static inline void balloon_page_delete(struct page *page)
@@ -240,9 +210,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
240 return GFP_HIGHUSER; 210 return GFP_HIGHUSER;
241} 211}
242 212
243static inline bool balloon_compaction_check(void)
244{
245 return false;
246}
247#endif /* CONFIG_BALLOON_COMPACTION */ 213#endif /* CONFIG_BALLOON_COMPACTION */
248#endif /* _LINUX_BALLOON_COMPACTION_H */ 214#endif /* _LINUX_BALLOON_COMPACTION_H */
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 19191d39c4f3..7ea069cd3257 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -24,8 +24,7 @@ enum mapping_flags {
24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ 24 AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ 25 AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ 26 AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
27 AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ 27 AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
28 AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */
29}; 28};
30 29
31static inline void mapping_set_error(struct address_space *mapping, int error) 30static inline void mapping_set_error(struct address_space *mapping, int error)
@@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping)
55 return !!mapping; 54 return !!mapping;
56} 55}
57 56
58static inline void mapping_set_balloon(struct address_space *mapping)
59{
60 set_bit(AS_BALLOON_MAP, &mapping->flags);
61}
62
63static inline void mapping_clear_balloon(struct address_space *mapping)
64{
65 clear_bit(AS_BALLOON_MAP, &mapping->flags);
66}
67
68static inline int mapping_balloon(struct address_space *mapping)
69{
70 return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
71}
72
73static inline void mapping_set_exiting(struct address_space *mapping) 57static inline void mapping_set_exiting(struct address_space *mapping)
74{ 58{
75 set_bit(AS_EXITING, &mapping->flags); 59 set_bit(AS_EXITING, &mapping->flags);
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 52abeeb3cb9d..3afdabdbc0a4 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -11,32 +11,6 @@
11#include <linux/balloon_compaction.h> 11#include <linux/balloon_compaction.h>
12 12
13/* 13/*
14 * balloon_devinfo_alloc - allocates a balloon device information descriptor.
15 * @balloon_dev_descriptor: pointer to reference the balloon device which
16 * this struct balloon_dev_info will be servicing.
17 *
18 * Driver must call it to properly allocate and initialize an instance of
19 * struct balloon_dev_info which will be used to reference a balloon device
20 * as well as to keep track of the balloon device page list.
21 */
22struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor)
23{
24 struct balloon_dev_info *b_dev_info;
25 b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL);
26 if (!b_dev_info)
27 return ERR_PTR(-ENOMEM);
28
29 b_dev_info->balloon_device = balloon_dev_descriptor;
30 b_dev_info->mapping = NULL;
31 b_dev_info->isolated_pages = 0;
32 spin_lock_init(&b_dev_info->pages_lock);
33 INIT_LIST_HEAD(&b_dev_info->pages);
34
35 return b_dev_info;
36}
37EXPORT_SYMBOL_GPL(balloon_devinfo_alloc);
38
39/*
40 * balloon_page_enqueue - allocates a new page and inserts it into the balloon 14 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
41 * page list. 15 * page list.
42 * @b_dev_info: balloon device decriptor where we will insert a new page to 16 * @b_dev_info: balloon device decriptor where we will insert a new page to
@@ -61,7 +35,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
61 */ 35 */
62 BUG_ON(!trylock_page(page)); 36 BUG_ON(!trylock_page(page));
63 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 37 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
64 balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages); 38 balloon_page_insert(b_dev_info, page);
65 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); 39 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
66 unlock_page(page); 40 unlock_page(page);
67 return page; 41 return page;
@@ -127,60 +101,10 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
127EXPORT_SYMBOL_GPL(balloon_page_dequeue); 101EXPORT_SYMBOL_GPL(balloon_page_dequeue);
128 102
129#ifdef CONFIG_BALLOON_COMPACTION 103#ifdef CONFIG_BALLOON_COMPACTION
130/*
131 * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages.
132 * @b_dev_info: holds the balloon device information descriptor.
133 * @a_ops: balloon_mapping address_space_operations descriptor.
134 *
135 * Driver must call it to properly allocate and initialize an instance of
136 * struct address_space which will be used as the special page->mapping for
137 * balloon device enlisted page instances.
138 */
139struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info,
140 const struct address_space_operations *a_ops)
141{
142 struct address_space *mapping;
143
144 mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
145 if (!mapping)
146 return ERR_PTR(-ENOMEM);
147
148 /*
149 * Give a clean 'zeroed' status to all elements of this special
150 * balloon page->mapping struct address_space instance.
151 */
152 address_space_init_once(mapping);
153
154 /*
155 * Set mapping->flags appropriately, to allow balloon pages
156 * ->mapping identification.
157 */
158 mapping_set_balloon(mapping);
159 mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask());
160
161 /* balloon's page->mapping->a_ops callback descriptor */
162 mapping->a_ops = a_ops;
163
164 /*
165 * Establish a pointer reference back to the balloon device descriptor
166 * this particular page->mapping will be servicing.
167 * This is used by compaction / migration procedures to identify and
168 * access the balloon device pageset while isolating / migrating pages.
169 *
170 * As some balloon drivers can register multiple balloon devices
171 * for a single guest, this also helps compaction / migration to
172 * properly deal with multiple balloon pagesets, when required.
173 */
174 mapping->private_data = b_dev_info;
175 b_dev_info->mapping = mapping;
176
177 return mapping;
178}
179EXPORT_SYMBOL_GPL(balloon_mapping_alloc);
180 104
181static inline void __isolate_balloon_page(struct page *page) 105static inline void __isolate_balloon_page(struct page *page)
182{ 106{
183 struct balloon_dev_info *b_dev_info = page->mapping->private_data; 107 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
184 unsigned long flags; 108 unsigned long flags;
185 109
186 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 110 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -192,7 +116,7 @@ static inline void __isolate_balloon_page(struct page *page)
192 116
193static inline void __putback_balloon_page(struct page *page) 117static inline void __putback_balloon_page(struct page *page)
194{ 118{
195 struct balloon_dev_info *b_dev_info = page->mapping->private_data; 119 struct balloon_dev_info *b_dev_info = balloon_page_device(page);
196 unsigned long flags; 120 unsigned long flags;
197 121
198 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 122 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -202,12 +126,6 @@ static inline void __putback_balloon_page(struct page *page)
202 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); 126 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
203} 127}
204 128
205static inline int __migrate_balloon_page(struct address_space *mapping,
206 struct page *newpage, struct page *page, enum migrate_mode mode)
207{
208 return page->mapping->a_ops->migratepage(mapping, newpage, page, mode);
209}
210
211/* __isolate_lru_page() counterpart for a ballooned page */ 129/* __isolate_lru_page() counterpart for a ballooned page */
212bool balloon_page_isolate(struct page *page) 130bool balloon_page_isolate(struct page *page)
213{ 131{
@@ -274,7 +192,7 @@ void balloon_page_putback(struct page *page)
274int balloon_page_migrate(struct page *newpage, 192int balloon_page_migrate(struct page *newpage,
275 struct page *page, enum migrate_mode mode) 193 struct page *page, enum migrate_mode mode)
276{ 194{
277 struct address_space *mapping; 195 struct balloon_dev_info *balloon = balloon_page_device(page);
278 int rc = -EAGAIN; 196 int rc = -EAGAIN;
279 197
280 /* 198 /*
@@ -290,9 +208,8 @@ int balloon_page_migrate(struct page *newpage,
290 return rc; 208 return rc;
291 } 209 }
292 210
293 mapping = page->mapping; 211 if (balloon && balloon->migratepage)
294 if (mapping) 212 rc = balloon->migratepage(balloon, newpage, page, mode);
295 rc = __migrate_balloon_page(mapping, newpage, page, mode);
296 213
297 unlock_page(newpage); 214 unlock_page(newpage);
298 return rc; 215 return rc;