author		Konstantin Khlebnikov <k.khlebnikov@samsung.com>	2014-10-09 18:29:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:26:01 -0400
commit		d6d86c0a7f8ddc5b38cf089222cb1d9540762dc2 (patch)
tree		839bdb8072211f053a800c25aab3e3da1d30c9ed	/drivers/virtio/virtio_balloon.c
parent		29e5694054149acd25b0d5538c95fb6d64478315 (diff)
mm/balloon_compaction: redesign ballooned pages management
Sasha Levin reported a KASAN splat inside isolate_migratepages_range().
The problem is in __is_movable_balloon_page(), which tests AS_BALLOON_MAP
in page->mapping->flags. This function has no protection against anonymous
pages, so it ended up checking address-space flags inside struct anon_vma.

Further investigation shows more problems in the current implementation:

* The special branch in __unmap_and_move() never works:
  balloon_page_movable() checks page flags and page_count. In
  __unmap_and_move() the page is locked and its reference counter is
  elevated, so balloon_page_movable() always fails and execution goes down
  the normal migration path. virtballoon_migratepage() then returns
  MIGRATEPAGE_BALLOON_SUCCESS instead of MIGRATEPAGE_SUCCESS,
  move_to_new_page() treats that as an error code and sets
  newpage->mapping to NULL. The newly migrated page loses its connection
  to the balloon and all ability for further migration.

* lru_lock is erroneously required in isolate_migratepages_range() for
  isolating ballooned pages. That function releases lru_lock periodically,
  which makes migration mostly impossible for some pages.

* balloon_page_dequeue() has a tight race with balloon_page_isolate():
  balloon_page_isolate() can run in parallel with dequeue between picking
  the page from the list and taking the page lock. The race is rare
  because both sides use trylock_page().

This patch fixes all of them.

Instead of a fake mapping with a special flag, this patch uses a special
state of page->_mapcount: PAGE_BALLOON_MAPCOUNT_VALUE = -256. The buddy
allocator uses PAGE_BUDDY_MAPCOUNT_VALUE = -128 for a similar purpose.
Storing the mark directly in struct page makes everything safer and
easier.

PagePrivate is used to mark pages present in the page list (i.e. not
isolated), like PageLRU for normal pages. It replaces the special rules
for the reference counter and makes balloon migration similar to migration
of normal pages. This flag is protected by page_lock together with the
link to the balloon device.

Signed-off-by: Konstantin Khlebnikov <k.khlebnikov@samsung.com>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Link: http://lkml.kernel.org/p/53E6CEAA.9020105@oracle.com
Cc: Rafael Aquini <aquini@redhat.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: <stable@vger.kernel.org>	[3.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
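
For context on the description above, here is a minimal sketch of the page->_mapcount marking this patch switches to. These helpers are added outside the file shown below (the full patch touches include/linux/mm.h and mm/balloon_compaction.c among others); the names and bodies here follow the commit message and are an approximation, not the hunks on this page.

#define PAGE_BALLOON_MAPCOUNT_VALUE (-256)

static inline int PageBalloon(struct page *page)
{
	/* negative _mapcount sentinel, analogous to PAGE_BUDDY_MAPCOUNT_VALUE */
	return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE;
}

static inline void __SetPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE);
}

static inline void __ClearPageBalloon(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBalloon(page), page);
	atomic_set(&page->_mapcount, -1);
}

Because the mark lives in struct page itself, a test like PageBalloon() is safe on any page, including anonymous ones, which removes the condition behind the original KASAN report.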
Diffstat (limited to 'drivers/virtio/virtio_balloon.c')
-rw-r--r--  drivers/virtio/virtio_balloon.c  |  15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 25ebe8eecdb7..c3eb93fc9261 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -163,8 +163,8 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
 	/* Find pfns pointing at start of each page, get pages and free them. */
 	for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
 		struct page *page = balloon_pfn_to_page(pfns[i]);
-		balloon_page_free(page);
 		adjust_managed_page_count(page, 1);
+		put_page(page); /* balloon reference */
 	}
 }
 
@@ -395,6 +395,8 @@ static int virtballoon_migratepage(struct address_space *mapping,
 	if (!mutex_trylock(&vb->balloon_lock))
 		return -EAGAIN;
 
+	get_page(newpage); /* balloon reference */
+
 	/* balloon's page migration 1st step -- inflate "newpage" */
 	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
 	balloon_page_insert(newpage, mapping, &vb_dev_info->pages);
@@ -404,12 +406,7 @@ static int virtballoon_migratepage(struct address_space *mapping,
 	set_page_pfns(vb->pfns, newpage);
 	tell_host(vb, vb->inflate_vq);
 
-	/*
-	 * balloon's page migration 2nd step -- deflate "page"
-	 *
-	 * It's safe to delete page->lru here because this page is at
-	 * an isolated migration list, and this step is expected to happen here
-	 */
+	/* balloon's page migration 2nd step -- deflate "page" */
 	balloon_page_delete(page);
 	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
 	set_page_pfns(vb->pfns, page);
@@ -417,7 +414,9 @@ static int virtballoon_migratepage(struct address_space *mapping,
 
 	mutex_unlock(&vb->balloon_lock);
 
-	return MIGRATEPAGE_BALLOON_SUCCESS;
+	put_page(page); /* balloon reference */
+
+	return MIGRATEPAGE_SUCCESS;
 }
 
 /* define the balloon_mapping->a_ops callback to allow balloon page migration */
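
The balloon_page_insert() and balloon_page_delete() calls in the hunks above rely on the PagePrivate protocol described in the commit message: PagePrivate set means the page sits on the balloon's page list, and it is cleared while the page is isolated for migration. A rough sketch of those helpers under that assumption (the real definitions are reworked in include/linux/balloon_compaction.h by this patch and may differ in detail):

static inline void balloon_page_insert(struct page *page,
				       struct address_space *mapping,
				       struct list_head *head)
{
	__SetPageBalloon(page);
	SetPagePrivate(page);	/* on the balloon page list, like PageLRU */
	page->mapping = mapping;
	list_add(&page->lru, head);
}

static inline void balloon_page_delete(struct page *page)
{
	__ClearPageBalloon(page);
	page->mapping = NULL;
	/* only unlink if not isolated; an isolated page's ->lru belongs to migration */
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		list_del(&page->lru);
	}
}

With the mark and list membership handled this way, virtballoon_migratepage() can hold plain get_page()/put_page() "balloon references" on the old and new pages and return the ordinary MIGRATEPAGE_SUCCESS, as the last two hunks show.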