aboutsummaryrefslogtreecommitdiffstats
path: root/mm/balloon_compaction.c
diff options
context:
space:
mode:
authorKonstantin Khlebnikov <k.khlebnikov@samsung.com>2014-10-09 18:29:27 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:26:01 -0400
commitd6d86c0a7f8ddc5b38cf089222cb1d9540762dc2 (patch)
tree839bdb8072211f053a800c25aab3e3da1d30c9ed /mm/balloon_compaction.c
parent29e5694054149acd25b0d5538c95fb6d64478315 (diff)
mm/balloon_compaction: redesign ballooned pages management
Sasha Levin reported a KASAN splash inside isolate_migratepages_range(). The problem is in the function __is_movable_balloon_page(), which tests AS_BALLOON_MAP in page->mapping->flags. This function has no protection against anonymous pages. As a result it tried to check address space flags inside struct anon_vma. Further investigation shows more problems in the current implementation: * The special branch in __unmap_and_move() never works: balloon_page_movable() checks page flags and page_count. In __unmap_and_move() the page is locked and the reference counter is elevated, thus balloon_page_movable() always fails. As a result execution goes to the normal migration path. virtballoon_migratepage() returns MIGRATEPAGE_BALLOON_SUCCESS instead of MIGRATEPAGE_SUCCESS, move_to_new_page() thinks this is an error code and assigns newpage->mapping to NULL. The newly migrated page loses connectivity with the balloon and all ability for further migration. * lru_lock is erroneously required in isolate_migratepages_range() for isolating a ballooned page. This function releases lru_lock periodically, which makes migration mostly impossible for some pages. * balloon_page_dequeue has a tight race with balloon_page_isolate: balloon_page_isolate could be executed in parallel with dequeue between picking the page from the list and taking the page lock. The race is rare because they use trylock_page() for locking. This patch fixes all of them. Instead of a fake mapping with a special flag, this patch uses a special state of page->_mapcount: PAGE_BALLOON_MAPCOUNT_VALUE = -256. The buddy allocator uses PAGE_BUDDY_MAPCOUNT_VALUE = -128 for a similar purpose. Storing the mark directly in struct page makes everything safer and easier. PagePrivate is used to mark pages present in the page list (i.e. not isolated, like PageLRU for normal pages). It replaces the special rules for the reference counter and makes balloon migration similar to migration of normal pages. This flag is protected by the page lock together with the link to the balloon device. 
Signed-off-by: Konstantin Khlebnikov <k.khlebnikov@samsung.com> Reported-by: Sasha Levin <sasha.levin@oracle.com> Link: http://lkml.kernel.org/p/53E6CEAA.9020105@oracle.com Cc: Rafael Aquini <aquini@redhat.com> Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com> Cc: <stable@vger.kernel.org> [3.8+] Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/balloon_compaction.c')
-rw-r--r--mm/balloon_compaction.c26
1 file changed, 12 insertions, 14 deletions
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 6e45a5074bf0..52abeeb3cb9d 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -93,17 +93,12 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
93 * to be released by the balloon driver. 93 * to be released by the balloon driver.
94 */ 94 */
95 if (trylock_page(page)) { 95 if (trylock_page(page)) {
96 if (!PagePrivate(page)) {
97 /* raced with isolation */
98 unlock_page(page);
99 continue;
100 }
96 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 101 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
97 /*
98 * Raise the page refcount here to prevent any wrong
99 * attempt to isolate this page, in case of coliding
100 * with balloon_page_isolate() just after we release
101 * the page lock.
102 *
103 * balloon_page_free() will take care of dropping
104 * this extra refcount later.
105 */
106 get_page(page);
107 balloon_page_delete(page); 102 balloon_page_delete(page);
108 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); 103 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
109 unlock_page(page); 104 unlock_page(page);
@@ -187,7 +182,9 @@ static inline void __isolate_balloon_page(struct page *page)
187{ 182{
188 struct balloon_dev_info *b_dev_info = page->mapping->private_data; 183 struct balloon_dev_info *b_dev_info = page->mapping->private_data;
189 unsigned long flags; 184 unsigned long flags;
185
190 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 186 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
187 ClearPagePrivate(page);
191 list_del(&page->lru); 188 list_del(&page->lru);
192 b_dev_info->isolated_pages++; 189 b_dev_info->isolated_pages++;
193 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); 190 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -197,7 +194,9 @@ static inline void __putback_balloon_page(struct page *page)
197{ 194{
198 struct balloon_dev_info *b_dev_info = page->mapping->private_data; 195 struct balloon_dev_info *b_dev_info = page->mapping->private_data;
199 unsigned long flags; 196 unsigned long flags;
197
200 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 198 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
199 SetPagePrivate(page);
201 list_add(&page->lru, &b_dev_info->pages); 200 list_add(&page->lru, &b_dev_info->pages);
202 b_dev_info->isolated_pages--; 201 b_dev_info->isolated_pages--;
203 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); 202 spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -235,12 +234,11 @@ bool balloon_page_isolate(struct page *page)
235 */ 234 */
236 if (likely(trylock_page(page))) { 235 if (likely(trylock_page(page))) {
237 /* 236 /*
238 * A ballooned page, by default, has just one refcount. 237 * A ballooned page, by default, has PagePrivate set.
239 * Prevent concurrent compaction threads from isolating 238 * Prevent concurrent compaction threads from isolating
240 * an already isolated balloon page by refcount check. 239 * an already isolated balloon page by clearing it.
241 */ 240 */
242 if (__is_movable_balloon_page(page) && 241 if (balloon_page_movable(page)) {
243 page_count(page) == 2) {
244 __isolate_balloon_page(page); 242 __isolate_balloon_page(page);
245 unlock_page(page); 243 unlock_page(page);
246 return true; 244 return true;