Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c | 37 ++++++++++++++++++++++++++++-----------
1 file changed, 26 insertions(+), 11 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 860ec211ddd6..4298abaae153 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -990,7 +990,7 @@ struct page *follow_trans_huge_pmd(struct mm_struct *mm,
 	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
 	VM_BUG_ON(!PageCompound(page));
 	if (flags & FOLL_GET)
-		get_page(page);
+		get_page_foll(page);
 
 out:
 	return page;
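This first hunk switches gup's trans-huge path from get_page() to get_page_foll(), the follow_page-side pin primitive this series adds in mm/internal.h: a pin on a THP tail page is parked in the tail's _mapcount, with a matching reference on the head, which is exactly what the __split_huge_page_refcount() changes below consume. Roughly, as a paraphrased sketch (kernel context assumed; consult the actual tree for the verbatim code):

/*
 * Paraphrased sketch of the pin primitive this series introduces in
 * mm/internal.h; not the verbatim kernel code.
 */
static inline void get_page_foll(struct page *page)
{
	if (unlikely(PageTail(page))) {
		/*
		 * THP tail: _count stays 0; the gup pin is recorded
		 * in the tail's _mapcount, and the head is referenced
		 * so the compound page cannot be freed under us.
		 */
		VM_BUG_ON(atomic_read(&page->_count) != 0);
		atomic_inc(&page->first_page->_count);
		atomic_inc(&page->_mapcount);
	} else {
		/* head or order-0 page: same as get_page() */
		VM_BUG_ON(atomic_read(&page->_count) <= 0);
		atomic_inc(&page->_count);
	}
}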
@@ -1202,6 +1202,7 @@ static void __split_huge_page_refcount(struct page *page)
 	unsigned long head_index = page->index;
 	struct zone *zone = page_zone(page);
 	int zonestat;
+	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
@@ -1210,11 +1211,27 @@ static void __split_huge_page_refcount(struct page *page)
 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		struct page *page_tail = page + i;
 
-		/* tail_page->_count cannot change */
-		atomic_sub(atomic_read(&page_tail->_count), &page->_count);
-		BUG_ON(page_count(page) <= 0);
-		atomic_add(page_mapcount(page) + 1, &page_tail->_count);
-		BUG_ON(atomic_read(&page_tail->_count) <= 0);
+		/* tail_page->_mapcount cannot change */
+		BUG_ON(page_mapcount(page_tail) < 0);
+		tail_count += page_mapcount(page_tail);
+		/* check for overflow */
+		BUG_ON(tail_count < 0);
+		BUG_ON(atomic_read(&page_tail->_count) != 0);
+		/*
+		 * tail_page->_count is zero and not changing from
+		 * under us. But get_page_unless_zero() may be running
+		 * from under us on the tail_page. If we used
+		 * atomic_set() below instead of atomic_add(), we
+		 * would then run atomic_set() concurrently with
+		 * get_page_unless_zero(), and atomic_set() is
+		 * implemented in C not using locked ops. spin_unlock
+		 * on x86 sometime uses locked ops because of PPro
+		 * errata 66, 92, so unless somebody can guarantee
+		 * atomic_set() here would be safe on all archs (and
+		 * not only on x86), it's safer to use atomic_add().
+		 */
+		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
+			   &page_tail->_count);
 
 		/* after clearing PageTail the gup refcount can be released */
 		smp_mb();
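Why atomic_add() on a counter known to be zero, instead of atomic_set()? The in-code comment in this hunk is the heart of the patch. A hypothetical userspace model of the two choices, with C11 atomics standing in for the kernel's atomic_t (speculative_get() and publish_tail_refs() are invented names; this is a sketch of the reasoning, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int tail_refcount; /* models tail_page->_count, known to be 0 */

/* Models get_page_unless_zero(): take a reference only if nonzero. */
static bool speculative_get(void)
{
	int old = atomic_load(&tail_refcount);

	while (old != 0)
		if (atomic_compare_exchange_weak(&tail_refcount,
						 &old, old + 1))
			return true;
	return false; /* count was zero, no reference taken */
}

/*
 * Models the split path publishing the tail's new refcount. A locked
 * read-modify-write (fetch_add) serializes with the cmpxchg loop in
 * speculative_get() on every architecture. The kernel's atomic_set()
 * is a plain C store, and the comment above declines to assume that a
 * plain store racing with a locked cmpxchg on the same word is safe
 * on all archs, so the add is the defensive choice. Since the old
 * value is known to be zero, both would publish the same value.
 */
static void publish_tail_refs(int refs)
{
	atomic_fetch_add(&tail_refcount, refs); /* the safer choice */
	/* atomic_store(&tail_refcount, refs);     the avoided one   */
}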
@@ -1232,10 +1249,7 @@ static void __split_huge_page_refcount(struct page *page)
 				      (1L << PG_uptodate)));
 		page_tail->flags |= (1L << PG_dirty);
 
-		/*
-		 * 1) clear PageTail before overwriting first_page
-		 * 2) clear PageTail before clearing PageHead for VM_BUG_ON
-		 */
+		/* clear PageTail before overwriting first_page */
 		smp_wmb();
 
 		/*
@@ -1252,7 +1266,6 @@ static void __split_huge_page_refcount(struct page *page)
 		 * status is achieved setting a reserved bit in the
 		 * pmd, not by clearing the present bit.
 		 */
-		BUG_ON(page_mapcount(page_tail));
 		page_tail->_mapcount = page->_mapcount;
 
 		BUG_ON(page_tail->mapping);
@@ -1269,6 +1282,8 @@ static void __split_huge_page_refcount(struct page *page)
 
 		lru_add_page_tail(zone, page, page_tail);
 	}
+	atomic_sub(tail_count, &page->_count);
+	BUG_ON(atomic_read(&page->_count) <= 0);
 
 	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
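The last hunk completes the batching: the old code issued one atomic_sub() on the head's _count per tail page inside the loop, while the new code accumulates the tail pins into tail_count and subtracts once after the loop, so one locked op replaces up to HPAGE_PMD_NR - 1 of them. A minimal userspace sketch of that pattern (hypothetical; NR_TAILS and drop_tail_pins are invented names, and 512 stands in for HPAGE_PMD_NR with 2MB pages on x86-64):

#include <stdatomic.h>

#define NR_TAILS 512 /* stands in for HPAGE_PMD_NR */

static atomic_int head_count; /* models the head page's _count */

static void drop_tail_pins(const int tail_pins[NR_TAILS])
{
	int tail_count = 0;
	int i;

	for (i = 0; i < NR_TAILS; i++)
		tail_count += tail_pins[i]; /* no atomic op per tail */

	/* one locked op instead of NR_TAILS of them */
	atomic_fetch_sub(&head_count, tail_count);
}

int main(void)
{
	int pins[NR_TAILS] = { [0] = 2, [1] = 1 }; /* toy pin counts */

	atomic_store(&head_count, 1 + 2 + 1); /* base ref plus the pins */
	drop_tail_pins(pins);
	return atomic_load(&head_count) == 1 ? 0 : 1; /* base ref remains */
}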