Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c      29
-rw-r--r--  mm/debug.c            4
-rw-r--r--  mm/kasan/kasan.h      5
-rw-r--r--  mm/memory.c          11
-rw-r--r--  mm/memory_hotplug.c  19
-rw-r--r--  mm/mempolicy.c       40
-rw-r--r--  mm/migrate.c         11
-rw-r--r--  mm/page_alloc.c       2
-rw-r--r--  mm/page_isolation.c  51
-rw-r--r--  mm/slab.c             2
-rw-r--r--  mm/slab.h             3
-rw-r--r--  mm/slab_common.c      2
-rw-r--r--  mm/slub.c             5
-rw-r--r--  mm/sparse.c           2
14 files changed, 129 insertions, 57 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index f171a83707ce..3319e0872d01 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 	    bool check_target)
 {
 	struct page *page = pfn_to_online_page(pfn);
+	struct page *block_page;
 	struct page *end_page;
 	unsigned long block_pfn;
 
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
 		return false;
 
+	/* Ensure the start of the pageblock or zone is online and valid */
+	block_pfn = pageblock_start_pfn(pfn);
+	block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+	if (block_page) {
+		page = block_page;
+		pfn = block_pfn;
+	}
+
+	/* Ensure the end of the pageblock or zone is online and valid */
+	block_pfn += pageblock_nr_pages;
+	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+	end_page = pfn_to_online_page(block_pfn);
+	if (!end_page)
+		return false;
+
 	/*
 	 * Only clear the hint if a sample indicates there is either a
 	 * free page or an LRU page in the block. One or other condition
 	 * is necessary for the block to be a migration source/target.
 	 */
-	block_pfn = pageblock_start_pfn(pfn);
-	pfn = max(block_pfn, zone->zone_start_pfn);
-	page = pfn_to_page(pfn);
-	if (zone != page_zone(page))
-		return false;
-	pfn = block_pfn + pageblock_nr_pages;
-	pfn = min(pfn, zone_end_pfn(zone));
-	end_page = pfn_to_page(pfn);
-
 	do {
 		if (pfn_valid_within(pfn)) {
 			if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long migrate_pfn = zone->zone_start_pfn;
-	unsigned long free_pfn = zone_end_pfn(zone);
+	unsigned long free_pfn = zone_end_pfn(zone) - 1;
 	unsigned long reset_migrate = free_pfn;
 	unsigned long reset_free = migrate_pfn;
 	bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
 				count_compact_events(COMPACTISOLATED, nr_isolated);
 			} else {
 				/* If isolation fails, abort the search */
-				order = -1;
+				order = cc->search_order + 1;
 				page = NULL;
 			}
 		}
diff --git a/mm/debug.c b/mm/debug.c
index c0b31b6c3877..eee9c221280c 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
 		pr_warn("ksm ");
 	else if (mapping) {
 		pr_warn("%ps ", mapping->a_ops);
-		if (mapping->host->i_dentry.first) {
+		if (mapping->host && mapping->host->i_dentry.first) {
 			struct dentry *dentry;
 			dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
 			pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
 		mm_pgtables_bytes(mm),
 		mm->map_count,
 		mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-		atomic64_read(&mm->pinned_vm),
+		(u64)atomic64_read(&mm->pinned_vm),
 		mm->data_vm, mm->exec_vm, mm->stack_vm,
 		mm->start_code, mm->end_code, mm->start_data, mm->end_data,
 		mm->start_brk, mm->brk, mm->start_stack,
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 3e0c11f7d7a1..3ce956efa0cb 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
 #endif
 
 #ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag)	((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+	return addr;
+}
 #endif
 #ifndef arch_kasan_reset_tag
 #define arch_kasan_reset_tag(addr)	((void *)(addr))
diff --git a/mm/memory.c b/mm/memory.c
index 47fe250307c7..ab650c21bccd 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
 				goto out_unlock;
 			}
-			entry = *pte;
-			goto out_mkwrite;
-		} else
-			goto out_unlock;
+			entry = pte_mkyoung(*pte);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+				update_mmu_cache(vma, addr, pte);
+		}
+		goto out_unlock;
 	}
 
 	/* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	else
 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
 	if (mkwrite) {
 		entry = pte_mkyoung(entry);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index f767582af4f8..0082d699be94 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 {
 	unsigned long pfn, nr_pages;
 	long offlined_pages;
-	int ret, node;
+	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
 	unsigned long valid_start, valid_end;
 	struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE,
 				       SKIP_HWPOISON | REPORT_FAILURE);
-	if (ret) {
+	if (ret < 0) {
 		reason = "failure to isolate range";
 		goto failed_removal;
 	}
+	nr_isolate_pageblock = ret;
 
 	arg.start_pfn = start_pfn;
 	arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
 	/* Ok, all of our target is isolated.
 	   We cannot do rollback at this point. */
 	offline_isolated_pages(start_pfn, end_pfn);
-	/* reset pagetype flags and makes migrate type to be MOVABLE */
-	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+	/*
+	 * Onlining will reset pagetype flags and makes migrate type
+	 * MOVABLE, so just need to decrease the number of isolated
+	 * pageblocks zone counter here.
+	 */
+	spin_lock_irqsave(&zone->lock, flags);
+	zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+	spin_unlock_irqrestore(&zone->lock, flags);
+
 	/* removal success */
 	adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
 	zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 failed_removal_isolated:
 	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal:
 	pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
 		 (unsigned long long) start_pfn << PAGE_SHIFT,
 		 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
 		 reason);
-	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 	/* pushback to free area */
 	mem_hotplug_done();
 	return ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index af171ccb56a2..2219e747df49 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
+ *        page was already on a node that does not follow the policy.
+ */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	unsigned long flags;
 
 	if (unlikely(is_pmd_migration_entry(*pmd))) {
-		ret = 1;
+		ret = -EIO;
 		goto unlock;
 	}
 	page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	ret = 1;
 	flags = qp->flags;
 	/* go to thp migration */
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+		if (!vma_migratable(walk->vma)) {
+			ret = -EIO;
+			goto unlock;
+		}
+
 		migrate_page_add(page, qp->pagelist, flags);
+	} else
+		ret = -EIO;
 unlock:
 	spin_unlock(ptl);
 out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-		if (ret)
+		if (ret > 0)
 			return 0;
+		else if (ret < 0)
+			return ret;
 	}
 
 	if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 		if (!queue_pages_required(page, qp))
 			continue;
-		migrate_page_add(page, qp->pagelist, flags);
+		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+			if (!vma_migratable(vma))
+				break;
+			migrate_page_add(page, qp->pagelist, flags);
+		} else
+			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
 	cond_resched();
-	return 0;
+	return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	unsigned long endvma = vma->vm_end;
 	unsigned long flags = qp->flags;
 
-	if (!vma_migratable(vma))
+	/*
+	 * Need check MPOL_MF_STRICT to return -EIO if possible
+	 * regardless of vma_migratable
+	 */
+	if (!vma_migratable(vma) &&
+	    !(flags & MPOL_MF_STRICT))
 		return 1;
 
 	if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 	}
 
 	/* queue pages from current vma */
-	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+	if (flags & MPOL_MF_VALID)
 		return 0;
 	return 1;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index ac6f4939bb59..663a5449367a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 			pte = swp_entry_to_pte(entry);
 		} else if (is_device_public_page(new)) {
 			pte = pte_mkdevmap(pte);
-			flush_dcache_page(new);
 		}
-	} else
-		flush_dcache_page(new);
+	}
 
 #ifdef CONFIG_HUGETLB_PAGE
 	if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		 */
 		if (!PageMappingFlags(page))
 			page->mapping = NULL;
+
+		if (unlikely(is_zone_device_page(newpage))) {
+			if (is_device_public_page(newpage))
+				flush_dcache_page(newpage);
+		} else
+			flush_dcache_page(newpage);
+
 	}
 out:
 	return rc;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03fcf73d47da..d96ca5bc555b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	ret = start_isolate_page_range(pfn_max_align_down(start),
 				       pfn_max_align_up(end), migratetype, 0);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
 	/*
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index ce323e56b34d..019280712e1b 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
 	 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
 	 * We just check MOVABLE pages.
 	 */
-	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+	if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+				 isol_flags))
 		ret = 0;
 
 	/*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 	return NULL;
 }
 
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
+/**
+ * start_isolate_page_range() - make page-allocation-type of range of pages to
+ *				be MIGRATE_ISOLATE.
  * @start_pfn: The lower PFN of the range to be isolated.
  * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+ *		start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype: Migrate type to set in error recovery.
+ * @flags: The following flags are allowed (they can be combined in
+ *		a bit mask)
+ *		SKIP_HWPOISON - ignore hwpoison pages
+ *		REPORT_FAILURE - report details about the failure to
+ *		isolate the range
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If specified range includes migrate types
+ * other than MOVABLE or CMA, this will fail with -EBUSY. For isolating all
+ * pages in the range finally, the caller have to free all pages in the range.
+ * test_page_isolated() can be used for test it.
  *
  * There is no high level synchronization mechanism that prevents two threads
  * from trying to isolate overlapping ranges. If this happens, one thread
  * will notice pageblocks in the overlapping range already set to isolate.
  * This happens in set_migratetype_isolate, and set_migratetype_isolate
  * returns an error. We then clean up by restoring the migration type on
  * pageblocks we may have modified and return -EBUSY to caller. This
  * prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 			     unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	unsigned long pfn;
 	unsigned long undo_pfn;
 	struct page *page;
+	int nr_isolate_pageblock = 0;
 
 	BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
 	BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 	     pfn < end_pfn;
 	     pfn += pageblock_nr_pages) {
 		page = __first_valid_page(pfn, pageblock_nr_pages);
-		if (page &&
-		    set_migratetype_isolate(page, migratetype, flags)) {
-			undo_pfn = pfn;
-			goto undo;
+		if (page) {
+			if (set_migratetype_isolate(page, migratetype, flags)) {
+				undo_pfn = pfn;
+				goto undo;
+			}
+			nr_isolate_pageblock++;
 		}
 	}
-	return 0;
+	return nr_isolate_pageblock;
 undo:
 	for (pfn = start_pfn;
 	     pfn < undo_pfn;
diff --git a/mm/slab.c b/mm/slab.c
index 28652e4218e0..329bfe67f2ca 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ done:
 	cachep->allocflags = __GFP_COMP;
 	if (flags & SLAB_CACHE_DMA)
 		cachep->allocflags |= GFP_DMA;
+	if (flags & SLAB_CACHE_DMA32)
+		cachep->allocflags |= GFP_DMA32;
 	if (flags & SLAB_RECLAIM_ACCOUNT)
 		cachep->allocflags |= __GFP_RECLAIMABLE;
 	cachep->size = size;
diff --git a/mm/slab.h b/mm/slab.h
index e5e6658eeacc..43ac818b8592 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
 			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 03eeb8b7b4b1..58251ba63e4a 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		      SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-			 SLAB_ACCOUNT)
+			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index 1b08fbcb7e61..d30ede89f4a6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (s->flags & SLAB_CACHE_DMA)
 		s->allocflags |= GFP_DMA;
 
+	if (s->flags & SLAB_CACHE_DMA32)
+		s->allocflags |= GFP_DMA32;
+
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		s->allocflags |= __GFP_RECLAIMABLE;
 
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
 	 */
 	if (s->flags & SLAB_CACHE_DMA)
 		*p++ = 'd';
+	if (s->flags & SLAB_CACHE_DMA32)
+		*p++ = 'D';
 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
 		*p++ = 'a';
 	if (s->flags & SLAB_CONSISTENCY_CHECKS)
diff --git a/mm/sparse.c b/mm/sparse.c
index 69904aa6165b..56e057c432f9 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn;